// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/backend/mid-tier-register-allocator.h"

#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/tick-counter.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/linkage.h"
#include "src/logging/counters.h"
#include "src/utils/bit-vector.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

class RegisterState;
class DeferredBlocksRegion;

// BlockState stores details associated with a particular basic block.
class BlockState final {
 public:
  BlockState(int block_count, Zone* zone)
      : general_registers_in_state_(nullptr),
        double_registers_in_state_(nullptr),
        deferred_blocks_region_(nullptr),
        dominated_blocks_(block_count, zone),
        successors_phi_index_(-1),
        is_deferred_block_boundary_(false) {}

  // Returns the RegisterState that applies to the input of this block. Can be
  // |nullptr| if no registers of |kind| have been allocated up to this
  // point.
  RegisterState* register_in_state(RegisterKind kind);
  void set_register_in_state(RegisterState* register_state, RegisterKind kind);

  // Returns a bitvector representing all the basic blocks that are dominated
  // by this basic block.
  BitVector* dominated_blocks() { return &dominated_blocks_; }

  // Set / get this block's index for its successors' phi operations. Returns
  // -1 if this block has no successors with phi operations.
  int successors_phi_index() const { return successors_phi_index_; }
  void set_successors_phi_index(int index) {
    DCHECK_EQ(successors_phi_index_, -1);
    successors_phi_index_ = index;
  }

  // If this block is deferred, this represents the region of deferred blocks
  // that are directly reachable from this block.
  DeferredBlocksRegion* deferred_blocks_region() const {
    return deferred_blocks_region_;
  }
  void set_deferred_blocks_region(DeferredBlocksRegion* region) {
    DCHECK_NULL(deferred_blocks_region_);
    deferred_blocks_region_ = region;
  }

  // Returns true if this block represents either a transition from
  // non-deferred to deferred or vice versa.
  bool is_deferred_block_boundary() const {
    return is_deferred_block_boundary_;
  }
  void MarkAsDeferredBlockBoundary() { is_deferred_block_boundary_ = true; }

  MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(BlockState);

 private:
  RegisterState* general_registers_in_state_;
  RegisterState* double_registers_in_state_;

  DeferredBlocksRegion* deferred_blocks_region_;

  BitVector dominated_blocks_;
  int successors_phi_index_;
  bool is_deferred_block_boundary_;
};

RegisterState* BlockState::register_in_state(RegisterKind kind) {
  switch (kind) {
    case RegisterKind::kGeneral:
      return general_registers_in_state_;
    case RegisterKind::kDouble:
      return double_registers_in_state_;
  }
}

void BlockState::set_register_in_state(RegisterState* register_state,
                                       RegisterKind kind) {
  switch (kind) {
    case RegisterKind::kGeneral:
      DCHECK_NULL(general_registers_in_state_);
      general_registers_in_state_ = register_state;
      break;
    case RegisterKind::kDouble:
      DCHECK_NULL(double_registers_in_state_);
      double_registers_in_state_ = register_state;
      break;
  }
}

MidTierRegisterAllocationData::MidTierRegisterAllocationData(
    const RegisterConfiguration* config, Zone* zone, Frame* frame,
    InstructionSequence* code, TickCounter* tick_counter,
    const char* debug_name)
    : RegisterAllocationData(Type::kMidTier),
      allocation_zone_(zone),
      frame_(frame),
      code_(code),
      debug_name_(debug_name),
      config_(config),
      virtual_register_data_(code->VirtualRegisterCount(), allocation_zone()),
      block_states_(allocation_zone()),
      reference_map_instructions_(allocation_zone()),
      spilled_virtual_registers_(code->VirtualRegisterCount(),
                                 allocation_zone()),
      tick_counter_(tick_counter) {
  int basic_block_count = code->InstructionBlockCount();
  block_states_.reserve(basic_block_count);
  for (int i = 0; i < basic_block_count; i++) {
    block_states_.emplace_back(basic_block_count, allocation_zone());
  }
}

MoveOperands* MidTierRegisterAllocationData::AddGapMove(
    int instr_index, Instruction::GapPosition position,
    const InstructionOperand& from, const InstructionOperand& to) {
  Instruction* instr = code()->InstructionAt(instr_index);
  ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
  return moves->AddMove(from, to);
}

MoveOperands* MidTierRegisterAllocationData::AddPendingOperandGapMove(
    int instr_index, Instruction::GapPosition position) {
  return AddGapMove(instr_index, position, PendingOperand(), PendingOperand());
}

MachineRepresentation MidTierRegisterAllocationData::RepresentationFor(
    int virtual_register) {
  if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
    return InstructionSequence::DefaultRepresentation();
  } else {
    DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
    return code()->GetRepresentation(virtual_register);
  }
}

BlockState& MidTierRegisterAllocationData::block_state(RpoNumber rpo_number) {
  return block_states_[rpo_number.ToInt()];
}

const InstructionBlock* MidTierRegisterAllocationData::GetBlock(
    RpoNumber rpo_number) {
  return code()->InstructionBlockAt(rpo_number);
}

const InstructionBlock* MidTierRegisterAllocationData::GetBlock(
    int instr_index) {
  return code()->InstructionAt(instr_index)->block();
}

const BitVector* MidTierRegisterAllocationData::GetBlocksDominatedBy(
    const InstructionBlock* block) {
  return block_state(block->rpo_number()).dominated_blocks();
}

// RegisterIndex represents a particular register of a given kind (depending
// on the RegisterKind of the allocator).
class RegisterIndex final {
 public:
  RegisterIndex() : index_(kInvalidIndex) {}
  explicit RegisterIndex(int index) : index_(index) {}
  static RegisterIndex Invalid() { return RegisterIndex(); }

  bool is_valid() const { return index_ != kInvalidIndex; }

  int ToInt() const {
    DCHECK(is_valid());
    return index_;
  }

  uintptr_t ToBit(MachineRepresentation rep) const {
    if (kSimpleFPAliasing || rep != MachineRepresentation::kSimd128) {
      return 1ull << ToInt();
    } else {
      DCHECK_EQ(rep, MachineRepresentation::kSimd128);
      return 3ull << ToInt();
    }
  }
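
  // E.g. on a target without simple FP aliasing, a kSimd128 value occupies
  // two consecutive registers of this kind, so RegisterIndex(2).ToBit(
  // kSimd128) yields 0b1100 (bits 2 and 3), while any other representation
  // yields the single bit 0b100.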

  bool operator==(const RegisterIndex& rhs) const {
    return index_ == rhs.index_;
  }
  bool operator!=(const RegisterIndex& rhs) const {
    return index_ != rhs.index_;
  }

  class Iterator {
   public:
    explicit Iterator(int index) : index_(index) {}

    bool operator!=(const Iterator& rhs) const { return index_ != rhs.index_; }
    void operator++() { index_++; }
    RegisterIndex operator*() const { return RegisterIndex(index_); }

   private:
    int index_;
  };

 private:
  static const int kInvalidIndex = -1;
  int8_t index_;
};

// A Range from [start, end] of instructions, inclusive of start and end.
class Range {
 public:
  Range() : start_(kMaxInt), end_(0) {}
  Range(int start, int end) : start_(start), end_(end) {}

  void AddInstr(int index) {
    start_ = std::min(start_, index);
    end_ = std::max(end_, index);
  }

  void AddRange(const Range& other) {
    start_ = std::min(start_, other.start_);
    end_ = std::max(end_, other.end_);
  }

  // Returns true if index is greater than or equal to start and less than or
  // equal to end.
  bool Contains(int index) { return index >= start_ && index <= end_; }

  int start() const { return start_; }
  int end() const { return end_; }

 private:
  int start_;
  int end_;
};
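
// E.g. a default-constructed Range contains no instruction; after AddInstr(5)
// and AddInstr(2) it covers [2, 5], so Contains(3) is true and Contains(6) is
// false.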

// Represents a connected region of deferred basic blocks.
class DeferredBlocksRegion final {
 public:
  explicit DeferredBlocksRegion(Zone* zone, int number_of_blocks)
      : spilled_vregs_(zone), blocks_covered_(number_of_blocks, zone) {}

  void AddBlock(RpoNumber block, MidTierRegisterAllocationData* data) {
    DCHECK(data->GetBlock(block)->IsDeferred());
    blocks_covered_.Add(block.ToInt());
    data->block_state(block).set_deferred_blocks_region(this);
  }

  // Adds |vreg| to the list of variables whose output may be deferred to a
  // spill slot until entry to this deferred block region.
  void DeferSpillOutputUntilEntry(int vreg) { spilled_vregs_.insert(vreg); }

  ZoneSet<int>::iterator begin() const { return spilled_vregs_.begin(); }
  ZoneSet<int>::iterator end() const { return spilled_vregs_.end(); }

  const BitVector* blocks_covered() const { return &blocks_covered_; }

 private:
  ZoneSet<int> spilled_vregs_;
  BitVector blocks_covered_;
};

// VirtualRegisterData stores data specific to a particular virtual register,
// and tracks spilled operands for that virtual register.
class VirtualRegisterData final {
 public:
  VirtualRegisterData() = default;

  // Define VirtualRegisterData with the type of output that produces this
  // virtual register.
  void DefineAsUnallocatedOperand(int virtual_register, int instr_index,
                                  bool is_deferred_block,
                                  bool is_exceptional_call_output);
  void DefineAsFixedSpillOperand(AllocatedOperand* operand,
                                 int virtual_register, int instr_index,
                                 bool is_deferred_block,
                                 bool is_exceptional_call_output);
  void DefineAsConstantOperand(ConstantOperand* operand, int instr_index,
                               bool is_deferred_block);
  void DefineAsPhi(int virtual_register, int instr_index,
                   bool is_deferred_block);

  // Spill an operand that is assigned to this virtual register.
  void SpillOperand(InstructionOperand* operand, int instr_index,
                    MidTierRegisterAllocationData* data);

  // Emit gap moves to / from the spill slot.
  void EmitGapMoveToInputFromSpillSlot(AllocatedOperand to_operand,
                                       int instr_index,
                                       MidTierRegisterAllocationData* data);
  void EmitGapMoveFromOutputToSpillSlot(AllocatedOperand from_operand,
                                        const InstructionBlock* current_block,
                                        int instr_index,
                                        MidTierRegisterAllocationData* data);
  void EmitGapMoveToSpillSlot(AllocatedOperand from_operand, int instr_index,
                              MidTierRegisterAllocationData* data);

  // Adds pending spills for deferred blocks.
  void AddDeferredSpillUse(int instr_index,
                           MidTierRegisterAllocationData* data);
  void AddDeferredSpillOutput(AllocatedOperand allocated_op, int instr_index,
                              MidTierRegisterAllocationData* data);

  // Accessors for the spill operand, which may still be pending allocation.
  bool HasSpillOperand() const { return spill_operand_ != nullptr; }
  InstructionOperand* spill_operand() const {
    DCHECK(HasSpillOperand());
    return spill_operand_;
  }

  bool HasPendingSpillOperand() const {
    return HasSpillOperand() && spill_operand_->IsPending();
  }
  bool HasAllocatedSpillOperand() const {
    return HasSpillOperand() && spill_operand_->IsAllocated();
  }
  bool HasConstantSpillOperand() const {
    DCHECK_EQ(is_constant(), HasSpillOperand() && spill_operand_->IsConstant());
    return is_constant();
  }

  // Returns true if the virtual register should be spilled when it is output.
  bool NeedsSpillAtOutput() const { return needs_spill_at_output_; }
  void MarkAsNeedsSpillAtOutput() {
    if (is_constant()) return;
    needs_spill_at_output_ = true;
    if (HasSpillRange()) spill_range()->ClearDeferredBlockSpills();
  }

  // Returns true if the virtual register should be spilled at entry to the
  // deferred blocks in which it is spilled (to avoid spilling on output in
  // non-deferred blocks).
  bool NeedsSpillAtDeferredBlocks() const;
  void EmitDeferredSpillOutputs(MidTierRegisterAllocationData* data);

  bool IsSpilledAt(int instr_index, MidTierRegisterAllocationData* data) {
    DCHECK_GE(instr_index, output_instr_index());
    if (NeedsSpillAtOutput() || HasConstantSpillOperand()) return true;
    if (HasSpillOperand() && data->GetBlock(instr_index)->IsDeferred()) {
      return true;
    }
    return false;
  }

  // Allocates pending spill operands to the |allocated| spill slot.
  void AllocatePendingSpillOperand(const AllocatedOperand& allocated);

  int vreg() const { return vreg_; }
  int output_instr_index() const { return output_instr_index_; }
  bool is_constant() const { return is_constant_; }
  bool is_phi() const { return is_phi_; }
  bool is_defined_in_deferred_block() const {
    return is_defined_in_deferred_block_;
  }
  bool is_exceptional_call_output() const {
    return is_exceptional_call_output_;
  }

  struct DeferredSpillSlotOutput {
   public:
    explicit DeferredSpillSlotOutput(int instr, AllocatedOperand op,
                                     const BitVector* blocks)
        : instr_index(instr), operand(op), live_blocks(blocks) {}

    int instr_index;
    AllocatedOperand operand;
    const BitVector* live_blocks;
  };

  // Represents the range of instructions for which this virtual register
  // needs to be spilled on the stack.
  class SpillRange : public ZoneObject {
   public:
    // Defines a spill range for an output operand.
    SpillRange(int definition_instr_index,
               const InstructionBlock* definition_block,
               MidTierRegisterAllocationData* data)
        : live_range_(definition_instr_index, definition_instr_index),
          live_blocks_(data->GetBlocksDominatedBy(definition_block)),
          deferred_spill_outputs_(nullptr) {}

    // Defines a spill range for a Phi variable.
    SpillRange(const InstructionBlock* phi_block,
               MidTierRegisterAllocationData* data)
        : live_range_(phi_block->first_instruction_index(),
                      phi_block->first_instruction_index()),
          live_blocks_(data->GetBlocksDominatedBy(phi_block)),
          deferred_spill_outputs_(nullptr) {
      // For phis, add the gap move instructions in the predecessor blocks to
      // the live range.
      for (RpoNumber pred_rpo : phi_block->predecessors()) {
        const InstructionBlock* block = data->GetBlock(pred_rpo);
        live_range_.AddInstr(block->last_instruction_index());
      }
    }

    SpillRange(const SpillRange&) = delete;
    SpillRange& operator=(const SpillRange&) = delete;

    bool IsLiveAt(int instr_index, InstructionBlock* block) {
      if (!live_range_.Contains(instr_index)) return false;

      int block_rpo = block->rpo_number().ToInt();
      if (!live_blocks_->Contains(block_rpo)) return false;

      if (!HasDeferredBlockSpills()) {
        return true;
      } else {
        // If this spill range is only output for deferred blocks, then the
        // spill slot will only be live in the deferred blocks, not in all
        // blocks in which the virtual register is live.
        for (auto deferred_spill_output : *deferred_spill_outputs()) {
          if (deferred_spill_output.live_blocks->Contains(block_rpo)) {
            return true;
          }
        }
        return false;
      }
    }

    void ExtendRangeTo(int instr_index) { live_range_.AddInstr(instr_index); }

    void AddDeferredSpillOutput(AllocatedOperand allocated_op, int instr_index,
                                MidTierRegisterAllocationData* data) {
      if (deferred_spill_outputs_ == nullptr) {
        Zone* zone = data->allocation_zone();
        deferred_spill_outputs_ =
            zone->New<ZoneVector<DeferredSpillSlotOutput>>(zone);
      }
      const InstructionBlock* block = data->GetBlock(instr_index);
      DCHECK_EQ(block->first_instruction_index(), instr_index);
      BlockState& block_state = data->block_state(block->rpo_number());
      const BitVector* deferred_blocks =
          block_state.deferred_blocks_region()->blocks_covered();
      deferred_spill_outputs_->emplace_back(instr_index, allocated_op,
                                            deferred_blocks);
    }

    void ClearDeferredBlockSpills() { deferred_spill_outputs_ = nullptr; }
    bool HasDeferredBlockSpills() const {
      return deferred_spill_outputs_ != nullptr;
    }
    const ZoneVector<DeferredSpillSlotOutput>* deferred_spill_outputs() const {
      DCHECK(HasDeferredBlockSpills());
      return deferred_spill_outputs_;
    }

    Range& live_range() { return live_range_; }

   private:
    Range live_range_;
    const BitVector* live_blocks_;
    ZoneVector<DeferredSpillSlotOutput>* deferred_spill_outputs_;
  };

  bool HasSpillRange() const { return spill_range_ != nullptr; }
  SpillRange* spill_range() const {
    DCHECK(HasSpillRange());
    return spill_range_;
  }

 private:
  void Initialize(int virtual_register, InstructionOperand* spill_operand,
                  int instr_index, bool is_phi, bool is_constant,
                  bool is_defined_in_deferred_block,
                  bool is_exceptional_call_output);

  void AddSpillUse(int instr_index, MidTierRegisterAllocationData* data);
  void AddPendingSpillOperand(PendingOperand* pending_operand);
  void EnsureSpillRange(MidTierRegisterAllocationData* data);
  bool CouldSpillOnEntryToDeferred(const InstructionBlock* block);

  InstructionOperand* spill_operand_;
  SpillRange* spill_range_;
  int output_instr_index_;

  int vreg_;
  bool is_phi_ : 1;
  bool is_constant_ : 1;
  bool is_defined_in_deferred_block_ : 1;
  bool needs_spill_at_output_ : 1;
  bool is_exceptional_call_output_ : 1;
};

VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor(
    int virtual_register) {
  DCHECK_GE(virtual_register, 0);
  DCHECK_LT(virtual_register, virtual_register_data_.size());
  return virtual_register_data_[virtual_register];
}

void VirtualRegisterData::Initialize(int virtual_register,
                                     InstructionOperand* spill_operand,
                                     int instr_index, bool is_phi,
                                     bool is_constant,
                                     bool is_defined_in_deferred_block,
                                     bool is_exceptional_call_output) {
  vreg_ = virtual_register;
  spill_operand_ = spill_operand;
  spill_range_ = nullptr;
  output_instr_index_ = instr_index;
  is_phi_ = is_phi;
  is_constant_ = is_constant;
  is_defined_in_deferred_block_ = is_defined_in_deferred_block;
  needs_spill_at_output_ = !is_constant_ && spill_operand_ != nullptr;
  is_exceptional_call_output_ = is_exceptional_call_output;
}

void VirtualRegisterData::DefineAsConstantOperand(ConstantOperand* operand,
                                                  int instr_index,
                                                  bool is_deferred_block) {
  Initialize(operand->virtual_register(), operand, instr_index, false, true,
             is_deferred_block, false);
}

void VirtualRegisterData::DefineAsFixedSpillOperand(
    AllocatedOperand* operand, int virtual_register, int instr_index,
    bool is_deferred_block, bool is_exceptional_call_output) {
  Initialize(virtual_register, operand, instr_index, false, false,
             is_deferred_block, is_exceptional_call_output);
}

void VirtualRegisterData::DefineAsUnallocatedOperand(
    int virtual_register, int instr_index, bool is_deferred_block,
    bool is_exceptional_call_output) {
  Initialize(virtual_register, nullptr, instr_index, false, false,
             is_deferred_block, is_exceptional_call_output);
}

void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index,
                                      bool is_deferred_block) {
  Initialize(virtual_register, nullptr, instr_index, true, false,
             is_deferred_block, false);
}

void VirtualRegisterData::EnsureSpillRange(
    MidTierRegisterAllocationData* data) {
  DCHECK(!is_constant());
  if (HasSpillRange()) return;

  const InstructionBlock* definition_block =
      data->GetBlock(output_instr_index_);
  if (is_phi()) {
    // Define a spill slot that is defined for the phi's range.
    spill_range_ =
        data->allocation_zone()->New<SpillRange>(definition_block, data);
  } else {
    if (is_exceptional_call_output()) {
      // If this virtual register is output by a call which has an exception
      // catch handler, then the output will only be live in the IfSuccess
      // successor block, not the IfException side, so make the definition
      // block the IfSuccess successor block explicitly.
      DCHECK_EQ(output_instr_index_,
                definition_block->last_instruction_index() - 1);
      DCHECK_EQ(definition_block->SuccessorCount(), 2);
      DCHECK(data->GetBlock(definition_block->successors()[1])->IsHandler());
      definition_block = data->GetBlock(definition_block->successors()[0]);
    }
    // The spill slot will be defined after the instruction that outputs it.
    spill_range_ = data->allocation_zone()->New<SpillRange>(
        output_instr_index_ + 1, definition_block, data);
  }
  data->spilled_virtual_registers().Add(vreg());
}

void VirtualRegisterData::AddSpillUse(int instr_index,
                                      MidTierRegisterAllocationData* data) {
  if (is_constant()) return;

  EnsureSpillRange(data);
  spill_range_->ExtendRangeTo(instr_index);

  const InstructionBlock* block = data->GetBlock(instr_index);
  if (CouldSpillOnEntryToDeferred(block)) {
    data->block_state(block->rpo_number())
        .deferred_blocks_region()
        ->DeferSpillOutputUntilEntry(vreg());
  } else {
    MarkAsNeedsSpillAtOutput();
  }
}

void VirtualRegisterData::AddDeferredSpillUse(
    int instr_index, MidTierRegisterAllocationData* data) {
  DCHECK(data->GetBlock(instr_index)->IsDeferred());
  DCHECK(!is_defined_in_deferred_block());
  AddSpillUse(instr_index, data);
}

bool VirtualRegisterData::CouldSpillOnEntryToDeferred(
    const InstructionBlock* block) {
  return !NeedsSpillAtOutput() && block->IsDeferred() &&
         !is_defined_in_deferred_block() && !is_constant();
}

void VirtualRegisterData::AddDeferredSpillOutput(
    AllocatedOperand allocated_op, int instr_index,
    MidTierRegisterAllocationData* data) {
  DCHECK(!NeedsSpillAtOutput());
  spill_range_->AddDeferredSpillOutput(allocated_op, instr_index, data);
}

void VirtualRegisterData::SpillOperand(InstructionOperand* operand,
                                       int instr_index,
                                       MidTierRegisterAllocationData* data) {
  AddSpillUse(instr_index, data);
  if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
    InstructionOperand::ReplaceWith(operand, spill_operand());
  } else {
    PendingOperand pending_op;
    InstructionOperand::ReplaceWith(operand, &pending_op);
    AddPendingSpillOperand(PendingOperand::cast(operand));
  }
}

bool VirtualRegisterData::NeedsSpillAtDeferredBlocks() const {
  return HasSpillRange() && spill_range()->HasDeferredBlockSpills();
}

void VirtualRegisterData::EmitDeferredSpillOutputs(
    MidTierRegisterAllocationData* data) {
  DCHECK(NeedsSpillAtDeferredBlocks());
  for (auto deferred_spill : *spill_range()->deferred_spill_outputs()) {
    EmitGapMoveToSpillSlot(deferred_spill.operand, deferred_spill.instr_index,
                           data);
  }
}

void VirtualRegisterData::EmitGapMoveToInputFromSpillSlot(
    AllocatedOperand to_operand, int instr_index,
    MidTierRegisterAllocationData* data) {
  AddSpillUse(instr_index, data);
  DCHECK(!to_operand.IsPending());
  if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
    data->AddGapMove(instr_index, Instruction::END, *spill_operand(),
                     to_operand);
  } else {
    MoveOperands* move_ops =
        data->AddPendingOperandGapMove(instr_index, Instruction::END);
    AddPendingSpillOperand(PendingOperand::cast(&move_ops->source()));
    InstructionOperand::ReplaceWith(&move_ops->destination(), &to_operand);
  }
}

void VirtualRegisterData::EmitGapMoveToSpillSlot(
    AllocatedOperand from_operand, int instr_index,
    MidTierRegisterAllocationData* data) {
  AddSpillUse(instr_index, data);
  if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
    data->AddGapMove(instr_index, Instruction::START, from_operand,
                     *spill_operand());
  } else {
    MoveOperands* move_ops =
        data->AddPendingOperandGapMove(instr_index, Instruction::START);
    InstructionOperand::ReplaceWith(&move_ops->source(), &from_operand);
    AddPendingSpillOperand(PendingOperand::cast(&move_ops->destination()));
  }
}

void VirtualRegisterData::EmitGapMoveFromOutputToSpillSlot(
    AllocatedOperand from_operand, const InstructionBlock* current_block,
    int instr_index, MidTierRegisterAllocationData* data) {
  DCHECK_EQ(data->GetBlock(instr_index), current_block);
  if (instr_index == current_block->last_instruction_index()) {
    // Add a gap move to the first instruction of every successor block.
    for (const RpoNumber& succ : current_block->successors()) {
      const InstructionBlock* successor = data->GetBlock(succ);
      DCHECK_EQ(1, successor->PredecessorCount());
      EmitGapMoveToSpillSlot(from_operand,
                             successor->first_instruction_index(), data);
    }
  } else {
    // Add a gap move to the next instruction.
    EmitGapMoveToSpillSlot(from_operand, instr_index + 1, data);
  }
}

void VirtualRegisterData::AddPendingSpillOperand(PendingOperand* pending_op) {
  DCHECK(HasSpillRange());
  DCHECK_NULL(pending_op->next());
  if (HasSpillOperand()) {
    pending_op->set_next(PendingOperand::cast(spill_operand()));
  }
  spill_operand_ = pending_op;
}

void VirtualRegisterData::AllocatePendingSpillOperand(
    const AllocatedOperand& allocated) {
  DCHECK(!HasAllocatedSpillOperand() && !HasConstantSpillOperand());
  PendingOperand* current = PendingOperand::cast(spill_operand_);
  while (current) {
    PendingOperand* next = current->next();
    InstructionOperand::ReplaceWith(current, &allocated);
    current = next;
  }
}
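
// Note: the pending spill operands above form a singly linked list threaded
// through the operands themselves via PendingOperand::next(), with
// spill_operand_ pointing at the most recently added entry. Once a concrete
// spill slot is chosen, AllocatePendingSpillOperand walks the chain and
// rewrites every pending operand with the allocated slot.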

// RegisterState represents the state of the |kind| registers at a particular
// point in program execution. The RegisterState can be cloned or merged with
// other RegisterStates to model branches and merges in program control flow.
class RegisterState final : public ZoneObject {
 public:
  static RegisterState* New(RegisterKind kind, int num_allocatable_registers,
                            Zone* zone) {
    return zone->New<RegisterState>(kind, num_allocatable_registers, zone);
  }

  RegisterState(RegisterKind kind, int num_allocatable_registers, Zone* zone);
  RegisterState(const RegisterState& other) V8_NOEXCEPT;

  bool IsAllocated(RegisterIndex reg);
  bool IsShared(RegisterIndex reg);
  int VirtualRegisterForRegister(RegisterIndex reg);

  // Commit the |reg| with the |allocated| operand.
  void Commit(RegisterIndex reg, AllocatedOperand allocated,
              InstructionOperand* operand, MidTierRegisterAllocationData* data);

  // Spill the contents of |reg| for an instruction in |current_block| using
  // the |allocated| operand to commit the spill gap move.
  void Spill(RegisterIndex reg, AllocatedOperand allocated,
             const InstructionBlock* current_block,
             MidTierRegisterAllocationData* data);

  // Add a pending spill of the contents of |reg| at the exit point of a
  // deferred block at |instr_index|, using the |allocated| operand to commit
  // the spill gap move, if the register never gets spilled in a non-deferred
  // block.
  void SpillForDeferred(RegisterIndex reg, AllocatedOperand allocated,
                        int instr_index, MidTierRegisterAllocationData* data);

  // Add a pending gap move from |reg| to |virtual_register|'s spill slot at
  // the entry point of a deferred block at |instr_index|, if
  // |virtual_register| is never spilled in a non-deferred block.
  void MoveToSpillSlotOnDeferred(RegisterIndex reg, int virtual_register,
                                 int instr_index,
                                 MidTierRegisterAllocationData* data);

  // Allocate |reg| to |virtual_register| for the instruction at |instr_index|.
  // If the register is later spilled, a gap move will be added immediately
  // before |instr_index| to move |virtual_register| into this register.
  void AllocateUse(RegisterIndex reg, int virtual_register,
                   InstructionOperand* operand, int instr_index,
                   MidTierRegisterAllocationData* data);

  // Allocate |reg| as a pending use of |virtual_register| for |operand| in the
  // instruction at |instr_index|. If |virtual_register| later gets committed
  // to this register, then |operand| will be too; otherwise |operand| will be
  // replaced with |virtual_register|'s spill operand.
  void AllocatePendingUse(RegisterIndex reg, int virtual_register,
                          InstructionOperand* operand, int instr_index);

  // Mark that the register is holding a phi operand that is yet to be
  // allocated by the source block in the gap just before the last instruction
  // in the source block.
  void UseForPhiGapMove(RegisterIndex reg);
  bool IsPhiGapMove(RegisterIndex reg);

  // Returns true if |reg| only has pending uses allocated to it.
  bool HasPendingUsesOnly(RegisterIndex reg);

  // Clone this RegisterState for a successor block.
  RegisterState* Clone();

  // Copy register details for |reg| from |source| to |this| RegisterState.
  void CopyFrom(RegisterIndex reg, RegisterState* source);

  // Returns true if the register details for |reg| are equal in |source| and
  // |this| RegisterStates.
  bool Equals(RegisterIndex reg, RegisterState* source);

  // Signals that the registers in this state are going to be shared across
  // |shared_use_count| blocks.
  void AddSharedUses(int shared_use_count);

  // When merging multiple blocks' RegisterStates into the successor block with
  // |this| RegisterState, commit |reg| as being merged from a given
  // predecessor block.
  void CommitAtMerge(RegisterIndex reg);

  // Resets |reg| if it has register data that was shared with other basic
  // blocks and was spilled in those blocks.
  void ResetIfSpilledWhileShared(RegisterIndex reg);

  // Enable range-based for on allocatable register indices.
  RegisterIndex::Iterator begin() const { return RegisterIndex::Iterator(0); }
  RegisterIndex::Iterator end() const {
    return RegisterIndex::Iterator(num_allocatable_registers());
  }

 private:
  // Represents a particular register, with details of the virtual register it
  // is currently holding and how it should be updated if committed or spilled.
  class Register final : public ZoneObject {
   public:
    Register();
    void Reset();

    // Operations for committing, spilling and allocating uses of the register.
    void Commit(AllocatedOperand allocated_operand,
                MidTierRegisterAllocationData* data);
    void Spill(AllocatedOperand allocated_op,
               const InstructionBlock* current_block,
               MidTierRegisterAllocationData* data);
    void Use(int virtual_register, int instr_index);
    void PendingUse(InstructionOperand* operand, int virtual_register,
                    int instr_index);
    void SpillForDeferred(AllocatedOperand allocated, int instr_index,
                          MidTierRegisterAllocationData* data);
    void MoveToSpillSlotOnDeferred(int virtual_register, int instr_index,
                                   MidTierRegisterAllocationData* data);

    // Mark register as holding a phi.
    void MarkAsPhiMove();
    bool is_phi_gap_move() const { return is_phi_gap_move_; }

    // The register has deferred block spills that will be emitted if the
    // register is committed without having been spilled in a non-deferred
    // block.
    void AddDeferredBlockSpill(int instr_index, bool on_exit, Zone* zone);
    bool has_deferred_block_spills() const {
      return deferred_block_spills_.has_value();
    }

    // Operations related to dealing with a Register that is shared across
    // multiple basic blocks.
    void CommitAtMerge();
    void AddSharedUses(int shared_use_count);
    bool is_shared() const { return is_shared_; }
    bool was_spilled_while_shared() const {
      return is_shared() && !is_allocated();
    }

    bool is_allocated() const {
      return virtual_register_ != InstructionOperand::kInvalidVirtualRegister;
    }

    // The current virtual register held by this register.
    int virtual_register() const { return virtual_register_; }

    // The instruction index for the last use of the current in-progress
    // allocation of this register in the instruction stream. Used both as the
    // instruction to add a gap move to if |needs_gap_move_on_spill|, and as
    // the instruction to which the virtual register's spill range should be
    // extended if the register is spilled.
    int last_use_instr_index() const { return last_use_instr_index_; }

    // Returns true if a gap move should be added if the register is spilled.
    bool needs_gap_move_on_spill() const { return needs_gap_move_on_spill_; }

    // Returns a threaded list of the operands that have pending uses of this
    // register and will be resolved either to the register, or a spill slot,
    // depending on whether this register is spilled or committed.
    PendingOperand* pending_uses() const { return pending_uses_; }

   private:
    struct DeferredBlockSpill {
      DeferredBlockSpill(int instr, bool on_exit)
          : instr_index(instr), on_deferred_exit(on_exit) {}

      int instr_index;
      bool on_deferred_exit;
    };

    void SpillPendingUses(MidTierRegisterAllocationData* data);
    void SpillPhiGapMove(AllocatedOperand allocated_op,
                         const InstructionBlock* block,
                         MidTierRegisterAllocationData* data);

    bool needs_gap_move_on_spill_;
    bool is_shared_;
    bool is_phi_gap_move_;
    int last_use_instr_index_;

    int num_commits_required_;
    int virtual_register_;
    PendingOperand* pending_uses_;
    base::Optional<ZoneVector<DeferredBlockSpill>> deferred_block_spills_;
  };

  void ResetDataFor(RegisterIndex reg);

  bool HasRegisterData(RegisterIndex reg);
  void EnsureRegisterData(RegisterIndex reg);

  int num_allocatable_registers() const {
    return static_cast<int>(register_data_.size());
  }
  Register& reg_data(RegisterIndex reg);
  Zone* zone() const { return zone_; }

  ZoneVector<Register*> register_data_;
  Zone* zone_;
};

RegisterState::Register::Register() { Reset(); }

void RegisterState::Register::Reset() {
  is_shared_ = false;
  is_phi_gap_move_ = false;
  needs_gap_move_on_spill_ = false;
  last_use_instr_index_ = -1;
  num_commits_required_ = 0;
  virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
  pending_uses_ = nullptr;
  deferred_block_spills_.reset();
}

void RegisterState::Register::Use(int virtual_register, int instr_index) {
  // A register can have many pending uses, but should only ever have a single
  // non-pending use, since any subsequent use will commit the preceding use
  // first.
  DCHECK(!is_allocated());
  needs_gap_move_on_spill_ = true;
  virtual_register_ = virtual_register;
  last_use_instr_index_ = instr_index;
  num_commits_required_ = 1;
}

void RegisterState::Register::PendingUse(InstructionOperand* operand,
                                         int virtual_register,
                                         int instr_index) {
  if (!is_allocated()) {
    virtual_register_ = virtual_register;
    last_use_instr_index_ = instr_index;
    num_commits_required_ = 1;
  }
  DCHECK_EQ(virtual_register_, virtual_register);

  PendingOperand pending_op(pending_uses());
  InstructionOperand::ReplaceWith(operand, &pending_op);
  pending_uses_ = PendingOperand::cast(operand);
}

void RegisterState::Register::MarkAsPhiMove() {
  DCHECK(is_allocated());
  is_phi_gap_move_ = true;
}

void RegisterState::Register::AddDeferredBlockSpill(int instr_index,
                                                    bool on_exit, Zone* zone) {
  DCHECK(is_allocated());
  if (!deferred_block_spills_) {
    deferred_block_spills_.emplace(zone);
  }
  deferred_block_spills_->emplace_back(instr_index, on_exit);
}

void RegisterState::Register::AddSharedUses(int shared_use_count) {
  is_shared_ = true;
  num_commits_required_ += shared_use_count;
}

void RegisterState::Register::CommitAtMerge() {
  DCHECK(is_shared());
  DCHECK(is_allocated());
  --num_commits_required_;
  // We should still have commits required that will be resolved in the merge
  // block.
  DCHECK_GT(num_commits_required_, 0);
}

void RegisterState::Register::Commit(AllocatedOperand allocated_op,
                                     MidTierRegisterAllocationData* data) {
  DCHECK(is_allocated());
  DCHECK_GT(num_commits_required_, 0);

  if (--num_commits_required_ == 0) {
    // Allocate all pending uses to |allocated_op| if this commit is
    // non-shared, or if it is the final commit required on register data
    // shared across blocks.
    PendingOperand* pending_use = pending_uses();
    while (pending_use) {
      PendingOperand* next = pending_use->next();
      InstructionOperand::ReplaceWith(pending_use, &allocated_op);
      pending_use = next;
    }
    pending_uses_ = nullptr;

    VirtualRegisterData& vreg_data =
        data->VirtualRegisterDataFor(virtual_register());

    // If there are deferred block gap moves pending, emit them now that the
    // register has been committed.
    if (has_deferred_block_spills()) {
      for (DeferredBlockSpill& spill : *deferred_block_spills_) {
        if (spill.on_deferred_exit) {
          vreg_data.EmitGapMoveToInputFromSpillSlot(allocated_op,
                                                    spill.instr_index, data);
        } else if (!vreg_data.NeedsSpillAtOutput()) {
          vreg_data.AddDeferredSpillOutput(allocated_op, spill.instr_index,
                                           data);
        }
      }
    }

    // If this register was used as a phi gap move, then its being committed
    // is the point at which we have output the Phi.
    if (is_phi_gap_move() && vreg_data.NeedsSpillAtDeferredBlocks()) {
      vreg_data.EmitDeferredSpillOutputs(data);
    }
  }
  DCHECK_IMPLIES(num_commits_required_ > 0, is_shared());
}

void RegisterState::Register::Spill(AllocatedOperand allocated_op,
                                    const InstructionBlock* current_block,
                                    MidTierRegisterAllocationData* data) {
  VirtualRegisterData& vreg_data =
      data->VirtualRegisterDataFor(virtual_register());
  SpillPendingUses(data);
  if (is_phi_gap_move()) {
    SpillPhiGapMove(allocated_op, current_block, data);
  }
  if (needs_gap_move_on_spill()) {
    vreg_data.EmitGapMoveToInputFromSpillSlot(allocated_op,
                                              last_use_instr_index(), data);
  }
  if (has_deferred_block_spills() || !current_block->IsDeferred()) {
    vreg_data.MarkAsNeedsSpillAtOutput();
  }
  virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
}

void RegisterState::Register::SpillPhiGapMove(
    AllocatedOperand allocated_op, const InstructionBlock* current_block,
    MidTierRegisterAllocationData* data) {
  DCHECK_EQ(current_block->SuccessorCount(), 1);
  const InstructionBlock* phi_block =
      data->GetBlock(current_block->successors()[0]);

  // Add gap moves to the spilled phi for all blocks we previously allocated
  // assuming that the phi was in a register.
  VirtualRegisterData& vreg_data =
      data->VirtualRegisterDataFor(virtual_register());
  for (RpoNumber predecessor : phi_block->predecessors()) {
    // Predecessors with a higher rpo number than the current block have
    // already been processed, so add the required gap move for them.
    if (predecessor > current_block->rpo_number()) {
      const InstructionBlock* predecessor_block = data->GetBlock(predecessor);
      vreg_data.EmitGapMoveToSpillSlot(
          allocated_op, predecessor_block->last_instruction_index(), data);
    }
  }
}

void RegisterState::Register::SpillPendingUses(
    MidTierRegisterAllocationData* data) {
  VirtualRegisterData& vreg_data =
      data->VirtualRegisterDataFor(virtual_register());
  PendingOperand* pending_use = pending_uses();
  while (pending_use) {
    // Spill all the pending operands associated with this register.
    PendingOperand* next = pending_use->next();
    vreg_data.SpillOperand(pending_use, last_use_instr_index(), data);
    pending_use = next;
  }
  pending_uses_ = nullptr;
}

void RegisterState::Register::SpillForDeferred(
    AllocatedOperand allocated, int instr_index,
    MidTierRegisterAllocationData* data) {
  DCHECK(is_allocated());
  DCHECK(is_shared());
  // Add a pending deferred spill, then commit the register (with the commit
  // being fulfilled by the deferred spill if the register is fully committed).
  data->VirtualRegisterDataFor(virtual_register())
      .AddDeferredSpillUse(instr_index, data);
  AddDeferredBlockSpill(instr_index, true, data->allocation_zone());
  Commit(allocated, data);
}

void RegisterState::Register::MoveToSpillSlotOnDeferred(
    int virtual_register, int instr_index,
    MidTierRegisterAllocationData* data) {
  if (!is_allocated()) {
    virtual_register_ = virtual_register;
    last_use_instr_index_ = instr_index;
    num_commits_required_ = 1;
  }
  AddDeferredBlockSpill(instr_index, false, data->allocation_zone());
}

RegisterState::RegisterState(RegisterKind kind, int num_allocatable_registers,
                             Zone* zone)
    : register_data_(num_allocatable_registers, zone), zone_(zone) {}

RegisterState::RegisterState(const RegisterState& other) V8_NOEXCEPT
    : register_data_(other.register_data_.begin(), other.register_data_.end(),
                     other.zone_),
      zone_(other.zone_) {}

int RegisterState::VirtualRegisterForRegister(RegisterIndex reg) {
  if (IsAllocated(reg)) {
    return reg_data(reg).virtual_register();
  } else {
    return InstructionOperand::kInvalidVirtualRegister;
  }
}

bool RegisterState::IsPhiGapMove(RegisterIndex reg) {
  DCHECK(IsAllocated(reg));
  return reg_data(reg).is_phi_gap_move();
}

void RegisterState::Commit(RegisterIndex reg, AllocatedOperand allocated,
                           InstructionOperand* operand,
                           MidTierRegisterAllocationData* data) {
  InstructionOperand::ReplaceWith(operand, &allocated);
  if (IsAllocated(reg)) {
    reg_data(reg).Commit(allocated, data);
    ResetDataFor(reg);
  }
}

void RegisterState::Spill(RegisterIndex reg, AllocatedOperand allocated,
                          const InstructionBlock* current_block,
                          MidTierRegisterAllocationData* data) {
  DCHECK(IsAllocated(reg));
  reg_data(reg).Spill(allocated, current_block, data);
  ResetDataFor(reg);
}

void RegisterState::SpillForDeferred(RegisterIndex reg,
                                     AllocatedOperand allocated,
                                     int instr_index,
                                     MidTierRegisterAllocationData* data) {
  DCHECK(IsAllocated(reg));
  reg_data(reg).SpillForDeferred(allocated, instr_index, data);
  ResetDataFor(reg);
}

void RegisterState::MoveToSpillSlotOnDeferred(
    RegisterIndex reg, int virtual_register, int instr_index,
    MidTierRegisterAllocationData* data) {
  EnsureRegisterData(reg);
  reg_data(reg).MoveToSpillSlotOnDeferred(virtual_register, instr_index, data);
}

void RegisterState::AllocateUse(RegisterIndex reg, int virtual_register,
                                InstructionOperand* operand, int instr_index,
                                MidTierRegisterAllocationData* data) {
  EnsureRegisterData(reg);
  reg_data(reg).Use(virtual_register, instr_index);
}

void RegisterState::AllocatePendingUse(RegisterIndex reg, int virtual_register,
                                       InstructionOperand* operand,
                                       int instr_index) {
  EnsureRegisterData(reg);
  reg_data(reg).PendingUse(operand, virtual_register, instr_index);
}

void RegisterState::UseForPhiGapMove(RegisterIndex reg) {
  DCHECK(IsAllocated(reg));
  reg_data(reg).MarkAsPhiMove();
}

RegisterState::Register& RegisterState::reg_data(RegisterIndex reg) {
  DCHECK(HasRegisterData(reg));
  return *register_data_[reg.ToInt()];
}

bool RegisterState::IsShared(RegisterIndex reg) {
  return HasRegisterData(reg) && reg_data(reg).is_shared();
}

bool RegisterState::IsAllocated(RegisterIndex reg) {
  return HasRegisterData(reg) && reg_data(reg).is_allocated();
}

bool RegisterState::HasPendingUsesOnly(RegisterIndex reg) {
  DCHECK(IsAllocated(reg));
  return !reg_data(reg).needs_gap_move_on_spill();
}

void RegisterState::ResetDataFor(RegisterIndex reg) {
  DCHECK(HasRegisterData(reg));
  if (reg_data(reg).is_shared()) {
    register_data_[reg.ToInt()] = nullptr;
  } else {
    reg_data(reg).Reset();
  }
}

bool RegisterState::HasRegisterData(RegisterIndex reg) {
  DCHECK_LT(reg.ToInt(), register_data_.size());
  return register_data_[reg.ToInt()] != nullptr;
}

void RegisterState::EnsureRegisterData(RegisterIndex reg) {
  if (!HasRegisterData(reg)) {
    register_data_[reg.ToInt()] = zone()->New<RegisterState::Register>();
  }
}

void RegisterState::ResetIfSpilledWhileShared(RegisterIndex reg) {
  if (HasRegisterData(reg) && reg_data(reg).was_spilled_while_shared()) {
    ResetDataFor(reg);
  }
}

void RegisterState::CommitAtMerge(RegisterIndex reg) {
  DCHECK(IsAllocated(reg));
  reg_data(reg).CommitAtMerge();
}

void RegisterState::CopyFrom(RegisterIndex reg, RegisterState* source) {
  register_data_[reg.ToInt()] = source->register_data_[reg.ToInt()];
}

bool RegisterState::Equals(RegisterIndex reg, RegisterState* other) {
  return register_data_[reg.ToInt()] == other->register_data_[reg.ToInt()];
}

void RegisterState::AddSharedUses(int shared_use_count) {
  for (RegisterIndex reg : *this) {
    if (HasRegisterData(reg)) {
      reg_data(reg).AddSharedUses(shared_use_count);
    }
  }
}

RegisterState* RegisterState::Clone() {
  return zone_->New<RegisterState>(*this);
}

class RegisterBitVector {
 public:
  RegisterBitVector() : bits_(0) {}

  bool Contains(RegisterIndex reg, MachineRepresentation rep) const {
    return bits_ & reg.ToBit(rep);
  }

  RegisterIndex GetFirstSet() const {
    return RegisterIndex(base::bits::CountTrailingZeros(bits_));
  }

  RegisterIndex GetFirstCleared(int max_reg) const {
    int reg_index = base::bits::CountTrailingZeros(~bits_);
    if (reg_index < max_reg) {
      return RegisterIndex(reg_index);
    } else {
      return RegisterIndex::Invalid();
    }
  }

  void Add(RegisterIndex reg, MachineRepresentation rep) {
    bits_ |= reg.ToBit(rep);
  }

  void Clear(RegisterIndex reg, MachineRepresentation rep) {
    bits_ &= ~reg.ToBit(rep);
  }

  RegisterBitVector Union(const RegisterBitVector& other) {
    return RegisterBitVector(bits_ | other.bits_);
  }

  void Reset() { bits_ = 0; }
  bool IsEmpty() const { return bits_ == 0; }

 private:
  explicit RegisterBitVector(uintptr_t bits) : bits_(bits) {}

  static_assert(RegisterConfiguration::kMaxRegisters <= sizeof(uintptr_t) * 8,
                "Maximum registers must fit in uintptr_t bitmap");
  uintptr_t bits_;
};
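
// E.g. if bits_ == 0b0110, then GetFirstSet() returns register index 1 and
// GetFirstCleared(8) returns register index 0. Adding a kSimd128 value at
// index 2 on a target without simple FP aliasing sets bits 2 and 3.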
1283
1284 // A SinglePassRegisterAllocator is a fast register allocator that does a single
1285 // pass through the instruction stream without performing any live-range
1286 // analysis beforehand. It deals with a single RegisterKind, either general or
1287 // double registers, with the MidTierRegisterAllocator choosing the correct
1288 // SinglePassRegisterAllocator based on a values representation.
1289 class SinglePassRegisterAllocator final {
1290 public:
1291 SinglePassRegisterAllocator(RegisterKind kind,
1292 MidTierRegisterAllocationData* data);
1293
1294 // Convert to / from a register code and a register index.
1295 RegisterIndex FromRegCode(int reg_code, MachineRepresentation rep) const;
1296 int ToRegCode(RegisterIndex index, MachineRepresentation rep) const;
1297
1298 // Allocation routines used to allocate a particular operand to either a
1299 // register or a spill slot.
1300 void AllocateConstantOutput(ConstantOperand* operand);
1301 void AllocateOutput(UnallocatedOperand* operand, int instr_index);
1302 void AllocateInput(UnallocatedOperand* operand, int instr_index);
1303 void AllocateSameInputOutput(UnallocatedOperand* output,
1304 UnallocatedOperand* input, int instr_index);
1305 void AllocateGapMoveInput(UnallocatedOperand* operand, int instr_index);
1306 void AllocateTemp(UnallocatedOperand* operand, int instr_index);
1307 void AllocatePhi(int virtual_register, const InstructionBlock* block);
1308 void AllocatePhiGapMove(int to_vreg, int from_vreg, int instr_index);
1309
1310 // Reserve any fixed registers for the operands on an instruction before doing
1311 // allocation on the operands.
1312 void ReserveFixedInputRegister(const UnallocatedOperand* operand,
1313 int instr_index);
1314 void ReserveFixedTempRegister(const UnallocatedOperand* operand,
1315 int instr_index);
1316 void ReserveFixedOutputRegister(const UnallocatedOperand* operand,
1317 int instr_index);
1318
1319 // Spills all registers that are currently holding data, for example, due to
1320 // an instruction that clobbers all registers.
1321 void SpillAllRegisters();
1322
1323 // Inform the allocator that we are starting / ending a block or ending
1324 // allocation for the current instruction.
1325 void StartBlock(const InstructionBlock* block);
1326 void EndBlock(const InstructionBlock* block);
1327 void EndInstruction();
1328
1329 void UpdateForDeferredBlock(int instr_index);
1330 void AllocateDeferredBlockSpillOutput(int instr_index,
1331 RpoNumber deferred_block,
1332 int virtual_register);
1333
1334   RegisterKind kind() const { return kind_; }
1335   BitVector* assigned_registers() const { return assigned_registers_; }
1336
1337 private:
1338 enum class UsePosition {
1339 // Operand used at start of instruction.
1340 kStart,
1341 // Operand used at end of instruction.
1342 kEnd,
1343 // Operand is used at both the start and end of instruction.
1344 kAll,
1345 // Operand is not used in the instruction (used when initializing register
1346 // state on block entry).
1347 kNone,
1348 };
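  // The use position controls which of the per-instruction in-use bitmaps a
  // register lands in (see MarkRegisterUse / InUseBitmap): for example, a
  // fixed input reserved only at the start of an instruction does not block
  // that register from being chosen for the instruction's outputs, which are
  // allocated with UsePosition::kEnd.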
1349
1350 // The allocator is initialized without any RegisterState by default to avoid
1351 // having to allocate per-block allocator state for functions that don't
1352 // allocate registers of a particular type. All allocation functions should
1353 // call EnsureRegisterState to allocate a RegisterState if necessary.
1354 void EnsureRegisterState();
1355
1356 // Clone the register state from |successor| into the current register state.
1357 void CloneStateFrom(RpoNumber successor);
1358
1359 // Merge the register state of |successors| into the current register state.
1360 void MergeStateFrom(const InstructionBlock::Successors& successors);
1361
1362 // Spill a register in a previously processed successor block when merging
1363 // state into the current block.
1364 void SpillRegisterAtMerge(RegisterState* reg_state, RegisterIndex reg);
1365
1366 // Introduce a gap move to move |virtual_register| from reg |from| to reg |to|
1367 // on entry to a |successor| block.
1368 void MoveRegisterOnMerge(RegisterIndex from, RegisterIndex to,
1369 int virtual_register, RpoNumber successor,
1370 RegisterState* succ_state);
1371
1372 // Update the virtual register data with the data in register_state()
1373 void UpdateVirtualRegisterState();
1374
1375 // Returns true if |virtual_register| is defined after use position |pos| at
1376 // |instr_index|.
1377 bool DefinedAfter(int virtual_register, int instr_index, UsePosition pos);
1378
1379 // Allocate |reg| to |virtual_register| for |operand| of the instruction at
1380 // |instr_index|. The register will be reserved for this use for the specified
1381 // |pos| use position.
1382 void AllocateUse(RegisterIndex reg, int virtual_register,
1383 InstructionOperand* operand, int instr_index,
1384 UsePosition pos);
1385
1386 // Allocate |reg| to |virtual_register| as a pending use (i.e., only if the
1387 // register is not subsequently spilled) for |operand| of the instruction at
1388 // |instr_index|.
1389 void AllocatePendingUse(RegisterIndex reg, int virtual_register,
1390 InstructionOperand* operand, int instr_index);
1391
1392 // Allocate |operand| to |reg| and add a gap move to move |virtual_register|
1393 // to this register for the instruction at |instr_index|. |reg| will be
1394 // reserved for this use for the specified |pos| use position.
1395 void AllocateUseWithMove(RegisterIndex reg, int virtual_register,
1396 UnallocatedOperand* operand, int instr_index,
1397 UsePosition pos);
1398
1399 void CommitRegister(RegisterIndex reg, int virtual_register,
1400 InstructionOperand* operand, UsePosition pos);
1401 void SpillRegister(RegisterIndex reg);
1402 void SpillRegisterForVirtualRegister(int virtual_register);
1403
1404 // Pre-emptively spill the register at the exit of deferred blocks such that
1405 // uses of this register in non-deferred blocks don't need to be spilled.
1406 void SpillRegisterForDeferred(RegisterIndex reg, int instr_index);
1407
1408 // Returns an AllocatedOperand corresponding to the use of |reg| for
1409 // |virtual_register|.
1410 AllocatedOperand AllocatedOperandForReg(RegisterIndex reg,
1411 int virtual_register);
1412
1413 void ReserveFixedRegister(const UnallocatedOperand* operand, int instr_index,
1414 UsePosition pos);
1415 RegisterIndex AllocateOutput(UnallocatedOperand* operand, int instr_index,
1416 UsePosition pos);
1417 void EmitGapMoveFromOutput(InstructionOperand from, InstructionOperand to,
1418 int instr_index);
1419
1420 // Helper functions to choose the best register for a given operand.
1421 V8_INLINE RegisterIndex
1422 ChooseRegisterFor(VirtualRegisterData& virtual_register, int instr_index,
1423 UsePosition pos, bool must_use_register);
1424 V8_INLINE RegisterIndex ChooseRegisterFor(MachineRepresentation rep,
1425 UsePosition pos,
1426 bool must_use_register);
1427 V8_INLINE RegisterIndex ChooseFreeRegister(MachineRepresentation rep,
1428 UsePosition pos);
1429 V8_INLINE RegisterIndex ChooseFreeRegister(
1430 const RegisterBitVector& allocated_regs, MachineRepresentation rep);
1431 V8_INLINE RegisterIndex ChooseRegisterToSpill(MachineRepresentation rep,
1432 UsePosition pos);
1433
1434   // Assign, free, and mark uses of |reg| for a |virtual_register| at use
1435 // position |pos|.
1436 V8_INLINE void AssignRegister(RegisterIndex reg, int virtual_register,
1437 UsePosition pos);
1438 V8_INLINE void FreeRegister(RegisterIndex reg, int virtual_register);
1439 V8_INLINE void MarkRegisterUse(RegisterIndex reg, MachineRepresentation rep,
1440 UsePosition pos);
1441 V8_INLINE RegisterBitVector InUseBitmap(UsePosition pos);
1442 V8_INLINE bool IsValidForRep(RegisterIndex reg, MachineRepresentation rep);
1443
1444 // Return the register allocated to |virtual_register|, if any.
1445 RegisterIndex RegisterForVirtualRegister(int virtual_register);
1446 // Return the virtual register being held by |reg|, or kInvalidVirtualRegister
1447 // if |reg| is unallocated.
1448 int VirtualRegisterForRegister(RegisterIndex reg);
1449
1450 // Returns true if |reg| is unallocated or holds |virtual_register|.
1451 bool IsFreeOrSameVirtualRegister(RegisterIndex reg, int virtual_register);
1452 // Returns true if |virtual_register| is unallocated or is allocated to |reg|.
1453 bool VirtualRegisterIsUnallocatedOrInReg(int virtual_register,
1454 RegisterIndex reg);
1455
1456 // Returns a RegisterBitVector representing the allocated registers in
1457 // reg_state.
1458 RegisterBitVector GetAllocatedRegBitVector(RegisterState* reg_state);
1459
1460 // Check the consistency of reg->vreg and vreg->reg mappings if a debug build.
1461 void CheckConsistency();
1462
1463   bool HasRegisterState() const { return register_state_; }
1464   RegisterState* register_state() const {
1465 DCHECK(HasRegisterState());
1466 return register_state_;
1467 }
1468
1469   VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
1470 return data()->VirtualRegisterDataFor(virtual_register);
1471 }
1472
1473   MachineRepresentation RepresentationFor(int virtual_register) const {
1474 return data()->RepresentationFor(virtual_register);
1475 }
1476
1477   int num_allocatable_registers() const { return num_allocatable_registers_; }
1478   const InstructionBlock* current_block() const { return current_block_; }
1479   MidTierRegisterAllocationData* data() const { return data_; }
1480
1481 // Virtual register to register mapping.
1482 ZoneVector<RegisterIndex> virtual_register_to_reg_;
1483
1484 // Current register state during allocation.
1485 RegisterState* register_state_;
1486
1487 // The current block being processed.
1488 const InstructionBlock* current_block_;
1489
1490 const RegisterKind kind_;
1491 const int num_allocatable_registers_;
1492 ZoneVector<RegisterIndex> reg_code_to_index_;
1493 const int* index_to_reg_code_;
1494 BitVector* assigned_registers_;
1495
1496 MidTierRegisterAllocationData* data_;
1497
1498 RegisterBitVector in_use_at_instr_start_bits_;
1499 RegisterBitVector in_use_at_instr_end_bits_;
1500 RegisterBitVector allocated_registers_bits_;
1501
1502 // These fields are only used when kSimpleFPAliasing == false.
1503 base::Optional<ZoneVector<RegisterIndex>> float32_reg_code_to_index_;
1504 base::Optional<ZoneVector<int>> index_to_float32_reg_code_;
1505 base::Optional<ZoneVector<RegisterIndex>> simd128_reg_code_to_index_;
1506 base::Optional<ZoneVector<int>> index_to_simd128_reg_code_;
1507 };
1508
1509 SinglePassRegisterAllocator::SinglePassRegisterAllocator(
1510 RegisterKind kind, MidTierRegisterAllocationData* data)
1511 : virtual_register_to_reg_(data->code()->VirtualRegisterCount(),
1512 data->allocation_zone()),
1513 register_state_(nullptr),
1514 current_block_(nullptr),
1515 kind_(kind),
1516 num_allocatable_registers_(
1517 GetAllocatableRegisterCount(data->config(), kind)),
1518 reg_code_to_index_(GetRegisterCount(data->config(), kind),
1519 data->allocation_zone()),
1520 index_to_reg_code_(GetAllocatableRegisterCodes(data->config(), kind)),
1521 assigned_registers_(data->code_zone()->New<BitVector>(
1522 GetRegisterCount(data->config(), kind), data->code_zone())),
1523 data_(data),
1524 in_use_at_instr_start_bits_(),
1525 in_use_at_instr_end_bits_(),
1526 allocated_registers_bits_() {
1527 for (int i = 0; i < num_allocatable_registers_; i++) {
1528 int reg_code = index_to_reg_code_[i];
1529 reg_code_to_index_[reg_code] = RegisterIndex(i);
1530 }
1531
1532 // If the architecture has non-simple FP aliasing, initialize float and
1533 // simd128 specific register details.
1534 if (!kSimpleFPAliasing && kind == RegisterKind::kDouble) {
1535 const RegisterConfiguration* config = data->config();
1536
1537 // Float registers.
1538 float32_reg_code_to_index_.emplace(config->num_float_registers(),
1539 data->allocation_zone());
1540 index_to_float32_reg_code_.emplace(num_allocatable_registers_, -1,
1541 data->allocation_zone());
1542 for (int i = 0; i < config->num_allocatable_float_registers(); i++) {
1543 int reg_code = config->allocatable_float_codes()[i];
1544 // Only add even float register codes to avoid overlapping multiple float
1545 // registers on each RegisterIndex.
1546 if (reg_code % 2 != 0) continue;
1547 int double_reg_base_code;
1548 CHECK_EQ(1, config->GetAliases(MachineRepresentation::kFloat32, reg_code,
1549 MachineRepresentation::kFloat64,
1550 &double_reg_base_code));
1551 RegisterIndex double_reg(reg_code_to_index_[double_reg_base_code]);
1552 float32_reg_code_to_index_->at(reg_code) = double_reg;
1553 index_to_float32_reg_code_->at(double_reg.ToInt()) = reg_code;
1554 }
1555
1556 // Simd128 registers.
1557 simd128_reg_code_to_index_.emplace(config->num_simd128_registers(),
1558 data->allocation_zone());
1559 index_to_simd128_reg_code_.emplace(num_allocatable_registers_, -1,
1560 data->allocation_zone());
1561 for (int i = 0; i < config->num_allocatable_simd128_registers(); i++) {
1562 int reg_code = config->allocatable_simd128_codes()[i];
1563 int double_reg_base_code;
1564 CHECK_EQ(2, config->GetAliases(MachineRepresentation::kSimd128, reg_code,
1565 MachineRepresentation::kFloat64,
1566 &double_reg_base_code));
1567 RegisterIndex double_reg(reg_code_to_index_[double_reg_base_code]);
1568 simd128_reg_code_to_index_->at(reg_code) = double_reg;
1569 index_to_simd128_reg_code_->at(double_reg.ToInt()) = reg_code;
1570 }
1571 }
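    // For example, with ARM-style aliasing, float32 code 2 (s2) maps to the
    // RegisterIndex of its containing double d1, and simd128 code 1 (q1) maps
    // to the RegisterIndex of its lower double d2, so a single RegisterIndex
    // stands in for every representation that aliases that double register.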
1572 }
1573
1574 int SinglePassRegisterAllocator::VirtualRegisterForRegister(RegisterIndex reg) {
1575 return register_state()->VirtualRegisterForRegister(reg);
1576 }
1577
1578 RegisterIndex SinglePassRegisterAllocator::RegisterForVirtualRegister(
1579 int virtual_register) {
1580 DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
1581 return virtual_register_to_reg_[virtual_register];
1582 }
1583
1584 void SinglePassRegisterAllocator::UpdateForDeferredBlock(int instr_index) {
1585 if (!HasRegisterState()) return;
1586 for (RegisterIndex reg : *register_state()) {
1587 SpillRegisterForDeferred(reg, instr_index);
1588 }
1589 }
1590
1591 void SinglePassRegisterAllocator::EndInstruction() {
1592 in_use_at_instr_end_bits_.Reset();
1593 in_use_at_instr_start_bits_.Reset();
1594 }
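// Only the per-instruction in-use bitmaps are reset here;
// allocated_registers_bits_ and the vreg-to-register mapping persist across
// instructions within a block and are torn down in EndBlock.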
1595
1596 void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
1597 DCHECK(!HasRegisterState());
1598 DCHECK_NULL(current_block_);
1599 DCHECK(in_use_at_instr_start_bits_.IsEmpty());
1600 DCHECK(in_use_at_instr_end_bits_.IsEmpty());
1601 DCHECK(allocated_registers_bits_.IsEmpty());
1602
1603 // Update the current block we are processing.
1604 current_block_ = block;
1605
1606 if (block->SuccessorCount() == 1) {
1607 // If we have only a single successor, we can directly clone our state
1608 // from that successor.
1609 CloneStateFrom(block->successors()[0]);
1610 } else if (block->SuccessorCount() > 1) {
1611 // If we have multiple successors, merge the state from all the successors
1612 // into our block.
1613 MergeStateFrom(block->successors());
1614 }
1615 }
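// Blocks are processed in reverse order, so the state a block starts from is
// the state its successors were left in: a single successor's state can be
// reused (or cloned if shared), while multiple successors are merged in
// MergeStateFrom.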
1616
1617 void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) {
1618 DCHECK(in_use_at_instr_start_bits_.IsEmpty());
1619 DCHECK(in_use_at_instr_end_bits_.IsEmpty());
1620
1621   // If we didn't allocate any registers of this kind, or this is the entry
1622   // block (it has no predecessors), there is nothing to do here.
1623 if (!HasRegisterState() || block->PredecessorCount() == 0) {
1624 current_block_ = nullptr;
1625 return;
1626 }
1627
1628 if (block->PredecessorCount() > 1) {
1629 register_state()->AddSharedUses(
1630 static_cast<int>(block->PredecessorCount()) - 1);
1631 }
1632
1633 BlockState& block_state = data()->block_state(block->rpo_number());
1634 block_state.set_register_in_state(register_state(), kind());
1635
1636 // Remove virtual register to register mappings and clear register state.
1637 // We will update the register state when starting the next block.
1638 while (!allocated_registers_bits_.IsEmpty()) {
1639 RegisterIndex reg = allocated_registers_bits_.GetFirstSet();
1640 FreeRegister(reg, VirtualRegisterForRegister(reg));
1641 }
1642 current_block_ = nullptr;
1643 register_state_ = nullptr;
1644 }
1645
1646 void SinglePassRegisterAllocator::CloneStateFrom(RpoNumber successor) {
1647 BlockState& block_state = data()->block_state(successor);
1648 RegisterState* successor_registers = block_state.register_in_state(kind());
1649 if (successor_registers != nullptr) {
1650 if (data()->GetBlock(successor)->PredecessorCount() == 1) {
1651 // Avoids cloning for successors where we are the only predecessor.
1652 register_state_ = successor_registers;
1653 } else {
1654 register_state_ = successor_registers->Clone();
1655 }
1656 UpdateVirtualRegisterState();
1657 }
1658 }
1659
1660 void SinglePassRegisterAllocator::MergeStateFrom(
1661 const InstructionBlock::Successors& successors) {
1662 for (RpoNumber successor : successors) {
1663 BlockState& block_state = data()->block_state(successor);
1664 RegisterState* successor_registers = block_state.register_in_state(kind());
1665 if (successor_registers == nullptr) {
1666 continue;
1667 }
1668
1669 if (register_state_ == nullptr) {
1670       // If we haven't merged any register state yet, just use this successor's
1671       // register state directly.
1672 register_state_ = successor_registers;
1673 UpdateVirtualRegisterState();
1674 } else {
1675 // Otherwise try to merge our state with the existing state.
1676 RegisterBitVector processed_regs;
1677 RegisterBitVector succ_allocated_regs =
1678 GetAllocatedRegBitVector(successor_registers);
1679 for (RegisterIndex reg : *successor_registers) {
1680 // If |reg| isn't allocated in successor registers, nothing to do.
1681 if (!successor_registers->IsAllocated(reg)) continue;
1682
1683 int virtual_register =
1684 successor_registers->VirtualRegisterForRegister(reg);
1685 MachineRepresentation rep = RepresentationFor(virtual_register);
1686
1687         // If we have already processed |reg|, e.g., by adding a gap move to
1688         // that register, then we can continue.
1689 if (processed_regs.Contains(reg, rep)) continue;
1690 processed_regs.Add(reg, rep);
1691
1692 if (register_state()->IsAllocated(reg)) {
1693 if (successor_registers->Equals(reg, register_state())) {
1694 // Both match, keep the merged register data.
1695 register_state()->CommitAtMerge(reg);
1696 } else {
1697 // Try to find a new register for this successor register in the
1698 // merge block, and add a gap move on entry of the successor block.
1699 RegisterIndex new_reg =
1700 RegisterForVirtualRegister(virtual_register);
1701 if (!new_reg.is_valid()) {
1702 new_reg = ChooseFreeRegister(
1703 allocated_registers_bits_.Union(succ_allocated_regs), rep);
1704 } else if (new_reg != reg) {
1705             // Spill |new_reg| in the successor block so that it can be used
1706             // for this gap move. It would be spilled anyway, since it holds a
1707             // different virtual register there than in the merge block.
1708 SpillRegisterAtMerge(successor_registers, new_reg);
1709 }
1710
1711 if (new_reg.is_valid()) {
1712 MoveRegisterOnMerge(new_reg, reg, virtual_register, successor,
1713 successor_registers);
1714 processed_regs.Add(new_reg, rep);
1715 } else {
1716 SpillRegisterAtMerge(successor_registers, reg);
1717 }
1718 }
1719 } else {
1720 DCHECK(successor_registers->IsAllocated(reg));
1721 if (RegisterForVirtualRegister(virtual_register).is_valid()) {
1722             // If we already hold the virtual register in a different register,
1723             // then spill this register in the successor block to avoid
1724             // invalidating the 1:1 vreg<->reg mapping.
1725 // TODO(rmcilroy): Add a gap move to avoid spilling.
1726 SpillRegisterAtMerge(successor_registers, reg);
1727 } else {
1728 // Register is free in our current register state, so merge the
1729 // successor block's register details into it.
1730 register_state()->CopyFrom(reg, successor_registers);
1731 AssignRegister(reg, virtual_register, UsePosition::kNone);
1732 }
1733 }
1734 }
1735 }
1736 }
1737 }
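// To summarize the merge above: registers holding the same virtual register
// in both states are committed as-is; conflicting assignments get either a
// gap move on entry to the successor or a spill in the successor; and
// registers only live in a successor are copied into the merged state, unless
// their virtual register already lives elsewhere, in which case they are
// spilled.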
1738
1739 RegisterBitVector SinglePassRegisterAllocator::GetAllocatedRegBitVector(
1740 RegisterState* reg_state) {
1741 RegisterBitVector allocated_regs;
1742 for (RegisterIndex reg : *reg_state) {
1743 if (reg_state->IsAllocated(reg)) {
1744 int virtual_register = reg_state->VirtualRegisterForRegister(reg);
1745 allocated_regs.Add(reg, RepresentationFor(virtual_register));
1746 }
1747 }
1748 return allocated_regs;
1749 }
1750
1751 void SinglePassRegisterAllocator::SpillRegisterAtMerge(RegisterState* reg_state,
1752 RegisterIndex reg) {
1753 DCHECK_NE(reg_state, register_state());
1754 if (reg_state->IsAllocated(reg)) {
1755 int virtual_register = reg_state->VirtualRegisterForRegister(reg);
1756 AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
1757 reg_state->Spill(reg, allocated, current_block(), data());
1758 }
1759 }
1760
1761 void SinglePassRegisterAllocator::MoveRegisterOnMerge(
1762 RegisterIndex from, RegisterIndex to, int virtual_register,
1763 RpoNumber successor, RegisterState* succ_state) {
1764 int instr_index = data()->GetBlock(successor)->first_instruction_index();
1765 MoveOperands* move =
1766 data()->AddPendingOperandGapMove(instr_index, Instruction::START);
1767 succ_state->Commit(to, AllocatedOperandForReg(to, virtual_register),
1768 &move->destination(), data());
1769 AllocatePendingUse(from, virtual_register, &move->source(), instr_index);
1770 }
1771
1772 void SinglePassRegisterAllocator::UpdateVirtualRegisterState() {
1773   // Update to the new register state, update the vreg-to-register map, and
1774   // reset any shared registers that were spilled by another block.
1775 DCHECK(HasRegisterState());
1776 for (RegisterIndex reg : *register_state()) {
1777 register_state()->ResetIfSpilledWhileShared(reg);
1778 int virtual_register = VirtualRegisterForRegister(reg);
1779 if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1780 AssignRegister(reg, virtual_register, UsePosition::kNone);
1781 }
1782 }
1783 CheckConsistency();
1784 }
1785
1786 void SinglePassRegisterAllocator::CheckConsistency() {
1787 #ifdef DEBUG
1788 for (int virtual_register = 0;
1789 virtual_register < data()->code()->VirtualRegisterCount();
1790 virtual_register++) {
1791 RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
1792 if (reg.is_valid()) {
1793 CHECK_EQ(virtual_register, VirtualRegisterForRegister(reg));
1794 CHECK(allocated_registers_bits_.Contains(
1795 reg, RepresentationFor(virtual_register)));
1796 }
1797 }
1798
1799 for (RegisterIndex reg : *register_state()) {
1800 int virtual_register = VirtualRegisterForRegister(reg);
1801 if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1802 CHECK_EQ(reg, RegisterForVirtualRegister(virtual_register));
1803 CHECK(allocated_registers_bits_.Contains(
1804 reg, RepresentationFor(virtual_register)));
1805 }
1806 }
1807 #endif
1808 }
1809
1810 RegisterIndex SinglePassRegisterAllocator::FromRegCode(
1811 int reg_code, MachineRepresentation rep) const {
1812 if (!kSimpleFPAliasing && kind() == RegisterKind::kDouble) {
1813 if (rep == MachineRepresentation::kFloat32) {
1814 return RegisterIndex(float32_reg_code_to_index_->at(reg_code));
1815 } else if (rep == MachineRepresentation::kSimd128) {
1816 return RegisterIndex(simd128_reg_code_to_index_->at(reg_code));
1817 }
1818 DCHECK_EQ(rep, MachineRepresentation::kFloat64);
1819 }
1820
1821 return RegisterIndex(reg_code_to_index_[reg_code]);
1822 }
1823
1824 int SinglePassRegisterAllocator::ToRegCode(RegisterIndex reg,
1825 MachineRepresentation rep) const {
1826 if (!kSimpleFPAliasing && kind() == RegisterKind::kDouble) {
1827 if (rep == MachineRepresentation::kFloat32) {
1828 DCHECK_NE(-1, index_to_float32_reg_code_->at(reg.ToInt()));
1829 return index_to_float32_reg_code_->at(reg.ToInt());
1830 } else if (rep == MachineRepresentation::kSimd128) {
1831 DCHECK_NE(-1, index_to_simd128_reg_code_->at(reg.ToInt()));
1832 return index_to_simd128_reg_code_->at(reg.ToInt());
1833 }
1834 DCHECK_EQ(rep, MachineRepresentation::kFloat64);
1835 }
1836 return index_to_reg_code_[reg.ToInt()];
1837 }
1838
1839 bool SinglePassRegisterAllocator::VirtualRegisterIsUnallocatedOrInReg(
1840 int virtual_register, RegisterIndex reg) {
1841 RegisterIndex existing_reg = RegisterForVirtualRegister(virtual_register);
1842 return !existing_reg.is_valid() || existing_reg == reg;
1843 }
1844
1845 bool SinglePassRegisterAllocator::IsFreeOrSameVirtualRegister(
1846 RegisterIndex reg, int virtual_register) {
1847 int allocated_vreg = VirtualRegisterForRegister(reg);
1848 return allocated_vreg == InstructionOperand::kInvalidVirtualRegister ||
1849 allocated_vreg == virtual_register;
1850 }
1851
1852 void SinglePassRegisterAllocator::EmitGapMoveFromOutput(InstructionOperand from,
1853 InstructionOperand to,
1854 int instr_index) {
1855 DCHECK(from.IsAllocated());
1856 DCHECK(to.IsAllocated());
1857 const InstructionBlock* block = current_block();
1858 DCHECK_EQ(data()->GetBlock(instr_index), block);
1859 if (instr_index == block->last_instruction_index()) {
1860 // Add gap move to the first instruction of every successor block.
1861 for (const RpoNumber succ : block->successors()) {
1862 const InstructionBlock* successor = data()->GetBlock(succ);
1863 DCHECK_EQ(1, successor->PredecessorCount());
1864 data()->AddGapMove(successor->first_instruction_index(),
1865 Instruction::START, from, to);
1866 }
1867 } else {
1868 data()->AddGapMove(instr_index + 1, Instruction::START, from, to);
1869 }
1870 }
1871
1872 void SinglePassRegisterAllocator::AssignRegister(RegisterIndex reg,
1873 int virtual_register,
1874 UsePosition pos) {
1875 MachineRepresentation rep = RepresentationFor(virtual_register);
1876 assigned_registers()->Add(ToRegCode(reg, rep));
1877 allocated_registers_bits_.Add(reg, rep);
1878 MarkRegisterUse(reg, rep, pos);
1879 if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1880 virtual_register_to_reg_[virtual_register] = reg;
1881 }
1882 }
1883
1884 void SinglePassRegisterAllocator::MarkRegisterUse(RegisterIndex reg,
1885 MachineRepresentation rep,
1886 UsePosition pos) {
1887 if (pos == UsePosition::kStart || pos == UsePosition::kAll) {
1888 in_use_at_instr_start_bits_.Add(reg, rep);
1889 }
1890 if (pos == UsePosition::kEnd || pos == UsePosition::kAll) {
1891 in_use_at_instr_end_bits_.Add(reg, rep);
1892 }
1893 }
1894
1895 void SinglePassRegisterAllocator::FreeRegister(RegisterIndex reg,
1896 int virtual_register) {
1897 allocated_registers_bits_.Clear(reg, RepresentationFor(virtual_register));
1898 if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1899 virtual_register_to_reg_[virtual_register] = RegisterIndex::Invalid();
1900 }
1901 }
1902
1903 RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
1904 VirtualRegisterData& virtual_register, int instr_index, UsePosition pos,
1905 bool must_use_register) {
1906 // If register is already allocated to the virtual register, use that.
1907 RegisterIndex reg = RegisterForVirtualRegister(virtual_register.vreg());
1908
1909 // If we don't need a register, only try to allocate one if the virtual
1910 // register hasn't yet been spilled, to try to avoid spilling it.
1911 if (!reg.is_valid() && (must_use_register ||
1912 !virtual_register.IsSpilledAt(instr_index, data()))) {
1913 reg = ChooseRegisterFor(RepresentationFor(virtual_register.vreg()), pos,
1914 must_use_register);
1915 }
1916 return reg;
1917 }
1918
1919 RegisterIndex SinglePassRegisterAllocator::ChooseRegisterFor(
1920 MachineRepresentation rep, UsePosition pos, bool must_use_register) {
1921 RegisterIndex reg = ChooseFreeRegister(rep, pos);
1922 if (!reg.is_valid() && must_use_register) {
1923 reg = ChooseRegisterToSpill(rep, pos);
1924 SpillRegister(reg);
1925 }
1926 return reg;
1927 }
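// When no register is free and |must_use_register| is false, an invalid
// RegisterIndex is returned and callers fall back to the virtual register's
// spill slot rather than forcing a spill here.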
1928
1929 RegisterBitVector SinglePassRegisterAllocator::InUseBitmap(UsePosition pos) {
1930 switch (pos) {
1931 case UsePosition::kStart:
1932 return in_use_at_instr_start_bits_;
1933 case UsePosition::kEnd:
1934 return in_use_at_instr_end_bits_;
1935 case UsePosition::kAll:
1936 return in_use_at_instr_start_bits_.Union(in_use_at_instr_end_bits_);
1937 case UsePosition::kNone:
1938 UNREACHABLE();
1939 }
1940 }
1941
1942 bool SinglePassRegisterAllocator::IsValidForRep(RegisterIndex reg,
1943 MachineRepresentation rep) {
1944 if (kSimpleFPAliasing || kind() == RegisterKind::kGeneral) {
1945 return true;
1946 } else {
1947 switch (rep) {
1948 case MachineRepresentation::kFloat32:
1949 return index_to_float32_reg_code_->at(reg.ToInt()) != -1;
1950 case MachineRepresentation::kFloat64:
1951 return true;
1952 case MachineRepresentation::kSimd128:
1953 return index_to_simd128_reg_code_->at(reg.ToInt()) != -1;
1954 default:
1955 UNREACHABLE();
1956 }
1957 }
1958 }
1959
1960 RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
1961 MachineRepresentation rep, UsePosition pos) {
1962 // Take the first free, non-blocked register, if available.
1963 // TODO(rmcilroy): Consider a better heuristic.
1964 RegisterBitVector allocated_or_in_use =
1965 InUseBitmap(pos).Union(allocated_registers_bits_);
1966 return ChooseFreeRegister(allocated_or_in_use, rep);
1967 }
1968
1969 RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
1970 const RegisterBitVector& allocated_regs, MachineRepresentation rep) {
1971 RegisterIndex chosen_reg = RegisterIndex::Invalid();
1972 if (kSimpleFPAliasing || kind() == RegisterKind::kGeneral) {
1973 chosen_reg = allocated_regs.GetFirstCleared(num_allocatable_registers());
1974 } else {
1975 // If we don't have simple fp aliasing, we need to check each register
1976 // individually to get one with the required representation.
1977 for (RegisterIndex reg : *register_state()) {
1978 if (IsValidForRep(reg, rep) && !allocated_regs.Contains(reg, rep)) {
1979 chosen_reg = reg;
1980 break;
1981 }
1982 }
1983 }
1984
1985 DCHECK_IMPLIES(chosen_reg.is_valid(), IsValidForRep(chosen_reg, rep));
1986 return chosen_reg;
1987 }
1988
1989 RegisterIndex SinglePassRegisterAllocator::ChooseRegisterToSpill(
1990 MachineRepresentation rep, UsePosition pos) {
1991 RegisterBitVector in_use = InUseBitmap(pos);
1992
1993 // Choose a register that will need to be spilled. Preferentially choose:
1994 // - A register with only pending uses, to avoid having to add a gap move for
1995 // a non-pending use.
1996 // - A register holding a virtual register that has already been spilled, to
1997 // avoid adding a new gap move to spill the virtual register when it is
1998 // output.
1999   //  - The register holding the virtual register with the earliest definition
2000   //    point, since it is more likely to be spilled anyway.
2001 RegisterIndex chosen_reg;
2002 int earliest_definition = kMaxInt;
2003 bool pending_only_use = false;
2004 bool already_spilled = false;
2005 for (RegisterIndex reg : *register_state()) {
2006 // Skip if register is in use, or not valid for representation.
2007 if (!IsValidForRep(reg, rep) || in_use.Contains(reg, rep)) continue;
2008
2009 VirtualRegisterData& vreg_data =
2010 VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
2011 if ((!pending_only_use && register_state()->HasPendingUsesOnly(reg)) ||
2012 (!already_spilled && vreg_data.HasSpillOperand()) ||
2013 vreg_data.output_instr_index() < earliest_definition) {
2014 chosen_reg = reg;
2015 earliest_definition = vreg_data.output_instr_index();
2016 pending_only_use = register_state()->HasPendingUsesOnly(reg);
2017 already_spilled = vreg_data.HasSpillOperand();
2018 }
2019 }
2020
2021 // There should always be an unblocked register available.
2022 DCHECK(chosen_reg.is_valid());
2023 DCHECK(IsValidForRep(chosen_reg, rep));
2024 return chosen_reg;
2025 }
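// For example, a register whose virtual register only has pending uses is
// preferred over one with committed uses, since its operands have not yet
// been fixed to the register and can still be resolved to the spill slot.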
2026
2027 void SinglePassRegisterAllocator::CommitRegister(RegisterIndex reg,
2028 int virtual_register,
2029 InstructionOperand* operand,
2030 UsePosition pos) {
2031   // Commit the register to the given operand, mark the register's use in
2032   // this instruction, then mark it as free going forward.
2033 AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
2034 register_state()->Commit(reg, allocated, operand, data());
2035 MarkRegisterUse(reg, RepresentationFor(virtual_register), pos);
2036 FreeRegister(reg, virtual_register);
2037 CheckConsistency();
2038 }
2039
2040 void SinglePassRegisterAllocator::SpillRegister(RegisterIndex reg) {
2041 if (!register_state()->IsAllocated(reg)) return;
2042
2043   // Spill the register and then free it.
2044 int virtual_register = VirtualRegisterForRegister(reg);
2045 AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
2046 register_state()->Spill(reg, allocated, current_block(), data());
2047 FreeRegister(reg, virtual_register);
2048 }
2049
2050 void SinglePassRegisterAllocator::SpillAllRegisters() {
2051 if (!HasRegisterState()) return;
2052
2053 for (RegisterIndex reg : *register_state()) {
2054 SpillRegister(reg);
2055 }
2056 }
2057
2058 void SinglePassRegisterAllocator::SpillRegisterForVirtualRegister(
2059 int virtual_register) {
2060 DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
2061 RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
2062 if (reg.is_valid()) {
2063 SpillRegister(reg);
2064 }
2065 }
2066
2067 void SinglePassRegisterAllocator::SpillRegisterForDeferred(RegisterIndex reg,
2068 int instr_index) {
2069   // If the register is shared across blocks and still allocated, spill it
2070   // for the deferred block and mark it as free going forward.
2071 if (register_state()->IsAllocated(reg) && register_state()->IsShared(reg)) {
2072 int virtual_register = VirtualRegisterForRegister(reg);
2073 AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
2074 register_state()->SpillForDeferred(reg, allocated, instr_index, data());
2075 FreeRegister(reg, virtual_register);
2076 }
2077 CheckConsistency();
2078 }
2079
2080 void SinglePassRegisterAllocator::AllocateDeferredBlockSpillOutput(
2081 int instr_index, RpoNumber deferred_block, int virtual_register) {
2082 DCHECK(data()->GetBlock(deferred_block)->IsDeferred());
2083 VirtualRegisterData& vreg_data =
2084 data()->VirtualRegisterDataFor(virtual_register);
2085 if (!vreg_data.NeedsSpillAtOutput() &&
2086 !DefinedAfter(virtual_register, instr_index, UsePosition::kEnd)) {
2087     // If a register has been assigned to the virtual register, and the virtual
2088     // register still doesn't need to be spilled at its output, add a pending
2089     // move to output the virtual register to its spill slot on entry to the
2090     // deferred block (to avoid spilling it in non-deferred code).
2091 // TODO(rmcilroy): Consider assigning a register even if the virtual
2092 // register isn't yet assigned - currently doing this regresses performance.
2093 RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
2094 if (reg.is_valid()) {
2095 int deferred_block_start =
2096 data()->GetBlock(deferred_block)->first_instruction_index();
2097 register_state()->MoveToSpillSlotOnDeferred(reg, virtual_register,
2098 deferred_block_start, data());
2099 return;
2100 } else {
2101 vreg_data.MarkAsNeedsSpillAtOutput();
2102 }
2103 }
2104 }
2105
2106 AllocatedOperand SinglePassRegisterAllocator::AllocatedOperandForReg(
2107 RegisterIndex reg, int virtual_register) {
2108 MachineRepresentation rep = RepresentationFor(virtual_register);
2109 return AllocatedOperand(AllocatedOperand::REGISTER, rep, ToRegCode(reg, rep));
2110 }
2111
2112 void SinglePassRegisterAllocator::AllocateUse(RegisterIndex reg,
2113 int virtual_register,
2114 InstructionOperand* operand,
2115 int instr_index,
2116 UsePosition pos) {
2117 DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
2118 DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register));
2119
2120 AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
2121 register_state()->Commit(reg, allocated, operand, data());
2122 register_state()->AllocateUse(reg, virtual_register, operand, instr_index,
2123 data());
2124 AssignRegister(reg, virtual_register, pos);
2125 CheckConsistency();
2126 }
2127
2128 void SinglePassRegisterAllocator::AllocatePendingUse(
2129 RegisterIndex reg, int virtual_register, InstructionOperand* operand,
2130 int instr_index) {
2131 DCHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
2132 DCHECK(IsFreeOrSameVirtualRegister(reg, virtual_register));
2133
2134 register_state()->AllocatePendingUse(reg, virtual_register, operand,
2135 instr_index);
2136   // Since this is a pending use and the operand doesn't need to use a
2137   // register, allocate with UsePosition::kNone to avoid blocking its use by
2138   // other operands in this instruction.
2139 AssignRegister(reg, virtual_register, UsePosition::kNone);
2140 CheckConsistency();
2141 }
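// In contrast to AllocateUse above, nothing is committed to |reg| here: if the
// register is later spilled, the pending operand is resolved to the spill slot
// rather than to the register.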
2142
2143 void SinglePassRegisterAllocator::AllocateUseWithMove(
2144 RegisterIndex reg, int virtual_register, UnallocatedOperand* operand,
2145 int instr_index, UsePosition pos) {
2146 AllocatedOperand to = AllocatedOperandForReg(reg, virtual_register);
2147 UnallocatedOperand from = UnallocatedOperand(
2148 UnallocatedOperand::REGISTER_OR_SLOT, virtual_register);
2149 data()->AddGapMove(instr_index, Instruction::END, from, to);
2150 InstructionOperand::ReplaceWith(operand, &to);
2151 MarkRegisterUse(reg, RepresentationFor(virtual_register), pos);
2152 CheckConsistency();
2153 }
2154
2155 void SinglePassRegisterAllocator::AllocateInput(UnallocatedOperand* operand,
2156 int instr_index) {
2157 EnsureRegisterState();
2158 int virtual_register = operand->virtual_register();
2159 MachineRepresentation rep = RepresentationFor(virtual_register);
2160 VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
2161
2162 // Spill slot policy operands.
2163 if (operand->HasFixedSlotPolicy()) {
2164 // If the operand is from a fixed slot, allocate it to that fixed slot,
2165 // then add a gap move from an unconstrained copy of that input operand,
2166 // and spill the gap move's input operand.
2167 // TODO(rmcilroy): We could allocate a register for the gap move however
2168 // we would need to wait until we've done all the allocations for the
2169 // instruction since the allocation needs to reflect the state before
2170 // the instruction (at the gap move). For now spilling is fine since
2171 // fixed slot inputs are uncommon.
2172 UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
2173 virtual_register);
2174 AllocatedOperand allocated = AllocatedOperand(
2175 AllocatedOperand::STACK_SLOT, rep, operand->fixed_slot_index());
2176 InstructionOperand::ReplaceWith(operand, &allocated);
2177 MoveOperands* move_op =
2178 data()->AddGapMove(instr_index, Instruction::END, input_copy, *operand);
2179 vreg_data.SpillOperand(&move_op->source(), instr_index, data());
2180 return;
2181 } else if (operand->HasSlotPolicy()) {
2182 vreg_data.SpillOperand(operand, instr_index, data());
2183 return;
2184 }
2185
2186 // Otherwise try to allocate a register for the operation.
2187 UsePosition pos =
2188 operand->IsUsedAtStart() ? UsePosition::kStart : UsePosition::kAll;
2189 if (operand->HasFixedRegisterPolicy() ||
2190 operand->HasFixedFPRegisterPolicy()) {
2191 // With a fixed register operand, we must use that register.
2192 RegisterIndex reg = FromRegCode(operand->fixed_register_index(), rep);
2193 if (!VirtualRegisterIsUnallocatedOrInReg(virtual_register, reg)) {
2194 // If the virtual register is already in a different register, then just
2195 // add a gap move from that register to the fixed register.
2196 AllocateUseWithMove(reg, virtual_register, operand, instr_index, pos);
2197 } else {
2198 // Otherwise allocate a use of the fixed register for |virtual_register|.
2199 AllocateUse(reg, virtual_register, operand, instr_index, pos);
2200 }
2201 } else {
2202 bool must_use_register = operand->HasRegisterPolicy() ||
2203 (vreg_data.is_constant() &&
2204 !operand->HasRegisterOrSlotOrConstantPolicy());
2205 RegisterIndex reg =
2206 ChooseRegisterFor(vreg_data, instr_index, pos, must_use_register);
2207
2208 if (reg.is_valid()) {
2209 if (must_use_register) {
2210 AllocateUse(reg, virtual_register, operand, instr_index, pos);
2211 } else {
2212 AllocatePendingUse(reg, virtual_register, operand, instr_index);
2213 }
2214 } else {
2215 vreg_data.SpillOperand(operand, instr_index, data());
2216 }
2217 }
2218 }
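// As an example of the fixed-register path above: if the value currently lives
// in some other register, the input operand is rewritten to the required fixed
// register and a gap move from the value's current location into that fixed
// register is added at this instruction's gap, instead of reassigning the
// value itself.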
2219
2220 void SinglePassRegisterAllocator::AllocateGapMoveInput(
2221 UnallocatedOperand* operand, int instr_index) {
2222 EnsureRegisterState();
2223 int virtual_register = operand->virtual_register();
2224 VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
2225
2226 // Gap move inputs should be unconstrained.
2227 DCHECK(operand->HasRegisterOrSlotPolicy());
2228 RegisterIndex reg =
2229 ChooseRegisterFor(vreg_data, instr_index, UsePosition::kStart, false);
2230 if (reg.is_valid()) {
2231 AllocatePendingUse(reg, virtual_register, operand, instr_index);
2232 } else {
2233 vreg_data.SpillOperand(operand, instr_index, data());
2234 }
2235 }
2236
2237 void SinglePassRegisterAllocator::AllocateConstantOutput(
2238 ConstantOperand* operand) {
2239 EnsureRegisterState();
2240 // If the constant is allocated to a register, spill it now to add the
2241 // necessary gap moves from the constant operand to the register.
2242 int virtual_register = operand->virtual_register();
2243 SpillRegisterForVirtualRegister(virtual_register);
2244 }
2245
2246 void SinglePassRegisterAllocator::AllocateOutput(UnallocatedOperand* operand,
2247 int instr_index) {
2248 AllocateOutput(operand, instr_index, UsePosition::kEnd);
2249 }
2250
2251 RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
2252 UnallocatedOperand* operand, int instr_index, UsePosition pos) {
2253 EnsureRegisterState();
2254 int virtual_register = operand->virtual_register();
2255 VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
2256
2257 RegisterIndex reg;
2258 if (operand->HasSlotPolicy() || operand->HasFixedSlotPolicy()) {
2259 // We can't allocate a register for output given the policy, so make sure
2260 // to spill the register holding this virtual register if any.
2261 SpillRegisterForVirtualRegister(virtual_register);
2262 reg = RegisterIndex::Invalid();
2263 } else if (operand->HasFixedPolicy()) {
2264 reg = FromRegCode(operand->fixed_register_index(),
2265 RepresentationFor(virtual_register));
2266 } else {
2267 reg = ChooseRegisterFor(vreg_data, instr_index, pos,
2268 operand->HasRegisterPolicy());
2269 }
2270
2271 // TODO(rmcilroy): support secondary storage.
2272 if (!reg.is_valid()) {
2273 vreg_data.SpillOperand(operand, instr_index, data());
2274 } else {
2275 InstructionOperand move_output_to;
2276 if (!VirtualRegisterIsUnallocatedOrInReg(virtual_register, reg)) {
2277       // If the |virtual_register| was in a different register (e.g., due to
2278 // the output having a fixed register), then commit its use in that
2279 // register here, and move it from the output operand below.
2280 RegisterIndex existing_reg = RegisterForVirtualRegister(virtual_register);
2281 // Don't mark |existing_reg| as used in this instruction, since it is used
2282 // in the (already allocated) following instruction's gap-move.
2283 CommitRegister(existing_reg, virtual_register, &move_output_to,
2284 UsePosition::kNone);
2285 }
2286 CommitRegister(reg, virtual_register, operand, pos);
2287 if (move_output_to.IsAllocated()) {
2288 // Emit a move from output to the register that the |virtual_register| was
2289 // allocated to.
2290 EmitGapMoveFromOutput(*operand, move_output_to, instr_index);
2291 }
2292 if (vreg_data.NeedsSpillAtOutput()) {
2293 vreg_data.EmitGapMoveFromOutputToSpillSlot(
2294 *AllocatedOperand::cast(operand), current_block(), instr_index,
2295 data());
2296 } else if (vreg_data.NeedsSpillAtDeferredBlocks()) {
2297 vreg_data.EmitDeferredSpillOutputs(data());
2298 }
2299 }
2300
2301 return reg;
2302 }
2303
2304 void SinglePassRegisterAllocator::AllocateSameInputOutput(
2305 UnallocatedOperand* output, UnallocatedOperand* input, int instr_index) {
2306 EnsureRegisterState();
2307 int input_vreg = input->virtual_register();
2308 int output_vreg = output->virtual_register();
2309
2310 // The input operand has the details of the register constraints, so replace
2311 // the output operand with a copy of the input, with the output's vreg.
2312 UnallocatedOperand output_as_input(*input, output_vreg);
2313 InstructionOperand::ReplaceWith(output, &output_as_input);
2314 RegisterIndex reg = AllocateOutput(output, instr_index, UsePosition::kAll);
2315
2316 if (reg.is_valid()) {
2317 // Replace the input operand with an unallocated fixed register policy for
2318 // the same register.
2319 UnallocatedOperand::ExtendedPolicy policy =
2320 kind() == RegisterKind::kGeneral
2321 ? UnallocatedOperand::FIXED_REGISTER
2322 : UnallocatedOperand::FIXED_FP_REGISTER;
2323 MachineRepresentation rep = RepresentationFor(input_vreg);
2324 UnallocatedOperand fixed_input(policy, ToRegCode(reg, rep), input_vreg);
2325 InstructionOperand::ReplaceWith(input, &fixed_input);
2326 } else {
2327 // Output was spilled. Due to the SameAsInput allocation policy, we need to
2328 // make the input operand the same as the output, i.e., the output virtual
2329 // register's spill slot. As such, spill this input operand using the output
2330 // virtual register's spill slot, then add a gap-move to move the input
2331 // value into this spill slot.
2332 VirtualRegisterData& output_vreg_data = VirtualRegisterDataFor(output_vreg);
2333 output_vreg_data.SpillOperand(input, instr_index, data());
2334
2335 // Add an unconstrained gap move for the input virtual register.
2336 UnallocatedOperand unconstrained_input(UnallocatedOperand::REGISTER_OR_SLOT,
2337 input_vreg);
2338 MoveOperands* move_ops = data()->AddGapMove(
2339 instr_index, Instruction::END, unconstrained_input, PendingOperand());
2340 output_vreg_data.SpillOperand(&move_ops->destination(), instr_index,
2341 data());
2342 }
2343 }
2344
2345 void SinglePassRegisterAllocator::AllocateTemp(UnallocatedOperand* operand,
2346 int instr_index) {
2347 EnsureRegisterState();
2348 int virtual_register = operand->virtual_register();
2349 RegisterIndex reg;
2350 DCHECK(!operand->HasFixedSlotPolicy());
2351 if (operand->HasSlotPolicy()) {
2352 reg = RegisterIndex::Invalid();
2353 } else if (operand->HasFixedRegisterPolicy() ||
2354 operand->HasFixedFPRegisterPolicy()) {
2355 reg = FromRegCode(operand->fixed_register_index(),
2356 RepresentationFor(virtual_register));
2357 } else {
2358 reg = ChooseRegisterFor(RepresentationFor(virtual_register),
2359 UsePosition::kAll, operand->HasRegisterPolicy());
2360 }
2361
2362 if (reg.is_valid()) {
2363 DCHECK(virtual_register == InstructionOperand::kInvalidVirtualRegister ||
2364 VirtualRegisterIsUnallocatedOrInReg(virtual_register, reg));
2365 CommitRegister(reg, virtual_register, operand, UsePosition::kAll);
2366 } else {
2367 VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
2368 vreg_data.SpillOperand(operand, instr_index, data());
2369 }
2370 }
2371
2372 bool SinglePassRegisterAllocator::DefinedAfter(int virtual_register,
2373 int instr_index,
2374 UsePosition pos) {
2375 if (virtual_register == InstructionOperand::kInvalidVirtualRegister)
2376 return false;
2377 int defined_at =
2378 VirtualRegisterDataFor(virtual_register).output_instr_index();
2379 return defined_at > instr_index ||
2380 (defined_at == instr_index && pos == UsePosition::kStart);
2381 }
2382
2383 void SinglePassRegisterAllocator::ReserveFixedInputRegister(
2384 const UnallocatedOperand* operand, int instr_index) {
2385 ReserveFixedRegister(
2386 operand, instr_index,
2387 operand->IsUsedAtStart() ? UsePosition::kStart : UsePosition::kAll);
2388 }
2389
2390 void SinglePassRegisterAllocator::ReserveFixedTempRegister(
2391 const UnallocatedOperand* operand, int instr_index) {
2392 ReserveFixedRegister(operand, instr_index, UsePosition::kAll);
2393 }
2394
2395 void SinglePassRegisterAllocator::ReserveFixedOutputRegister(
2396 const UnallocatedOperand* operand, int instr_index) {
2397 ReserveFixedRegister(operand, instr_index, UsePosition::kEnd);
2398 }
2399
2400 void SinglePassRegisterAllocator::ReserveFixedRegister(
2401 const UnallocatedOperand* operand, int instr_index, UsePosition pos) {
2402 EnsureRegisterState();
2403 int virtual_register = operand->virtual_register();
2404 MachineRepresentation rep = RepresentationFor(virtual_register);
2405 RegisterIndex reg = FromRegCode(operand->fixed_register_index(), rep);
2406 if (!IsFreeOrSameVirtualRegister(reg, virtual_register) &&
2407 !DefinedAfter(virtual_register, instr_index, pos)) {
2408 // If register is in-use by a different virtual register, spill it now.
2409 // TODO(rmcilroy): Consider moving to a unconstrained register instead of
2410 // spilling.
2411 SpillRegister(reg);
2412 }
2413 MarkRegisterUse(reg, rep, pos);
2414 }
2415
2416 void SinglePassRegisterAllocator::AllocatePhiGapMove(int to_vreg, int from_vreg,
2417 int instr_index) {
2418 EnsureRegisterState();
2419 RegisterIndex from_register = RegisterForVirtualRegister(from_vreg);
2420 RegisterIndex to_register = RegisterForVirtualRegister(to_vreg);
2421
2422 // If to_register isn't marked as a phi gap move, we can't use it as such.
2423 if (to_register.is_valid() && !register_state()->IsPhiGapMove(to_register)) {
2424 to_register = RegisterIndex::Invalid();
2425 }
2426
2427 if (to_register.is_valid() && !from_register.is_valid()) {
2428 // If |to| virtual register is allocated to a register, and the |from|
2429 // virtual register isn't allocated, then commit this register and
2430 // re-allocate it to the |from| virtual register.
2431 InstructionOperand operand;
2432 CommitRegister(to_register, to_vreg, &operand, UsePosition::kAll);
2433 AllocateUse(to_register, from_vreg, &operand, instr_index,
2434 UsePosition::kAll);
2435 } else {
2436 // Otherwise add a gap move.
2437 MoveOperands* move =
2438 data()->AddPendingOperandGapMove(instr_index, Instruction::END);
2439 PendingOperand* to_operand = PendingOperand::cast(&move->destination());
2440 PendingOperand* from_operand = PendingOperand::cast(&move->source());
2441
2442 // Commit the |to| side to either a register or the pending spills.
2443 if (to_register.is_valid()) {
2444 CommitRegister(to_register, to_vreg, to_operand, UsePosition::kAll);
2445 } else {
2446 VirtualRegisterDataFor(to_vreg).SpillOperand(to_operand, instr_index,
2447 data());
2448 }
2449
2450 // The from side is unconstrained.
2451 UnallocatedOperand unconstrained_input(UnallocatedOperand::REGISTER_OR_SLOT,
2452 from_vreg);
2453 InstructionOperand::ReplaceWith(from_operand, &unconstrained_input);
2454 }
2455 }
2456
2457 void SinglePassRegisterAllocator::AllocatePhi(int virtual_register,
2458 const InstructionBlock* block) {
2459 VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
2460 if (vreg_data.NeedsSpillAtOutput() || block->IsLoopHeader()) {
2461 // If the Phi needs to be spilled, just spill here directly so that all
2462 // gap moves into the Phi move into the spill slot.
2463 SpillRegisterForVirtualRegister(virtual_register);
2464 } else {
2465 RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
2466 if (reg.is_valid()) {
2467 // If the register is valid, assign it as a phi gap move to be processed
2468 // at the successor blocks. If no register or spill slot was used then
2469 // the virtual register was never used.
2470 register_state()->UseForPhiGapMove(reg);
2471 }
2472 }
2473 }
2474
2475 void SinglePassRegisterAllocator::EnsureRegisterState() {
2476 if (!HasRegisterState()) {
2477 register_state_ = RegisterState::New(kind(), num_allocatable_registers_,
2478 data()->allocation_zone());
2479 }
2480 }
2481
2482 class MidTierOutputProcessor final {
2483 public:
2484 explicit MidTierOutputProcessor(MidTierRegisterAllocationData* data);
2485
2486 void InitializeBlockState(const InstructionBlock* block);
2487 void DefineOutputs(const InstructionBlock* block);
2488
2489 private:
2490 void PopulateDeferredBlockRegion(RpoNumber initial_block);
2491
2492   VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
2493 return data()->VirtualRegisterDataFor(virtual_register);
2494 }
2495   MachineRepresentation RepresentationFor(int virtual_register) const {
2496 return data()->RepresentationFor(virtual_register);
2497 }
2498
2499   bool IsDeferredBlockBoundary(const ZoneVector<RpoNumber>& blocks) {
2500 return blocks.size() == 1 && !data()->GetBlock(blocks[0])->IsDeferred();
2501 }
2502
2503   MidTierRegisterAllocationData* data() const { return data_; }
2504   InstructionSequence* code() const { return data()->code(); }
2505   Zone* zone() const { return data()->allocation_zone(); }
2506
2507 MidTierRegisterAllocationData* const data_;
2508 ZoneQueue<RpoNumber> deferred_blocks_worklist_;
2509 ZoneSet<RpoNumber> deferred_blocks_processed_;
2510 };
2511
2512 MidTierOutputProcessor::MidTierOutputProcessor(
2513 MidTierRegisterAllocationData* data)
2514 : data_(data),
2515 deferred_blocks_worklist_(data->allocation_zone()),
2516 deferred_blocks_processed_(data->allocation_zone()) {}
2517
2518 void MidTierOutputProcessor::PopulateDeferredBlockRegion(
2519 RpoNumber initial_block) {
2520 DeferredBlocksRegion* deferred_blocks_region =
2521 zone()->New<DeferredBlocksRegion>(zone(),
2522 code()->InstructionBlockCount());
2523 DCHECK(deferred_blocks_worklist_.empty());
2524 deferred_blocks_worklist_.push(initial_block);
2525 deferred_blocks_processed_.insert(initial_block);
2526 while (!deferred_blocks_worklist_.empty()) {
2527 RpoNumber current = deferred_blocks_worklist_.front();
2528 deferred_blocks_worklist_.pop();
2529 deferred_blocks_region->AddBlock(current, data());
2530
2531 const InstructionBlock* curr_block = data()->GetBlock(current);
2532 // Check for whether the predecessor blocks are still deferred.
2533 if (IsDeferredBlockBoundary(curr_block->predecessors())) {
2534       // If not, mark the non-deferred predecessor as a deferred block boundary.
2535 data()
2536 ->block_state(curr_block->predecessors()[0])
2537 .MarkAsDeferredBlockBoundary();
2538 } else {
2539 // Otherwise process predecessors.
2540 for (RpoNumber pred : curr_block->predecessors()) {
2541 if (deferred_blocks_processed_.count(pred) == 0) {
2542 deferred_blocks_worklist_.push(pred);
2543 deferred_blocks_processed_.insert(pred);
2544 }
2545 }
2546 }
2547
2548 // Check for whether the successor blocks are still deferred.
2549 // Process any unprocessed successors if we aren't at a boundary.
2550 if (IsDeferredBlockBoundary(curr_block->successors())) {
2551       // If not, mark this block as a deferred block boundary.
2552 data()->block_state(current).MarkAsDeferredBlockBoundary();
2553 } else {
2554 // Otherwise process successors.
2555 for (RpoNumber succ : curr_block->successors()) {
2556 if (deferred_blocks_processed_.count(succ) == 0) {
2557 deferred_blocks_worklist_.push(succ);
2558 deferred_blocks_processed_.insert(succ);
2559 }
2560 }
2561 }
2562 }
2563 }
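// The worklist above flood-fills the connected region of deferred blocks
// reachable from |initial_block| through both predecessors and successors,
// recording a deferred block boundary wherever the region meets non-deferred
// code.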
2564
2565 void MidTierOutputProcessor::InitializeBlockState(
2566 const InstructionBlock* block) {
2567 // Update our predecessor blocks with their successors_phi_index if we have
2568 // phis.
2569 if (block->phis().size()) {
2570 for (int i = 0; i < static_cast<int>(block->PredecessorCount()); ++i) {
2571 data()->block_state(block->predecessors()[i]).set_successors_phi_index(i);
2572 }
2573 }
2574
2575 BlockState& block_state = data()->block_state(block->rpo_number());
2576
2577 if (block->IsDeferred() && !block_state.deferred_blocks_region()) {
2578 PopulateDeferredBlockRegion(block->rpo_number());
2579 }
2580
2581 // Mark this block as dominating itself.
2582 block_state.dominated_blocks()->Add(block->rpo_number().ToInt());
2583
2584 if (block->dominator().IsValid()) {
2585 // Add all the blocks this block dominates to its dominator.
2586 BlockState& dominator_block_state = data()->block_state(block->dominator());
2587 dominator_block_state.dominated_blocks()->Union(
2588 *block_state.dominated_blocks());
2589 } else {
2590 // Only the first block shouldn't have a dominator.
2591 DCHECK_EQ(block, code()->instruction_blocks().front());
2592 }
2593 }
2594
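// Walks the block's instructions in reverse and records how each output is
// defined in its VirtualRegisterData: as a constant, as a fixed stack slot
// (when the output has a fixed-slot policy), or as a plain unallocated
// operand. Instructions that need reference maps are collected for later
// processing, and phi outputs are defined last.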
void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
  int block_start = block->first_instruction_index();
  bool is_deferred = block->IsDeferred();

  for (int index = block->last_instruction_index(); index >= block_start;
       index--) {
    Instruction* instr = code()->InstructionAt(index);

    // For each instruction, define details of the output with the associated
    // virtual register data.
    for (size_t i = 0; i < instr->OutputCount(); i++) {
      InstructionOperand* output = instr->OutputAt(i);
      if (output->IsConstant()) {
        ConstantOperand* constant_operand = ConstantOperand::cast(output);
        int virtual_register = constant_operand->virtual_register();
        VirtualRegisterDataFor(virtual_register)
            .DefineAsConstantOperand(constant_operand, index, is_deferred);
      } else {
        DCHECK(output->IsUnallocated());
        UnallocatedOperand* unallocated_operand =
            UnallocatedOperand::cast(output);
        int virtual_register = unallocated_operand->virtual_register();
        bool is_exceptional_call_output =
            instr->IsCallWithDescriptorFlags() &&
            instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler);
        if (unallocated_operand->HasFixedSlotPolicy()) {
          // If output has a fixed slot policy, allocate its spill operand now
          // so that the register allocator can use this knowledge.
          MachineRepresentation rep = RepresentationFor(virtual_register);
          AllocatedOperand* fixed_spill_operand =
              AllocatedOperand::New(zone(), AllocatedOperand::STACK_SLOT, rep,
                                    unallocated_operand->fixed_slot_index());
          VirtualRegisterDataFor(virtual_register)
              .DefineAsFixedSpillOperand(fixed_spill_operand, virtual_register,
                                         index, is_deferred,
                                         is_exceptional_call_output);
        } else {
          VirtualRegisterDataFor(virtual_register)
              .DefineAsUnallocatedOperand(virtual_register, index, is_deferred,
                                          is_exceptional_call_output);
        }
      }
    }

    // Mark any instructions that require reference maps for later reference
    // map processing.
    if (instr->HasReferenceMap()) {
      data()->reference_map_instructions().push_back(index);
    }
  }

  // Define phi output operands.
  for (PhiInstruction* phi : block->phis()) {
    int virtual_register = phi->virtual_register();
    VirtualRegisterDataFor(virtual_register)
        .DefineAsPhi(virtual_register, block->first_instruction_index(),
                     is_deferred);
  }
}

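// Driver for the output-definition phase: initializes per-block state and
// defines output operands for every block, visiting blocks in reverse order.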
void DefineOutputs(MidTierRegisterAllocationData* data) {
  MidTierOutputProcessor processor(data);

  for (const InstructionBlock* block :
       base::Reversed(data->code()->instruction_blocks())) {
    data->tick_counter()->TickAndMaybeEnterSafepoint();

    processor.InitializeBlockState(block);
    processor.DefineOutputs(block);
  }
}

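// Drives the main allocation phase: registers are allocated one basic block at
// a time, walking blocks (and the instructions within them) in reverse order,
// with a separate SinglePassRegisterAllocator for general and for double
// registers.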
class MidTierRegisterAllocator final {
 public:
  explicit MidTierRegisterAllocator(MidTierRegisterAllocationData* data);
  MidTierRegisterAllocator(const MidTierRegisterAllocator&) = delete;
  MidTierRegisterAllocator& operator=(const MidTierRegisterAllocator&) = delete;

  void AllocateRegisters(const InstructionBlock* block);
  void UpdateSpillRangesForLoops();

  SinglePassRegisterAllocator& general_reg_allocator() {
    return general_reg_allocator_;
  }
  SinglePassRegisterAllocator& double_reg_allocator() {
    return double_reg_allocator_;
  }

 private:
  void AllocatePhis(const InstructionBlock* block);
  void AllocatePhiGapMoves(const InstructionBlock* block);

  bool IsFixedRegisterPolicy(const UnallocatedOperand* operand);
  void ReserveFixedRegisters(int instr_index);

  SinglePassRegisterAllocator& AllocatorFor(MachineRepresentation rep);
  SinglePassRegisterAllocator& AllocatorFor(const UnallocatedOperand* operand);
  SinglePassRegisterAllocator& AllocatorFor(const ConstantOperand* operand);

  VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
    return data()->VirtualRegisterDataFor(virtual_register);
  }
  MachineRepresentation RepresentationFor(int virtual_register) const {
    return data()->RepresentationFor(virtual_register);
  }
  MidTierRegisterAllocationData* data() const { return data_; }
  InstructionSequence* code() const { return data()->code(); }
  Zone* allocation_zone() const { return data()->allocation_zone(); }

  MidTierRegisterAllocationData* const data_;
  SinglePassRegisterAllocator general_reg_allocator_;
  SinglePassRegisterAllocator double_reg_allocator_;
};

MidTierRegisterAllocator::MidTierRegisterAllocator(
    MidTierRegisterAllocationData* data)
    : data_(data),
      general_reg_allocator_(RegisterKind::kGeneral, data),
      double_reg_allocator_(RegisterKind::kDouble, data) {}

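// Allocates registers for |block|, visiting its instructions from last to
// first. For each instruction, fixed register operands are reserved first,
// then outputs, temps and inputs are allocated (inputs that live across the
// whole instruction before used-at-start inputs). At the block's last
// instruction, gap moves for phis in the successor are allocated, and loop
// headers currently spill all registers before the block is closed.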
void MidTierRegisterAllocator::AllocateRegisters(
    const InstructionBlock* block) {
  RpoNumber block_rpo = block->rpo_number();
  bool is_deferred_block_boundary =
      data()->block_state(block_rpo).is_deferred_block_boundary();

  general_reg_allocator().StartBlock(block);
  double_reg_allocator().StartBlock(block);

  // If this block is not deferred but has deferred successors, then try to
  // output spill slots at the start of those deferred blocks for virtual
  // registers that are only spilled in the deferred blocks, to avoid having to
  // spill them at their output in non-deferred blocks.
  if (is_deferred_block_boundary && !block->IsDeferred()) {
    for (RpoNumber successor : block->successors()) {
      if (!data()->GetBlock(successor)->IsDeferred()) continue;
      DCHECK_GT(successor, block_rpo);
      for (int virtual_register :
           *data()->block_state(successor).deferred_blocks_region()) {
        USE(virtual_register);
        AllocatorFor(RepresentationFor(virtual_register))
            .AllocateDeferredBlockSpillOutput(block->last_instruction_index(),
                                              successor, virtual_register);
      }
    }
  }

  // Allocate registers for instructions in reverse, from the end of the block
  // to the start.
  int block_start = block->first_instruction_index();
  for (int instr_index = block->last_instruction_index();
       instr_index >= block_start; instr_index--) {
    Instruction* instr = code()->InstructionAt(instr_index);

    // Reserve any fixed register operands to prevent the register being
    // allocated to another operand.
    ReserveFixedRegisters(instr_index);

    // Allocate outputs.
    for (size_t i = 0; i < instr->OutputCount(); i++) {
      InstructionOperand* output = instr->OutputAt(i);
      DCHECK(!output->IsAllocated());
      if (output->IsConstant()) {
        ConstantOperand* constant_operand = ConstantOperand::cast(output);
        AllocatorFor(constant_operand).AllocateConstantOutput(constant_operand);
      } else {
        UnallocatedOperand* unallocated_output =
            UnallocatedOperand::cast(output);
        if (unallocated_output->HasSameAsInputPolicy()) {
          DCHECK_EQ(i, 0);
          UnallocatedOperand* unallocated_input =
              UnallocatedOperand::cast(instr->InputAt(0));
          DCHECK_EQ(AllocatorFor(unallocated_input).kind(),
                    AllocatorFor(unallocated_output).kind());
          AllocatorFor(unallocated_output)
              .AllocateSameInputOutput(unallocated_output, unallocated_input,
                                       instr_index);
        } else {
          AllocatorFor(unallocated_output)
              .AllocateOutput(unallocated_output, instr_index);
        }
      }
    }

    if (instr->ClobbersRegisters()) {
      general_reg_allocator().SpillAllRegisters();
    }
    if (instr->ClobbersDoubleRegisters()) {
      double_reg_allocator().SpillAllRegisters();
    }

    // Allocate temporaries.
    for (size_t i = 0; i < instr->TempCount(); i++) {
      UnallocatedOperand* temp = UnallocatedOperand::cast(instr->TempAt(i));
      AllocatorFor(temp).AllocateTemp(temp, instr_index);
    }

    // Allocate inputs that are used across the whole instruction.
    for (size_t i = 0; i < instr->InputCount(); i++) {
      if (!instr->InputAt(i)->IsUnallocated()) continue;
      UnallocatedOperand* input = UnallocatedOperand::cast(instr->InputAt(i));
      if (input->IsUsedAtStart()) continue;
      AllocatorFor(input).AllocateInput(input, instr_index);
    }

    // Then allocate inputs that are only used at the start of the instruction.
    for (size_t i = 0; i < instr->InputCount(); i++) {
      if (!instr->InputAt(i)->IsUnallocated()) continue;
      UnallocatedOperand* input = UnallocatedOperand::cast(instr->InputAt(i));
      DCHECK(input->IsUsedAtStart());
      AllocatorFor(input).AllocateInput(input, instr_index);
    }

    // If we are allocating for the last instruction in the block, allocate any
    // phi gap move operations that are needed to resolve phis in our successor.
    if (instr_index == block->last_instruction_index()) {
      AllocatePhiGapMoves(block);

      // If this block is deferred but its successor isn't, update the state to
      // limit spills to the deferred blocks where possible.
      if (is_deferred_block_boundary && block->IsDeferred()) {
        general_reg_allocator().UpdateForDeferredBlock(instr_index);
        double_reg_allocator().UpdateForDeferredBlock(instr_index);
      }
    }

    // Allocate any unallocated gap move inputs.
    ParallelMove* moves = instr->GetParallelMove(Instruction::END);
    if (moves != nullptr) {
      for (MoveOperands* move : *moves) {
        DCHECK(!move->destination().IsUnallocated());
        if (move->source().IsUnallocated()) {
          UnallocatedOperand* source =
              UnallocatedOperand::cast(&move->source());
          AllocatorFor(source).AllocateGapMoveInput(source, instr_index);
        }
      }
    }

    general_reg_allocator().EndInstruction();
    double_reg_allocator().EndInstruction();
  }

  // For now we spill all registers at a loop header.
  // TODO(rmcilroy): Add support for register allocations across loops.
  if (block->IsLoopHeader()) {
    general_reg_allocator().SpillAllRegisters();
    double_reg_allocator().SpillAllRegisters();
  }

  AllocatePhis(block);

  general_reg_allocator().EndBlock(block);
  double_reg_allocator().EndBlock(block);
}

SinglePassRegisterAllocator& MidTierRegisterAllocator::AllocatorFor(
    MachineRepresentation rep) {
  if (IsFloatingPoint(rep)) {
    return double_reg_allocator();
  } else {
    return general_reg_allocator();
  }
}

SinglePassRegisterAllocator& MidTierRegisterAllocator::AllocatorFor(
    const UnallocatedOperand* operand) {
  return AllocatorFor(RepresentationFor(operand->virtual_register()));
}

SinglePassRegisterAllocator& MidTierRegisterAllocator::AllocatorFor(
    const ConstantOperand* operand) {
  return AllocatorFor(RepresentationFor(operand->virtual_register()));
}

bool MidTierRegisterAllocator::IsFixedRegisterPolicy(
    const UnallocatedOperand* operand) {
  return operand->HasFixedRegisterPolicy() ||
         operand->HasFixedFPRegisterPolicy();
}

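// Pre-reserves fixed registers for a single instruction. Outputs, temps and
// inputs with a fixed (FP) register policy are reserved up front so that the
// subsequent allocation of the instruction's other operands cannot hand out
// those registers. Outputs with a same-as-input policy take their constraint
// from the corresponding input.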
void MidTierRegisterAllocator::ReserveFixedRegisters(int instr_index) {
  Instruction* instr = code()->InstructionAt(instr_index);
  for (size_t i = 0; i < instr->OutputCount(); i++) {
    if (!instr->OutputAt(i)->IsUnallocated()) continue;
    const UnallocatedOperand* operand =
        UnallocatedOperand::cast(instr->OutputAt(i));
    if (operand->HasSameAsInputPolicy()) {
      // The input operand carries the register constraint, so use it here to
      // reserve the register for the output (it will also be reserved for the
      // input below).
      operand = UnallocatedOperand::cast(instr->InputAt(i));
    }
    if (IsFixedRegisterPolicy(operand)) {
      AllocatorFor(operand).ReserveFixedOutputRegister(operand, instr_index);
    }
  }
  for (size_t i = 0; i < instr->TempCount(); i++) {
    if (!instr->TempAt(i)->IsUnallocated()) continue;
    const UnallocatedOperand* operand =
        UnallocatedOperand::cast(instr->TempAt(i));
    if (IsFixedRegisterPolicy(operand)) {
      AllocatorFor(operand).ReserveFixedTempRegister(operand, instr_index);
    }
  }
  for (size_t i = 0; i < instr->InputCount(); i++) {
    if (!instr->InputAt(i)->IsUnallocated()) continue;
    const UnallocatedOperand* operand =
        UnallocatedOperand::cast(instr->InputAt(i));
    if (IsFixedRegisterPolicy(operand)) {
      AllocatorFor(operand).ReserveFixedInputRegister(operand, instr_index);
    }
  }
}

void MidTierRegisterAllocator::AllocatePhiGapMoves(
    const InstructionBlock* block) {
  int successors_phi_index =
      data()->block_state(block->rpo_number()).successors_phi_index();

  // If successors_phi_index is -1 there are no phis in the successor.
  if (successors_phi_index == -1) return;

  // The last instruction of a block with phis can't require reference maps,
  // since we won't record phi gap moves that get spilled when populating the
  // reference maps.
  int instr_index = block->last_instruction_index();
  DCHECK(!code()->InstructionAt(instr_index)->HasReferenceMap());

  // If there are phis, we only have a single successor due to edge-split form.
  DCHECK_EQ(block->SuccessorCount(), 1);
  const InstructionBlock* successor = data()->GetBlock(block->successors()[0]);

  for (PhiInstruction* phi : successor->phis()) {
    int to_vreg = phi->virtual_register();
    int from_vreg = phi->operands()[successors_phi_index];

    MachineRepresentation rep = RepresentationFor(to_vreg);
    AllocatorFor(rep).AllocatePhiGapMove(to_vreg, from_vreg, instr_index);
  }
}

void MidTierRegisterAllocator::AllocatePhis(const InstructionBlock* block) {
  for (PhiInstruction* phi : block->phis()) {
    int virtual_register = phi->virtual_register();
    MachineRepresentation rep = RepresentationFor(virtual_register);
    AllocatorFor(rep).AllocatePhi(virtual_register, block);
  }
}

void MidTierRegisterAllocator::UpdateSpillRangesForLoops() {
  // Extend the spill range of any spill that crosses a loop header to
  // the full loop.
  for (InstructionBlock* block : code()->instruction_blocks()) {
    if (block->IsLoopHeader()) {
      RpoNumber last_loop_block =
          RpoNumber::FromInt(block->loop_end().ToInt() - 1);
      int last_loop_instr =
          data()->GetBlock(last_loop_block)->last_instruction_index();
      // Extend spill range for all spilled values that are live on entry to
      // the loop header.
      BitVector::Iterator iterator(&data()->spilled_virtual_registers());
      for (; !iterator.Done(); iterator.Advance()) {
        const VirtualRegisterData& vreg_data =
            VirtualRegisterDataFor(iterator.Current());
        if (vreg_data.HasSpillRange() &&
            vreg_data.spill_range()->IsLiveAt(block->first_instruction_index(),
                                              block)) {
          vreg_data.spill_range()->ExtendRangeTo(last_loop_instr);
        }
      }
    }
  }
}

void AllocateRegisters(MidTierRegisterAllocationData* data) {
  MidTierRegisterAllocator allocator(data);
  for (InstructionBlock* block :
       base::Reversed(data->code()->instruction_blocks())) {
    data->tick_counter()->TickAndMaybeEnterSafepoint();
    allocator.AllocateRegisters(block);
  }

  allocator.UpdateSpillRangesForLoops();

  data->frame()->SetAllocatedRegisters(
      allocator.general_reg_allocator().assigned_registers());
  data->frame()->SetAllocatedDoubleRegisters(
      allocator.double_reg_allocator().assigned_registers());
}

// Spill slot allocator for mid-tier register allocation.
class MidTierSpillSlotAllocator final {
 public:
  explicit MidTierSpillSlotAllocator(MidTierRegisterAllocationData* data);
  MidTierSpillSlotAllocator(const MidTierSpillSlotAllocator&) = delete;
  MidTierSpillSlotAllocator& operator=(const MidTierSpillSlotAllocator&) =
      delete;

  void Allocate(VirtualRegisterData* virtual_register);

 private:
  class SpillSlot;

  void AdvanceTo(int instr_index);
  SpillSlot* GetFreeSpillSlot(int byte_width);

  MidTierRegisterAllocationData* data() const { return data_; }
  InstructionSequence* code() const { return data()->code(); }
  Frame* frame() const { return data()->frame(); }
  Zone* zone() const { return data()->allocation_zone(); }

  struct OrderByLastUse {
    bool operator()(const SpillSlot* a, const SpillSlot* b) const;
  };

  MidTierRegisterAllocationData* const data_;
  ZonePriorityQueue<SpillSlot*, OrderByLastUse> allocated_slots_;
  ZoneLinkedList<SpillSlot*> free_slots_;
  int position_;
};

class MidTierSpillSlotAllocator::SpillSlot : public ZoneObject {
 public:
  SpillSlot(int stack_slot, int byte_width)
      : stack_slot_(stack_slot), byte_width_(byte_width), range_() {}
  SpillSlot(const SpillSlot&) = delete;
  SpillSlot& operator=(const SpillSlot&) = delete;

  void AddRange(const Range& range) { range_.AddRange(range); }

  AllocatedOperand ToOperand(MachineRepresentation rep) const {
    return AllocatedOperand(AllocatedOperand::STACK_SLOT, rep, stack_slot_);
  }

  int byte_width() const { return byte_width_; }
  int last_use() const { return range_.end(); }

 private:
  int stack_slot_;
  int byte_width_;
  Range range_;
};

bool MidTierSpillSlotAllocator::OrderByLastUse::operator()(
    const SpillSlot* a, const SpillSlot* b) const {
  return a->last_use() > b->last_use();
}

MidTierSpillSlotAllocator::MidTierSpillSlotAllocator(
    MidTierRegisterAllocationData* data)
    : data_(data),
      allocated_slots_(data->allocation_zone()),
      free_slots_(data->allocation_zone()),
      position_(0) {}

void MidTierSpillSlotAllocator::AdvanceTo(int instr_index) {
  // Move any slots that are no longer in use to the free slots list.
  DCHECK_LE(position_, instr_index);
  while (!allocated_slots_.empty() &&
         instr_index > allocated_slots_.top()->last_use()) {
    free_slots_.push_front(allocated_slots_.top());
    allocated_slots_.pop();
  }
  position_ = instr_index;
}

MidTierSpillSlotAllocator::SpillSlot*
MidTierSpillSlotAllocator::GetFreeSpillSlot(int byte_width) {
  for (auto it = free_slots_.begin(); it != free_slots_.end(); ++it) {
    SpillSlot* slot = *it;
    if (slot->byte_width() == byte_width) {
      free_slots_.erase(it);
      return slot;
    }
  }
  return nullptr;
}

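// Allocates a stack slot for a single spilled virtual register: first advance
// to the start of its live range, releasing any slots whose last use has
// passed, then reuse a free slot of the same byte width if one exists,
// otherwise allocate a fresh frame slot. The slot's range is extended to cover
// this spill range and the register's pending spill operands are committed to
// the slot.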
void MidTierSpillSlotAllocator::Allocate(
    VirtualRegisterData* virtual_register) {
  DCHECK(virtual_register->HasPendingSpillOperand());
  VirtualRegisterData::SpillRange* spill_range =
      virtual_register->spill_range();
  MachineRepresentation rep =
      data()->RepresentationFor(virtual_register->vreg());
  int byte_width = ByteWidthForStackSlot(rep);
  Range live_range = spill_range->live_range();

  AdvanceTo(live_range.start());

  // Try to re-use an existing free spill slot.
  SpillSlot* slot = GetFreeSpillSlot(byte_width);
  if (slot == nullptr) {
    // Otherwise allocate a new slot.
    int stack_slot = frame()->AllocateSpillSlot(byte_width);
    slot = zone()->New<SpillSlot>(stack_slot, byte_width);
  }

  // Extend the range of the slot to include this spill range, and allocate the
  // pending spill operands with this slot.
  slot->AddRange(live_range);
  virtual_register->AllocatePendingSpillOperand(slot->ToOperand(rep));
  allocated_slots_.push(slot);
}

void AllocateSpillSlots(MidTierRegisterAllocationData* data) {
  ZoneVector<VirtualRegisterData*> spilled(data->allocation_zone());
  BitVector::Iterator iterator(&data->spilled_virtual_registers());
  for (; !iterator.Done(); iterator.Advance()) {
    VirtualRegisterData& vreg_data =
        data->VirtualRegisterDataFor(iterator.Current());
    if (vreg_data.HasPendingSpillOperand()) {
      spilled.push_back(&vreg_data);
    }
  }

  // Sort the spill ranges by order of their first use to enable linear
  // allocation of spill slots.
  std::sort(spilled.begin(), spilled.end(),
            [](const VirtualRegisterData* a, const VirtualRegisterData* b) {
              return a->spill_range()->live_range().start() <
                     b->spill_range()->live_range().start();
            });

  // Allocate a spill slot for each virtual register with a spill range.
  MidTierSpillSlotAllocator allocator(data);
  for (VirtualRegisterData* spill : spilled) {
    allocator.Allocate(spill);
  }
}

// Populates reference maps for mid-tier register allocation.
class MidTierReferenceMapPopulator final {
 public:
  explicit MidTierReferenceMapPopulator(MidTierRegisterAllocationData* data);
  MidTierReferenceMapPopulator(const MidTierReferenceMapPopulator&) = delete;
  MidTierReferenceMapPopulator& operator=(const MidTierReferenceMapPopulator&) =
      delete;

  void RecordReferences(const VirtualRegisterData& virtual_register);

 private:
  MidTierRegisterAllocationData* data() const { return data_; }
  InstructionSequence* code() const { return data()->code(); }

  MidTierRegisterAllocationData* const data_;
};

MidTierReferenceMapPopulator::MidTierReferenceMapPopulator(
    MidTierRegisterAllocationData* data)
    : data_(data) {}

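// Records the spill slot of a reference-typed virtual register in the
// reference maps of all safepoint instructions whose index falls within the
// register's spill range and at which the spill is live.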
void MidTierReferenceMapPopulator::RecordReferences(
    const VirtualRegisterData& virtual_register) {
  if (!virtual_register.HasAllocatedSpillOperand()) return;
  if (!code()->IsReference(virtual_register.vreg())) return;

  VirtualRegisterData::SpillRange* spill_range = virtual_register.spill_range();
  Range& live_range = spill_range->live_range();
  AllocatedOperand allocated =
      *AllocatedOperand::cast(virtual_register.spill_operand());
  for (int instr_index : data()->reference_map_instructions()) {
    if (instr_index > live_range.end() || instr_index < live_range.start())
      continue;
    Instruction* instr = data()->code()->InstructionAt(instr_index);
    DCHECK(instr->HasReferenceMap());

    if (spill_range->IsLiveAt(instr_index, instr->block())) {
      instr->reference_map()->RecordReference(allocated);
    }
  }
}

void PopulateReferenceMaps(MidTierRegisterAllocationData* data) {
  MidTierReferenceMapPopulator populator(data);
  BitVector::Iterator iterator(&data->spilled_virtual_registers());
  for (; !iterator.Done(); iterator.Advance()) {
    populator.RecordReferences(
        data->VirtualRegisterDataFor(iterator.Current()));
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8