/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2018 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include "common/sid.h"

#include <algorithm>
#include <cstring>
#include <map>
#include <set>
#include <stack>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace std {
template <> struct hash<aco::Temp> {
   size_t operator()(aco::Temp temp) const noexcept
   {
      uint32_t v;
      std::memcpy(&v, &temp, sizeof(temp));
      return std::hash<uint32_t>{}(v);
   }
};
} // namespace std

/*
 * Implements the spilling algorithm on SSA-form from
 * "Register Spilling and Live-Range Splitting for SSA-Form Programs"
 * by Matthias Braun and Sebastian Hack.
 */
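
/* Rough pipeline of this file (informal sketch; the entry point driving these
 * phases lives further down in this file):
 *
 *   compute_global_next_uses(ctx);   // next-use distances per block, via a
 *                                    // backward fixed-point over the CFG
 *   get_rematerialize_info(ctx);     // mark temps that are cheap to recompute
 *   for each block:                  // in program order
 *      spill_block(ctx, idx);        // pick spills, emit p_spill/p_reload and
 *                                    // coupling code on control-flow edges
 *   assign_spill_slots(ctx, ...);    // color spill ids onto slots and lower
 *                                    // the pseudo ops to real instructions
 *
 * Until slot assignment, spilled values are tracked as abstract spill ids, so
 * the per-block phase only has to reason about interferences and affinities.
 */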

namespace aco {

namespace {

struct remat_info {
   Instruction* instr;
};

struct spill_ctx {
   RegisterDemand target_pressure;
   Program* program;
   std::vector<std::vector<RegisterDemand>> register_demand;
   std::vector<std::map<Temp, Temp>> renames;
   std::vector<std::unordered_map<Temp, uint32_t>> spills_entry;
   std::vector<std::unordered_map<Temp, uint32_t>> spills_exit;

   std::vector<bool> processed;
   std::stack<Block*, std::vector<Block*>> loop_header;
   std::vector<std::unordered_map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_start;
   std::vector<std::unordered_map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_end;
   std::vector<std::vector<std::pair<Temp, uint32_t>>> local_next_use_distance; /* Working buffer */
   std::vector<std::pair<RegClass, std::unordered_set<uint32_t>>> interferences;
   std::vector<std::vector<uint32_t>> affinities;
   std::vector<bool> is_reloaded;
   std::unordered_map<Temp, remat_info> remat;
   std::set<Instruction*> unused_remats;
   unsigned wave_size;

   spill_ctx(const RegisterDemand target_pressure_, Program* program_,
             std::vector<std::vector<RegisterDemand>> register_demand_)
       : target_pressure(target_pressure_), program(program_),
         register_demand(std::move(register_demand_)), renames(program->blocks.size()),
         spills_entry(program->blocks.size()), spills_exit(program->blocks.size()),
         processed(program->blocks.size(), false), wave_size(program->wave_size)
   {}

   void add_affinity(uint32_t first, uint32_t second)
   {
      unsigned found_first = affinities.size();
      unsigned found_second = affinities.size();
      for (unsigned i = 0; i < affinities.size(); i++) {
         std::vector<uint32_t>& vec = affinities[i];
         for (uint32_t entry : vec) {
            if (entry == first)
               found_first = i;
            else if (entry == second)
               found_second = i;
         }
      }
      if (found_first == affinities.size() && found_second == affinities.size()) {
         affinities.emplace_back(std::vector<uint32_t>({first, second}));
      } else if (found_first < affinities.size() && found_second == affinities.size()) {
         affinities[found_first].push_back(second);
      } else if (found_second < affinities.size() && found_first == affinities.size()) {
         affinities[found_second].push_back(first);
      } else if (found_first != found_second) {
         /* merge second into first */
         affinities[found_first].insert(affinities[found_first].end(),
                                        affinities[found_second].begin(),
                                        affinities[found_second].end());
         affinities.erase(std::next(affinities.begin(), found_second));
      } else {
         assert(found_first == found_second);
      }
   }
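
   /* Worked example of the affinity bookkeeping above (hypothetical spill ids):
    *
    *   add_affinity(1, 2);  // affinities = {{1, 2}}
    *   add_affinity(3, 4);  // affinities = {{1, 2}, {3, 4}}
    *   add_affinity(2, 3);  // ids found in different groups -> merge:
    *                        // affinities = {{1, 2, 3, 4}}
    *
    * Ids within one group are later assigned the same spill slot (see
    * assign_spill_slots_helper), so spill/reload pairs joined across
    * control-flow edges don't need a copy between slots.
    */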

   void add_interference(uint32_t first, uint32_t second)
   {
      if (interferences[first].first.type() != interferences[second].first.type())
         return;

      bool inserted = interferences[first].second.insert(second).second;
      if (inserted)
         interferences[second].second.insert(first);
   }

   uint32_t allocate_spill_id(RegClass rc)
   {
      interferences.emplace_back(rc, std::unordered_set<uint32_t>());
      is_reloaded.push_back(false);
      return next_spill_id++;
   }

   uint32_t next_spill_id = 0;
};

int32_t
get_dominator(int idx_a, int idx_b, Program* program, bool is_linear)
{
   if (idx_a == -1)
      return idx_b;
   if (idx_b == -1)
      return idx_a;
   if (is_linear) {
      while (idx_a != idx_b) {
         if (idx_a > idx_b)
            idx_a = program->blocks[idx_a].linear_idom;
         else
            idx_b = program->blocks[idx_b].linear_idom;
      }
   } else {
      while (idx_a != idx_b) {
         if (idx_a > idx_b)
            idx_a = program->blocks[idx_a].logical_idom;
         else
            idx_b = program->blocks[idx_b].logical_idom;
      }
   }
   assert(idx_a != -1);
   return idx_a;
}
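
/* Example walk (hypothetical immediate-dominator chains): with
 * linear_idom(5) = 3 and linear_idom(3) = 2,
 *
 *   get_dominator(2, 5, program, true):
 *      idx_a = 2, idx_b = 5  ->  idx_b = 3  ->  idx_b = 2  ->  returns 2
 *
 * The larger index is repeatedly replaced by its immediate dominator until
 * both meet. This terminates because the block indices form a dominance-
 * compatible order: an idom always has a smaller index than the blocks it
 * dominates.
 */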

void
next_uses_per_block(spill_ctx& ctx, unsigned block_idx, uint32_t& worklist)
{
   Block* block = &ctx.program->blocks[block_idx];
   ctx.next_use_distances_start[block_idx] = ctx.next_use_distances_end[block_idx];
   auto& next_use_distances_start = ctx.next_use_distances_start[block_idx];

   /* to compute the next use distance at the beginning of the block, we have to add the block's
    * size */
   for (std::unordered_map<Temp, std::pair<uint32_t, uint32_t>>::iterator it =
           next_use_distances_start.begin();
        it != next_use_distances_start.end(); ++it)
      it->second.second = it->second.second + block->instructions.size();

   int idx = block->instructions.size() - 1;
   while (idx >= 0) {
      aco_ptr<Instruction>& instr = block->instructions[idx];

      if (instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi)
         break;

      for (const Definition& def : instr->definitions) {
         if (def.isTemp())
            next_use_distances_start.erase(def.getTemp());
      }

      for (const Operand& op : instr->operands) {
         /* omit exec mask */
         if (op.isFixed() && op.physReg() == exec)
            continue;
         if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear())
            continue;
         if (op.isTemp())
            next_use_distances_start[op.getTemp()] = {block_idx, idx};
      }
      idx--;
   }

   assert(block_idx != 0 || next_use_distances_start.empty());
   std::unordered_set<Temp> phi_defs;
   while (idx >= 0) {
      aco_ptr<Instruction>& instr = block->instructions[idx];
      assert(instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi);

      std::pair<uint32_t, uint32_t> distance{block_idx, 0};

      auto it = instr->definitions[0].isTemp()
                   ? next_use_distances_start.find(instr->definitions[0].getTemp())
                   : next_use_distances_start.end();
      if (it != next_use_distances_start.end() &&
          phi_defs.insert(instr->definitions[0].getTemp()).second) {
         distance = it->second;
      }

      for (unsigned i = 0; i < instr->operands.size(); i++) {
         unsigned pred_idx =
            instr->opcode == aco_opcode::p_phi ? block->logical_preds[i] : block->linear_preds[i];
         if (instr->operands[i].isTemp()) {
            auto insert_result = ctx.next_use_distances_end[pred_idx].insert(
               std::make_pair(instr->operands[i].getTemp(), distance));
            const bool inserted = insert_result.second;
            std::pair<uint32_t, uint32_t>& entry_distance = insert_result.first->second;
            if (inserted || entry_distance != distance)
               worklist = std::max(worklist, pred_idx + 1);
            entry_distance = distance;
         }
      }
      idx--;
   }

   /* all remaining live vars must be live-out at the predecessors */
   for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair : next_use_distances_start) {
      Temp temp = pair.first;
      if (phi_defs.count(temp)) {
         continue;
      }
      uint32_t distance = pair.second.second;
      uint32_t dom = pair.second.first;
      std::vector<unsigned>& preds = temp.is_linear() ? block->linear_preds : block->logical_preds;
      for (unsigned pred_idx : preds) {
         if (ctx.program->blocks[pred_idx].loop_nest_depth > block->loop_nest_depth)
            distance += 0xFFFF;
         auto insert_result = ctx.next_use_distances_end[pred_idx].insert(
            std::make_pair(temp, std::pair<uint32_t, uint32_t>{}));
         const bool inserted = insert_result.second;
         std::pair<uint32_t, uint32_t>& entry_distance = insert_result.first->second;

         if (!inserted) {
            dom = get_dominator(dom, entry_distance.first, ctx.program, temp.is_linear());
            distance = std::min(entry_distance.second, distance);
         }
         if (entry_distance != std::pair<uint32_t, uint32_t>{dom, distance}) {
            worklist = std::max(worklist, pred_idx + 1);
            entry_distance = {dom, distance};
         }
      }
   }
}
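
/* The distance pairs are (block index of the next use, instruction distance).
 * Small worked example, assuming %t is next used by instruction 7 of block b:
 *
 *   next_use_distances_start[b][%t] = {b, 7}
 *   next_use_distances_end[p][%t]   = {b, 7}         for each predecessor p
 *                                     (+ 0xFFFF if p is deeper in a loop)
 *   next_use_distances_start[p][%t] = {b, 7 + |p|}   where |p| is p's
 *                                                    instruction count
 *
 * The 0xFFFF penalty makes values that are live-through a loop look far away,
 * so the heuristics prefer spilling them over values used inside the loop.
 */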

void
compute_global_next_uses(spill_ctx& ctx)
{
   ctx.next_use_distances_start.resize(ctx.program->blocks.size());
   ctx.next_use_distances_end.resize(ctx.program->blocks.size());

   uint32_t worklist = ctx.program->blocks.size();
   while (worklist) {
      unsigned block_idx = --worklist;
      next_uses_per_block(ctx, block_idx, worklist);
   }
}

bool
should_rematerialize(aco_ptr<Instruction>& instr)
{
   /* TODO: rematerialization is only supported for VOP1, SOP1 and PSEUDO */
   if (instr->format != Format::VOP1 && instr->format != Format::SOP1 &&
       instr->format != Format::PSEUDO && instr->format != Format::SOPK)
      return false;
   /* TODO: pseudo-instruction rematerialization is only supported for
    * p_create_vector/p_parallelcopy */
   if (instr->isPseudo() && instr->opcode != aco_opcode::p_create_vector &&
       instr->opcode != aco_opcode::p_parallelcopy)
      return false;
   if (instr->isSOPK() && instr->opcode != aco_opcode::s_movk_i32)
      return false;

   for (const Operand& op : instr->operands) {
      /* TODO: rematerialization using temporaries isn't yet supported */
      if (!op.isConstant())
         return false;
   }

   /* TODO: rematerialization with multiple definitions isn't yet supported */
   if (instr->definitions.size() > 1)
      return false;

   return true;
}
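
/* What these checks accept or reject, on illustrative instructions:
 *
 *   v_mov_b32 %t, 1.0            VOP1, constant operand, one def -> remat
 *   s_movk_i32 %s, 0x42          the single allowed SOPK opcode  -> remat
 *   p_parallelcopy %t = 2.0      allowed pseudo, constant        -> remat
 *   v_add_f32 %t, %a, %b         temporary operands              -> no remat
 *   p_create_vector %v = %a, %b  temporary operands              -> no remat
 */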

aco_ptr<Instruction>
do_reload(spill_ctx& ctx, Temp tmp, Temp new_name, uint32_t spill_id)
{
   std::unordered_map<Temp, remat_info>::iterator remat = ctx.remat.find(tmp);
   if (remat != ctx.remat.end()) {
      Instruction* instr = remat->second.instr;
      assert((instr->isVOP1() || instr->isSOP1() || instr->isPseudo() || instr->isSOPK()) &&
             "unsupported");
      assert((instr->format != Format::PSEUDO || instr->opcode == aco_opcode::p_create_vector ||
              instr->opcode == aco_opcode::p_parallelcopy) &&
             "unsupported");
      assert(instr->definitions.size() == 1 && "unsupported");

      aco_ptr<Instruction> res;
      if (instr->isVOP1()) {
         res.reset(create_instruction<VOP1_instruction>(
            instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
      } else if (instr->isSOP1()) {
         res.reset(create_instruction<SOP1_instruction>(
            instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
      } else if (instr->isPseudo()) {
         res.reset(create_instruction<Pseudo_instruction>(
            instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
      } else if (instr->isSOPK()) {
         res.reset(create_instruction<SOPK_instruction>(
            instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
         res->sopk().imm = instr->sopk().imm;
      }
      for (unsigned i = 0; i < instr->operands.size(); i++) {
         res->operands[i] = instr->operands[i];
         if (instr->operands[i].isTemp()) {
            assert(false && "unsupported");
            if (ctx.remat.count(instr->operands[i].getTemp()))
               ctx.unused_remats.erase(ctx.remat[instr->operands[i].getTemp()].instr);
         }
      }
      res->definitions[0] = Definition(new_name);
      return res;
   } else {
      aco_ptr<Pseudo_instruction> reload{
         create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 1, 1)};
      reload->operands[0] = Operand::c32(spill_id);
      reload->definitions[0] = Definition(new_name);
      ctx.is_reloaded[spill_id] = true;
      return reload;
   }
}
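
/* Illustration (hypothetical temps): reloading a rematerializable %t that was
 * defined by "v_mov_b32 %t, 1.0" re-emits the defining instruction under a
 * fresh name,
 *
 *   %t2 = v_mov_b32 1.0
 *
 * while any other value becomes a reload pseudo, lowered to an actual scratch
 * or VGPR-lane load once spill slots are assigned:
 *
 *   %t2 = p_reload <spill_id>
 *
 * Only the p_reload path sets is_reloaded[spill_id]; ids that are spilled but
 * never reloaded this way won't need a spill slot at all.
 */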

void
get_rematerialize_info(spill_ctx& ctx)
{
   for (Block& block : ctx.program->blocks) {
      bool logical = false;
      for (aco_ptr<Instruction>& instr : block.instructions) {
         if (instr->opcode == aco_opcode::p_logical_start)
            logical = true;
         else if (instr->opcode == aco_opcode::p_logical_end)
            logical = false;
         if (logical && should_rematerialize(instr)) {
            for (const Definition& def : instr->definitions) {
               if (def.isTemp()) {
                  ctx.remat[def.getTemp()] = remat_info{instr.get()};
                  ctx.unused_remats.insert(instr.get());
               }
            }
         }
      }
   }
}

void
update_local_next_uses(spill_ctx& ctx, Block* block,
                       std::vector<std::vector<std::pair<Temp, uint32_t>>>& local_next_uses)
{
   if (local_next_uses.size() < block->instructions.size()) {
      /* Allocate more next-use-maps. Note that by never reducing the vector size, we enable
       * future calls to this function to re-use already allocated map memory. */
      local_next_uses.resize(block->instructions.size());
   }

   local_next_uses[block->instructions.size() - 1].clear();
   for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
        ctx.next_use_distances_end[block->index]) {
      local_next_uses[block->instructions.size() - 1].push_back(std::make_pair<Temp, uint32_t>(
         (Temp)pair.first, pair.second.second + block->instructions.size()));
   }

   for (int idx = block->instructions.size() - 1; idx >= 0; idx--) {
      aco_ptr<Instruction>& instr = block->instructions[idx];
      if (!instr)
         break;
      if (instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi)
         break;

      if (idx != (int)block->instructions.size() - 1) {
         local_next_uses[idx] = local_next_uses[idx + 1];
      }

      for (const Operand& op : instr->operands) {
         if (op.isFixed() && op.physReg() == exec)
            continue;
         if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear())
            continue;
         if (op.isTemp()) {
            auto it = std::find_if(local_next_uses[idx].begin(), local_next_uses[idx].end(),
                                   [op](auto& pair) { return pair.first == op.getTemp(); });
            if (it == local_next_uses[idx].end()) {
               local_next_uses[idx].push_back(std::make_pair<Temp, uint32_t>(op.getTemp(), idx));
            } else {
               it->second = idx;
            }
         }
      }
      for (const Definition& def : instr->definitions) {
         if (def.isTemp()) {
            auto it = std::find_if(local_next_uses[idx].begin(), local_next_uses[idx].end(),
                                   [def](auto& pair) { return pair.first == def.getTemp(); });
            if (it != local_next_uses[idx].end()) {
               local_next_uses[idx].erase(it);
            }
         }
      }
   }
}

RegisterDemand
get_demand_before(spill_ctx& ctx, unsigned block_idx, unsigned idx)
{
   if (idx == 0) {
      RegisterDemand demand = ctx.register_demand[block_idx][idx];
      aco_ptr<Instruction>& instr = ctx.program->blocks[block_idx].instructions[idx];
      aco_ptr<Instruction> instr_before(nullptr);
      return get_demand_before(demand, instr, instr_before);
   } else {
      return ctx.register_demand[block_idx][idx - 1];
   }
}
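
/* Sketch of the indexing convention assumed here: register_demand[b][i] holds
 * the demand at instruction i (including its definitions), so the demand
 * before instruction i is simply the demand at i - 1. Only at i == 0 does the
 * overload of get_demand_before() taking the instruction itself have to
 * reconstruct the pre-instruction demand from the instruction's definitions
 * and killed operands.
 */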

RegisterDemand
get_live_in_demand(spill_ctx& ctx, unsigned block_idx)
{
   unsigned idx = 0;
   RegisterDemand reg_pressure = RegisterDemand();
   Block& block = ctx.program->blocks[block_idx];
   for (aco_ptr<Instruction>& phi : block.instructions) {
      if (!is_phi(phi))
         break;
      idx++;

      /* Killed phi definitions increase pressure in the predecessor but not
       * the block they're in. Since the loops below control pressure both at
       * the start of this block and at the ends of its predecessors, we need
       * to count killed unspilled phi definitions here. */
      if (phi->definitions[0].isTemp() && phi->definitions[0].isKill() &&
          !ctx.spills_entry[block_idx].count(phi->definitions[0].getTemp()))
         reg_pressure += phi->definitions[0].getTemp();
   }

   reg_pressure += get_demand_before(ctx, block_idx, idx);

   /* Consider register pressure from linear predecessors. This can affect
    * reg_pressure if the branch instructions define sgprs. */
   for (unsigned pred : block.linear_preds)
      reg_pressure.sgpr =
         std::max<int16_t>(reg_pressure.sgpr, ctx.register_demand[pred].back().sgpr);

   return reg_pressure;
}

RegisterDemand
init_live_in_vars(spill_ctx& ctx, Block* block, unsigned block_idx)
{
   RegisterDemand spilled_registers;

   /* first block, nothing was spilled before */
   if (block_idx == 0)
      return {0, 0};

   /* next use distances at the beginning of the current block */
   const auto& next_use_distances = ctx.next_use_distances_start[block_idx];

   /* loop header block */
   if (block->loop_nest_depth > ctx.program->blocks[block_idx - 1].loop_nest_depth) {
      assert(block->linear_preds[0] == block_idx - 1);
      assert(block->logical_preds[0] == block_idx - 1);

      /* create new loop_info */
      ctx.loop_header.emplace(block);

      /* check how many live-through variables should be spilled */
      RegisterDemand reg_pressure = get_live_in_demand(ctx, block_idx);
      RegisterDemand loop_demand = reg_pressure;
      unsigned i = block_idx;
      while (ctx.program->blocks[i].loop_nest_depth >= block->loop_nest_depth) {
         assert(ctx.program->blocks.size() > i);
         loop_demand.update(ctx.program->blocks[i++].register_demand);
      }
      unsigned loop_end = i;

      for (auto spilled : ctx.spills_exit[block_idx - 1]) {
         auto it = next_use_distances.find(spilled.first);

         /* variable is not live at loop entry: probably a phi operand */
         if (it == next_use_distances.end())
            continue;

         /* keep constants and live-through variables spilled */
         if (it->second.first >= loop_end || ctx.remat.count(spilled.first)) {
            ctx.spills_entry[block_idx][spilled.first] = spilled.second;
            spilled_registers += spilled.first;
            loop_demand -= spilled.first;
         }
      }

      /* select live-through variables and constants */
      RegType type = RegType::vgpr;
      while (loop_demand.exceeds(ctx.target_pressure)) {
         /* if VGPR demand is low enough, select SGPRs */
         if (type == RegType::vgpr && loop_demand.vgpr <= ctx.target_pressure.vgpr)
            type = RegType::sgpr;
         /* if SGPR demand is low enough, break */
         if (type == RegType::sgpr && loop_demand.sgpr <= ctx.target_pressure.sgpr)
            break;

         unsigned distance = 0;
         Temp to_spill;
         for (const std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
              next_use_distances) {
            if (pair.first.type() == type &&
                (pair.second.first >= loop_end ||
                 (ctx.remat.count(pair.first) && type == RegType::sgpr)) &&
                pair.second.second > distance && !ctx.spills_entry[block_idx].count(pair.first)) {
               to_spill = pair.first;
               distance = pair.second.second;
            }
         }

         /* select SGPRs or break */
         if (distance == 0) {
            if (type == RegType::sgpr)
               break;
            type = RegType::sgpr;
            continue;
         }

         uint32_t spill_id;
         if (!ctx.spills_exit[block_idx - 1].count(to_spill)) {
            spill_id = ctx.allocate_spill_id(to_spill.regClass());
         } else {
            spill_id = ctx.spills_exit[block_idx - 1][to_spill];
         }

         ctx.spills_entry[block_idx][to_spill] = spill_id;
         spilled_registers += to_spill;
         loop_demand -= to_spill;
      }

      /* shortcut */
      if (!loop_demand.exceeds(ctx.target_pressure))
         return spilled_registers;

      /* if reg pressure is too high at beginning of loop, add variables with furthest use */
      reg_pressure -= spilled_registers;

      while (reg_pressure.exceeds(ctx.target_pressure)) {
         unsigned distance = 0;
         Temp to_spill;
         type = reg_pressure.vgpr > ctx.target_pressure.vgpr ? RegType::vgpr : RegType::sgpr;

         for (const std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
              next_use_distances) {
            if (pair.first.type() == type && pair.second.second > distance &&
                !ctx.spills_entry[block_idx].count(pair.first)) {
               to_spill = pair.first;
               distance = pair.second.second;
            }
         }
         assert(distance != 0);
         ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
         spilled_registers += to_spill;
         reg_pressure -= to_spill;
      }

      return spilled_registers;
   }

   /* branch block */
   if (block->linear_preds.size() == 1 && !(block->kind & block_kind_loop_exit)) {
      /* keep variables spilled if they are alive and not used in the current block */
      unsigned pred_idx = block->linear_preds[0];
      for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
         if (pair.first.type() != RegType::sgpr) {
            continue;
         }
         auto next_use_distance_it = next_use_distances.find(pair.first);
         if (next_use_distance_it != next_use_distances.end() &&
             next_use_distance_it->second.first != block_idx) {
            ctx.spills_entry[block_idx].insert(pair);
            spilled_registers.sgpr += pair.first.size();
         }
      }
      if (block->logical_preds.size() == 1) {
         pred_idx = block->logical_preds[0];
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (pair.first.type() != RegType::vgpr) {
               continue;
            }
            auto next_use_distance_it = next_use_distances.find(pair.first);
            if (next_use_distance_it != next_use_distances.end() &&
                next_use_distance_it->second.first != block_idx) {
               ctx.spills_entry[block_idx].insert(pair);
               spilled_registers.vgpr += pair.first.size();
            }
         }
      }

      /* if register demand is still too high, we just keep all spilled live vars
       * and process the block */
      if (block->register_demand.sgpr - spilled_registers.sgpr > ctx.target_pressure.sgpr) {
         pred_idx = block->linear_preds[0];
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (pair.first.type() == RegType::sgpr && next_use_distances.count(pair.first) &&
                ctx.spills_entry[block_idx].insert(pair).second) {
               spilled_registers.sgpr += pair.first.size();
            }
         }
      }
      if (block->register_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr &&
          block->logical_preds.size() == 1) {
         pred_idx = block->logical_preds[0];
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (pair.first.type() == RegType::vgpr && next_use_distances.count(pair.first) &&
                ctx.spills_entry[block_idx].insert(pair).second) {
               spilled_registers.vgpr += pair.first.size();
            }
         }
      }

      return spilled_registers;
   }

   /* else: merge block */
   std::set<Temp> partial_spills;

   /* keep variables spilled on all incoming paths */
   for (const std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair : next_use_distances) {
      std::vector<unsigned>& preds =
         pair.first.is_linear() ? block->linear_preds : block->logical_preds;
      /* If it can be rematerialized, keep the variable spilled if all predecessors do not reload
       * it. Otherwise, if any predecessor reloads it, ensure it's reloaded on all other
       * predecessors. The idea is that it's better in practice to rematerialize redundantly than
       * to create lots of phis. */
      /* TODO: test this idea with more than Dawn of War III shaders (the current pipeline-db
       * doesn't seem to exercise this path much) */
      bool remat = ctx.remat.count(pair.first);
      bool spill = !remat;
      uint32_t spill_id = 0;
      for (unsigned pred_idx : preds) {
         /* variable is not even live at the predecessor: probably from a phi */
         if (!ctx.next_use_distances_end[pred_idx].count(pair.first)) {
            spill = false;
            break;
         }
         if (!ctx.spills_exit[pred_idx].count(pair.first)) {
            if (!remat)
               spill = false;
         } else {
            partial_spills.insert(pair.first);
            /* it might be that on one incoming path, the variable has a different spill_id, but
             * add_coupling_code() will take care of that. */
            spill_id = ctx.spills_exit[pred_idx][pair.first];
            if (remat)
               spill = true;
         }
      }
      if (spill) {
         ctx.spills_entry[block_idx][pair.first] = spill_id;
         partial_spills.erase(pair.first);
         spilled_registers += pair.first;
      }
   }

   /* same for phis */
   for (aco_ptr<Instruction>& phi : block->instructions) {
      if (!is_phi(phi))
         break;
      if (!phi->definitions[0].isTemp())
         continue;

      std::vector<unsigned>& preds =
         phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      bool spill = true;

      for (unsigned i = 0; i < phi->operands.size(); i++) {
         /* non-temp operands can increase the register pressure */
         if (!phi->operands[i].isTemp()) {
            partial_spills.insert(phi->definitions[0].getTemp());
            continue;
         }

         if (!ctx.spills_exit[preds[i]].count(phi->operands[i].getTemp()))
            spill = false;
         else
            partial_spills.insert(phi->definitions[0].getTemp());
      }
      if (spill) {
         ctx.spills_entry[block_idx][phi->definitions[0].getTemp()] =
            ctx.allocate_spill_id(phi->definitions[0].regClass());
         partial_spills.erase(phi->definitions[0].getTemp());
         spilled_registers += phi->definitions[0].getTemp();
      }
   }

   /* if reg pressure at first instruction is still too high, add partially spilled variables */
   RegisterDemand reg_pressure = get_live_in_demand(ctx, block_idx);
   reg_pressure -= spilled_registers;

   while (reg_pressure.exceeds(ctx.target_pressure)) {
      assert(!partial_spills.empty());
      std::set<Temp>::iterator it = partial_spills.begin();
      Temp to_spill = Temp();
      unsigned distance = 0;
      RegType type = reg_pressure.vgpr > ctx.target_pressure.vgpr ? RegType::vgpr : RegType::sgpr;

      while (it != partial_spills.end()) {
         assert(!ctx.spills_entry[block_idx].count(*it));

         if (it->type() == type && next_use_distances.at(*it).second > distance) {
            distance = next_use_distances.at(*it).second;
            to_spill = *it;
         }
         ++it;
      }
      assert(distance != 0);

      ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
      partial_spills.erase(to_spill);
      spilled_registers += to_spill;
      reg_pressure -= to_spill;
   }

   return spilled_registers;
}
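
/* Informal summary of the three entry cases above (hypothetical block indices):
 *
 *   loop header:  prefer spilling live-through values, i.e. with the loop
 *                 spanning blocks [5..9] (loop_end = 10), a temp whose next
 *                 use is {10, d} stays spilled across the whole loop;
 *                 rematerializable values are kept spilled as well
 *   branch block: inherit the single predecessor's spills that aren't used
 *                 in this block
 *   merge block:  keep values spilled on all incoming paths; a phi is spilled
 *                 when all of its temp operands are already spilled
 */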

void
add_coupling_code(spill_ctx& ctx, Block* block, unsigned block_idx)
{
   /* no coupling code necessary */
   if (block->linear_preds.size() == 0)
      return;

   std::vector<aco_ptr<Instruction>> instructions;
   /* branch block: TODO take other branch into consideration */
   if (block->linear_preds.size() == 1 &&
       !(block->kind & (block_kind_loop_exit | block_kind_loop_header))) {
      assert(ctx.processed[block->linear_preds[0]]);
      assert(ctx.register_demand[block_idx].size() == block->instructions.size());
      std::vector<RegisterDemand> reg_demand;
      unsigned insert_idx = 0;
      RegisterDemand demand_before = get_demand_before(ctx, block_idx, 0);

      for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& live :
           ctx.next_use_distances_start[block_idx]) {
         const unsigned pred_idx = block->linear_preds[0];

         if (!live.first.is_linear())
            continue;
         /* still spilled */
         if (ctx.spills_entry[block_idx].count(live.first))
            continue;

         /* in register at end of predecessor */
         auto spills_exit_it = ctx.spills_exit[pred_idx].find(live.first);
         if (spills_exit_it == ctx.spills_exit[pred_idx].end()) {
            std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
            if (it != ctx.renames[pred_idx].end())
               ctx.renames[block_idx].insert(*it);
            continue;
         }

         /* variable is spilled at predecessor and live at current block: create reload
          * instruction */
         Temp new_name = ctx.program->allocateTmp(live.first.regClass());
         aco_ptr<Instruction> reload = do_reload(ctx, live.first, new_name, spills_exit_it->second);
         instructions.emplace_back(std::move(reload));
         reg_demand.push_back(demand_before);
         ctx.renames[block_idx][live.first] = new_name;
      }

      if (block->logical_preds.size() == 1) {
         do {
            assert(insert_idx < block->instructions.size());
            instructions.emplace_back(std::move(block->instructions[insert_idx]));
            reg_demand.push_back(ctx.register_demand[block_idx][insert_idx]);
            insert_idx++;
         } while (instructions.back()->opcode != aco_opcode::p_logical_start);

         unsigned pred_idx = block->logical_preds[0];
         for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& live :
              ctx.next_use_distances_start[block_idx]) {
            if (live.first.is_linear())
               continue;
            /* still spilled */
            if (ctx.spills_entry[block_idx].count(live.first))
               continue;

            /* in register at end of predecessor */
            auto spills_exit_it = ctx.spills_exit[pred_idx].find(live.first);
            if (spills_exit_it == ctx.spills_exit[pred_idx].end()) {
               std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
               if (it != ctx.renames[pred_idx].end())
                  ctx.renames[block_idx].insert(*it);
               continue;
            }

            /* variable is spilled at predecessor and live at current block:
             * create reload instruction */
            Temp new_name = ctx.program->allocateTmp(live.first.regClass());
            aco_ptr<Instruction> reload =
               do_reload(ctx, live.first, new_name, spills_exit_it->second);
            instructions.emplace_back(std::move(reload));
            reg_demand.emplace_back(reg_demand.back());
            ctx.renames[block_idx][live.first] = new_name;
         }
      }

      /* combine new reload instructions with original block */
      if (!instructions.empty()) {
         reg_demand.insert(reg_demand.end(),
                           std::next(ctx.register_demand[block->index].begin(), insert_idx),
                           ctx.register_demand[block->index].end());
         ctx.register_demand[block_idx] = std::move(reg_demand);
         instructions.insert(instructions.end(),
                             std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(
                                std::next(block->instructions.begin(), insert_idx)),
                             std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(
                                block->instructions.end()));
         block->instructions = std::move(instructions);
      }
      return;
   }

   /* loop header and merge blocks: check if all (linear) predecessors have been processed */
   for (ASSERTED unsigned pred : block->linear_preds)
      assert(ctx.processed[pred]);

   /* iterate the phi nodes for which operands to spill at the predecessor */
   for (aco_ptr<Instruction>& phi : block->instructions) {
      if (!is_phi(phi))
         break;

      /* if the phi is not spilled, add to instructions */
      if (!phi->definitions[0].isTemp() ||
          !ctx.spills_entry[block_idx].count(phi->definitions[0].getTemp())) {
         instructions.emplace_back(std::move(phi));
         continue;
      }

      std::vector<unsigned>& preds =
         phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      uint32_t def_spill_id = ctx.spills_entry[block_idx][phi->definitions[0].getTemp()];

      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (phi->operands[i].isUndefined())
            continue;

         unsigned pred_idx = preds[i];
         Operand spill_op = phi->operands[i];

         if (spill_op.isTemp()) {
            assert(phi->operands[i].isKill());
            Temp var = phi->operands[i].getTemp();

            std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
            /* prevent the defining instruction from being DCE'd if it could be rematerialized */
            if (rename_it == ctx.renames[preds[i]].end() && ctx.remat.count(var))
               ctx.unused_remats.erase(ctx.remat[var].instr);

            /* check if variable is already spilled at predecessor */
            auto spilled = ctx.spills_exit[pred_idx].find(var);
            if (spilled != ctx.spills_exit[pred_idx].end()) {
               if (spilled->second != def_spill_id)
                  ctx.add_affinity(def_spill_id, spilled->second);
               continue;
            }

            /* rename if necessary */
            if (rename_it != ctx.renames[pred_idx].end()) {
               spill_op.setTemp(rename_it->second);
               ctx.renames[pred_idx].erase(rename_it);
            }
         }

         uint32_t spill_id = ctx.allocate_spill_id(phi->definitions[0].regClass());

         /* add interferences and affinity */
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx])
            ctx.add_interference(spill_id, pair.second);
         ctx.add_affinity(def_spill_id, spill_id);

         aco_ptr<Pseudo_instruction> spill{
            create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
         spill->operands[0] = spill_op;
         spill->operands[1] = Operand::c32(spill_id);
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         do {
            assert(idx != 0);
            idx--;
         } while (phi->opcode == aco_opcode::p_phi &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         pred.instructions.insert(it, std::move(spill));
         if (spill_op.isTemp())
            ctx.spills_exit[pred_idx][spill_op.getTemp()] = spill_id;
      }

      /* remove phi from instructions */
      phi.reset();
   }

   /* iterate all (other) spilled variables for which to spill at the predecessor */
   // TODO: would be better to have them sorted: first vgprs and first with longest distance
   for (std::pair<Temp, uint32_t> pair : ctx.spills_entry[block_idx]) {
      std::vector<unsigned> preds =
         pair.first.is_linear() ? block->linear_preds : block->logical_preds;

      for (unsigned pred_idx : preds) {
         /* variable is already spilled at predecessor */
         auto spilled = ctx.spills_exit[pred_idx].find(pair.first);
         if (spilled != ctx.spills_exit[pred_idx].end()) {
            if (spilled->second != pair.second)
               ctx.add_affinity(pair.second, spilled->second);
            continue;
         }

         /* variable is dead at predecessor, it must be from a phi: this works because of CSSA
          * form */
         if (!ctx.next_use_distances_end[pred_idx].count(pair.first))
            continue;

         /* add interferences between spilled variable and predecessors exit spills */
         for (std::pair<Temp, uint32_t> exit_spill : ctx.spills_exit[pred_idx]) {
            if (exit_spill.first == pair.first)
               continue;
            ctx.add_interference(exit_spill.second, pair.second);
         }

         /* variable is in register at predecessor and has to be spilled */
         /* rename if necessary */
         Temp var = pair.first;
         std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
         if (rename_it != ctx.renames[pred_idx].end()) {
            var = rename_it->second;
            ctx.renames[pred_idx].erase(rename_it);
         }

         aco_ptr<Pseudo_instruction> spill{
            create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
         spill->operands[0] = Operand(var);
         spill->operands[1] = Operand::c32(pair.second);
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         do {
            assert(idx != 0);
            idx--;
         } while (pair.first.type() == RegType::vgpr &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         pred.instructions.insert(it, std::move(spill));
         ctx.spills_exit[pred.index][pair.first] = pair.second;
      }
   }

   /* iterate phis for which operands to reload */
   for (aco_ptr<Instruction>& phi : instructions) {
      assert(phi->opcode == aco_opcode::p_phi || phi->opcode == aco_opcode::p_linear_phi);
      assert(!phi->definitions[0].isTemp() ||
             !ctx.spills_entry[block_idx].count(phi->definitions[0].getTemp()));

      std::vector<unsigned>& preds =
         phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (!phi->operands[i].isTemp())
            continue;
         unsigned pred_idx = preds[i];

         /* if the operand was reloaded, rename */
         if (!ctx.spills_exit[pred_idx].count(phi->operands[i].getTemp())) {
            std::map<Temp, Temp>::iterator it =
               ctx.renames[pred_idx].find(phi->operands[i].getTemp());
            if (it != ctx.renames[pred_idx].end()) {
               phi->operands[i].setTemp(it->second);
               /* prevent the defining instruction from being DCE'd if it could be rematerialized */
            } else {
               auto remat_it = ctx.remat.find(phi->operands[i].getTemp());
               if (remat_it != ctx.remat.end()) {
                  ctx.unused_remats.erase(remat_it->second.instr);
               }
            }
            continue;
         }

         Temp tmp = phi->operands[i].getTemp();

         /* reload phi operand at end of predecessor block */
         Temp new_name = ctx.program->allocateTmp(tmp.regClass());
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         do {
            assert(idx != 0);
            idx--;
         } while (phi->opcode == aco_opcode::p_phi &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         aco_ptr<Instruction> reload =
            do_reload(ctx, tmp, new_name, ctx.spills_exit[pred_idx][tmp]);

         /* reload spilled exec mask directly to exec */
         if (!phi->definitions[0].isTemp()) {
            assert(phi->definitions[0].isFixed() && phi->definitions[0].physReg() == exec);
            reload->definitions[0] = phi->definitions[0];
            phi->operands[i] = Operand(exec, ctx.program->lane_mask);
         } else {
            ctx.spills_exit[pred_idx].erase(tmp);
            ctx.renames[pred_idx][tmp] = new_name;
            phi->operands[i].setTemp(new_name);
         }

         pred.instructions.insert(it, std::move(reload));
      }
   }

   /* iterate live variables for which to reload */
   // TODO: reload at current block if variable is spilled on all predecessors
   for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
        ctx.next_use_distances_start[block_idx]) {
      /* skip spilled variables */
      if (ctx.spills_entry[block_idx].count(pair.first))
         continue;
      std::vector<unsigned> preds =
         pair.first.is_linear() ? block->linear_preds : block->logical_preds;

      /* variable is dead at predecessor, it must be from a phi */
      bool is_dead = false;
      for (unsigned pred_idx : preds) {
         if (!ctx.next_use_distances_end[pred_idx].count(pair.first))
            is_dead = true;
      }
      if (is_dead)
         continue;
      for (unsigned pred_idx : preds) {
         /* the variable is not spilled at the predecessor */
         if (!ctx.spills_exit[pred_idx].count(pair.first))
            continue;

         /* variable is spilled at predecessor and has to be reloaded */
         Temp new_name = ctx.program->allocateTmp(pair.first.regClass());
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         do {
            assert(idx != 0);
            idx--;
         } while (pair.first.type() == RegType::vgpr &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);

         aco_ptr<Instruction> reload =
            do_reload(ctx, pair.first, new_name, ctx.spills_exit[pred.index][pair.first]);
         pred.instructions.insert(it, std::move(reload));

         ctx.spills_exit[pred.index].erase(pair.first);
         ctx.renames[pred.index][pair.first] = new_name;
      }

      /* check if we have to create a new phi for this variable */
      Temp rename = Temp();
      bool is_same = true;
      for (unsigned pred_idx : preds) {
         if (!ctx.renames[pred_idx].count(pair.first)) {
            if (rename == Temp())
               rename = pair.first;
            else
               is_same = rename == pair.first;
         } else {
            if (rename == Temp())
               rename = ctx.renames[pred_idx][pair.first];
            else
               is_same = rename == ctx.renames[pred_idx][pair.first];
         }

         if (!is_same)
            break;
      }

      if (!is_same) {
         /* the variable was renamed differently in the predecessors: we have to create a phi */
         aco_opcode opcode = pair.first.is_linear() ? aco_opcode::p_linear_phi : aco_opcode::p_phi;
         aco_ptr<Pseudo_instruction> phi{
            create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, preds.size(), 1)};
         rename = ctx.program->allocateTmp(pair.first.regClass());
         for (unsigned i = 0; i < phi->operands.size(); i++) {
            Temp tmp;
            if (ctx.renames[preds[i]].count(pair.first)) {
               tmp = ctx.renames[preds[i]][pair.first];
            } else if (preds[i] >= block_idx) {
               tmp = rename;
            } else {
               tmp = pair.first;
               /* prevent the defining instruction from being DCE'd if it could be rematerialized */
               if (ctx.remat.count(tmp))
                  ctx.unused_remats.erase(ctx.remat[tmp].instr);
            }
            phi->operands[i] = Operand(tmp);
         }
         phi->definitions[0] = Definition(rename);
         instructions.emplace_back(std::move(phi));
      }

      /* the variable was renamed: add new name to renames */
      if (!(rename == Temp() || rename == pair.first))
         ctx.renames[block_idx][pair.first] = rename;
   }

   /* combine phis with instructions */
   unsigned idx = 0;
   while (!block->instructions[idx]) {
      idx++;
   }

   if (!ctx.processed[block_idx]) {
      assert(!(block->kind & block_kind_loop_header));
      RegisterDemand demand_before = get_demand_before(ctx, block_idx, idx);
      ctx.register_demand[block->index].erase(ctx.register_demand[block->index].begin(),
                                              ctx.register_demand[block->index].begin() + idx);
      ctx.register_demand[block->index].insert(ctx.register_demand[block->index].begin(),
                                               instructions.size(), demand_before);
   }

   std::vector<aco_ptr<Instruction>>::iterator start = std::next(block->instructions.begin(), idx);
   instructions.insert(
      instructions.end(), std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(start),
      std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
   block->instructions = std::move(instructions);
}

void
process_block(spill_ctx& ctx, unsigned block_idx, Block* block, RegisterDemand spilled_registers)
{
   assert(!ctx.processed[block_idx]);

   std::vector<aco_ptr<Instruction>> instructions;
   unsigned idx = 0;

   /* phis are handled separately */
   while (block->instructions[idx]->opcode == aco_opcode::p_phi ||
          block->instructions[idx]->opcode == aco_opcode::p_linear_phi) {
      instructions.emplace_back(std::move(block->instructions[idx++]));
   }

   if (block->register_demand.exceeds(ctx.target_pressure)) {
      update_local_next_uses(ctx, block, ctx.local_next_use_distance);
   } else {
      /* We won't use local_next_use_distance, so no initialization needed */
   }

   auto& current_spills = ctx.spills_exit[block_idx];

   while (idx < block->instructions.size()) {
      aco_ptr<Instruction>& instr = block->instructions[idx];

      std::map<Temp, std::pair<Temp, uint32_t>> reloads;

      /* rename and reload operands */
      for (Operand& op : instr->operands) {
         if (!op.isTemp())
            continue;
         if (!current_spills.count(op.getTemp())) {
            /* the Operand is in register: check if it was renamed */
            auto rename_it = ctx.renames[block_idx].find(op.getTemp());
            if (rename_it != ctx.renames[block_idx].end()) {
               op.setTemp(rename_it->second);
            } else {
               /* prevent its defining instruction from being DCE'd if it could be rematerialized */
               auto remat_it = ctx.remat.find(op.getTemp());
               if (remat_it != ctx.remat.end()) {
                  ctx.unused_remats.erase(remat_it->second.instr);
               }
            }
            continue;
         }
         /* the Operand is spilled: add it to reloads */
         Temp new_tmp = ctx.program->allocateTmp(op.regClass());
         ctx.renames[block_idx][op.getTemp()] = new_tmp;
         reloads[new_tmp] = std::make_pair(op.getTemp(), current_spills[op.getTemp()]);
         current_spills.erase(op.getTemp());
         op.setTemp(new_tmp);
         spilled_registers -= new_tmp;
      }

      /* check if register demand is low enough before and after the current instruction */
      if (block->register_demand.exceeds(ctx.target_pressure)) {

         RegisterDemand new_demand = ctx.register_demand[block_idx][idx];
         new_demand.update(get_demand_before(ctx, block_idx, idx));

         assert(!ctx.local_next_use_distance.empty());

         /* if reg pressure is too high, spill variable with furthest next use */
         while ((new_demand - spilled_registers).exceeds(ctx.target_pressure)) {
            unsigned distance = 0;
            Temp to_spill;
            bool do_rematerialize = false;
            RegType type = RegType::sgpr;
            if (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr)
               type = RegType::vgpr;

            for (std::pair<Temp, uint32_t> pair : ctx.local_next_use_distance[idx]) {
               if (pair.first.type() != type)
                  continue;
               bool can_rematerialize = ctx.remat.count(pair.first);
               if (((pair.second > distance && can_rematerialize == do_rematerialize) ||
                    (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
                   !current_spills.count(pair.first)) {
                  to_spill = pair.first;
                  distance = pair.second;
                  do_rematerialize = can_rematerialize;
               }
            }

            assert(distance != 0 && distance > idx);
            uint32_t spill_id = ctx.allocate_spill_id(to_spill.regClass());

            /* add interferences with currently spilled variables */
            for (std::pair<Temp, uint32_t> pair : current_spills)
               ctx.add_interference(spill_id, pair.second);
            for (std::pair<const Temp, std::pair<Temp, uint32_t>>& pair : reloads)
               ctx.add_interference(spill_id, pair.second.second);

            current_spills[to_spill] = spill_id;
            spilled_registers += to_spill;

            /* rename if necessary */
            if (ctx.renames[block_idx].count(to_spill)) {
               to_spill = ctx.renames[block_idx][to_spill];
            }

            /* add spill to new instructions */
            aco_ptr<Pseudo_instruction> spill{
               create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
            spill->operands[0] = Operand(to_spill);
            spill->operands[1] = Operand::c32(spill_id);
            instructions.emplace_back(std::move(spill));
         }
      }

      /* add reloads and instruction to new instructions */
      for (std::pair<const Temp, std::pair<Temp, uint32_t>>& pair : reloads) {
         aco_ptr<Instruction> reload =
            do_reload(ctx, pair.second.first, pair.first, pair.second.second);
         instructions.emplace_back(std::move(reload));
      }
      instructions.emplace_back(std::move(instr));
      idx++;
   }

   block->instructions = std::move(instructions);
}
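
/* The candidate loop above prefers rematerializable temps: one can displace a
 * plain value even with a nearer next use, as long as that use is after the
 * current instruction. Illustrative comparison at idx = 4:
 *
 *   %a: next use at 20, not remat  -> initial pick (furthest plain use)
 *   %b: next use at 9,  remat      -> wins via (can_rematerialize &&
 *                                     !do_rematerialize && pair.second > idx)
 *
 * Spilling %b costs a single cheap re-emit at the reload point instead of a
 * scratch store/load pair.
 */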

void
spill_block(spill_ctx& ctx, unsigned block_idx)
{
   Block* block = &ctx.program->blocks[block_idx];

   /* determine set of variables which are spilled at the beginning of the block */
   RegisterDemand spilled_registers = init_live_in_vars(ctx, block, block_idx);

   /* add interferences for spilled variables */
   for (auto it = ctx.spills_entry[block_idx].begin(); it != ctx.spills_entry[block_idx].end();
        ++it) {
      for (auto it2 = std::next(it); it2 != ctx.spills_entry[block_idx].end(); ++it2)
         ctx.add_interference(it->second, it2->second);
   }

   bool is_loop_header = block->loop_nest_depth && ctx.loop_header.top()->index == block_idx;
   if (!is_loop_header) {
      /* add spill/reload code on incoming control flow edges */
      add_coupling_code(ctx, block, block_idx);
   }

   const auto& current_spills = ctx.spills_entry[block_idx];

   /* check conditions to process this block */
   bool process = (block->register_demand - spilled_registers).exceeds(ctx.target_pressure) ||
                  !ctx.renames[block_idx].empty() || ctx.unused_remats.size();

   for (auto it = current_spills.begin(); !process && it != current_spills.end(); ++it) {
      if (ctx.next_use_distances_start[block_idx].at(it->first).first == block_idx)
         process = true;
   }

   assert(ctx.spills_exit[block_idx].empty());
   ctx.spills_exit[block_idx] = current_spills;
   if (process) {
      process_block(ctx, block_idx, block, spilled_registers);
   }

   ctx.processed[block_idx] = true;

   /* check if the next block leaves the current loop */
   if (block->loop_nest_depth == 0 ||
       ctx.program->blocks[block_idx + 1].loop_nest_depth >= block->loop_nest_depth)
      return;

   Block* loop_header = ctx.loop_header.top();

   /* preserve original renames at end of loop header block */
   std::map<Temp, Temp> renames = std::move(ctx.renames[loop_header->index]);

   /* add coupling code to all loop header predecessors */
   add_coupling_code(ctx, loop_header, loop_header->index);

   /* propagate new renames through loop: i.e. repair the SSA */
   renames.swap(ctx.renames[loop_header->index]);
   for (std::pair<Temp, Temp> rename : renames) {
      for (unsigned idx = loop_header->index; idx <= block_idx; idx++) {
         Block& current = ctx.program->blocks[idx];
         std::vector<aco_ptr<Instruction>>::iterator instr_it = current.instructions.begin();

         /* first rename phis */
         while (instr_it != current.instructions.end()) {
            aco_ptr<Instruction>& phi = *instr_it;
            if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
               break;
            /* no need to rename the loop header phis once again. this happened in
             * add_coupling_code() */
            if (idx == loop_header->index) {
               instr_it++;
               continue;
            }

            for (Operand& op : phi->operands) {
               if (!op.isTemp())
                  continue;
               if (op.getTemp() == rename.first)
                  op.setTemp(rename.second);
            }
            instr_it++;
         }

         /* variable is not live at beginning of this block */
         if (ctx.next_use_distances_start[idx].count(rename.first) == 0)
            continue;

         /* if the variable is live at the block's exit, add rename */
         if (ctx.next_use_distances_end[idx].count(rename.first) != 0)
            ctx.renames[idx].insert(rename);

         /* rename all uses in this block */
         bool renamed = false;
         while (!renamed && instr_it != current.instructions.end()) {
            aco_ptr<Instruction>& instr = *instr_it;
            for (Operand& op : instr->operands) {
               if (!op.isTemp())
                  continue;
               if (op.getTemp() == rename.first) {
                  op.setTemp(rename.second);
                  /* we can stop with this block as soon as the variable is spilled */
                  if (instr->opcode == aco_opcode::p_spill)
                     renamed = true;
               }
            }
            instr_it++;
         }
      }
   }

   /* remove loop header info from stack */
   ctx.loop_header.pop();
}

Temp
load_scratch_resource(spill_ctx& ctx, Temp& scratch_offset,
                      std::vector<aco_ptr<Instruction>>& instructions, unsigned offset,
                      bool is_top_level)
{
   Builder bld(ctx.program);
   if (is_top_level) {
      bld.reset(&instructions);
   } else {
      /* find p_logical_end */
      unsigned idx = instructions.size() - 1;
      while (instructions[idx]->opcode != aco_opcode::p_logical_end)
         idx--;
      bld.reset(&instructions, std::next(instructions.begin(), idx));
   }

   Temp private_segment_buffer = ctx.program->private_segment_buffer;
   if (ctx.program->stage != compute_cs)
      private_segment_buffer =
         bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), private_segment_buffer, Operand::zero());

   if (offset)
      scratch_offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc),
                                scratch_offset, Operand::c32(offset));

   uint32_t rsrc_conf =
      S_008F0C_ADD_TID_ENABLE(1) | S_008F0C_INDEX_STRIDE(ctx.program->wave_size == 64 ? 3 : 2);

   if (ctx.program->chip_class >= GFX10) {
      rsrc_conf |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
                   S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
   } else if (ctx.program->chip_class <= GFX7) {
      /* dfmt modifies stride on GFX8/GFX9 when ADD_TID_EN=1 */
      rsrc_conf |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
   }
   /* older generations need element size = 4 bytes. element size removed in GFX9 */
   if (ctx.program->chip_class <= GFX8)
      rsrc_conf |= S_008F0C_ELEMENT_SIZE(1);

   return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), private_segment_buffer,
                     Operand::c32(-1u), Operand::c32(rsrc_conf));
}
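
/* The result is a 4-dword buffer resource descriptor (informal layout; see the
 * SI_* register documentation for the authoritative field positions):
 *
 *   dwords 0-1: base address (private_segment_buffer)
 *   dword  2:   num_records = 0xFFFFFFFF, i.e. effectively unbounded
 *   dword  3:   rsrc_conf - ADD_TID_ENABLE swizzles accesses per lane and
 *               INDEX_STRIDE matches the wave size (64 lanes -> 3, 32 -> 2)
 *
 * so every lane of a wave addresses its own interleaved slice of scratch.
 */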

void
add_interferences(spill_ctx& ctx, std::vector<bool>& is_assigned, std::vector<uint32_t>& slots,
                  std::vector<bool>& slots_used, unsigned id)
{
   for (unsigned other : ctx.interferences[id].second) {
      if (!is_assigned[other])
         continue;

      RegClass other_rc = ctx.interferences[other].first;
      unsigned slot = slots[other];
      std::fill(slots_used.begin() + slot, slots_used.begin() + slot + other_rc.size(), true);
   }
}

unsigned
find_available_slot(std::vector<bool>& used, unsigned wave_size, unsigned size, bool is_sgpr,
                    unsigned* num_slots)
{
   unsigned wave_size_minus_one = wave_size - 1;
   unsigned slot = 0;

   while (true) {
      bool available = true;
      for (unsigned i = 0; i < size; i++) {
         if (slot + i < used.size() && used[slot + i]) {
            available = false;
            break;
         }
      }
      if (!available) {
         slot++;
         continue;
      }

      if (is_sgpr && ((slot & wave_size_minus_one) > wave_size - size)) {
         slot = align(slot, wave_size);
         continue;
      }

      std::fill(used.begin(), used.end(), false);

      if (slot + size > used.size())
         used.resize(slot + size);

      return slot;
   }
}
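
/* Example of the SGPR alignment rule above (wave_size = 64, size = 2): a
 * candidate slot 63 would straddle two 64-slot groups since (63 & 63) = 63 is
 * greater than 64 - 2, so the search realigns to slot 64. SGPR spill slots are
 * grouped per wave because one whole group is backed by the lanes of a single
 * VGPR (see vgpr_spill_temps in assign_spill_slots). Note also that the
 * std::fill above resets the scratch `used` map before returning; the caller
 * re-marks interferences for each query.
 */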

void
assign_spill_slots_helper(spill_ctx& ctx, RegType type, std::vector<bool>& is_assigned,
                          std::vector<uint32_t>& slots, unsigned* num_slots)
{
   std::vector<bool> slots_used(*num_slots);

   /* assign slots for ids with affinities first */
   for (std::vector<uint32_t>& vec : ctx.affinities) {
      if (ctx.interferences[vec[0]].first.type() != type)
         continue;

      for (unsigned id : vec) {
         if (!ctx.is_reloaded[id])
            continue;

         add_interferences(ctx, is_assigned, slots, slots_used, id);
      }

      unsigned slot =
         find_available_slot(slots_used, ctx.wave_size, ctx.interferences[vec[0]].first.size(),
                             type == RegType::sgpr, num_slots);

      for (unsigned id : vec) {
         assert(!is_assigned[id]);

         if (ctx.is_reloaded[id]) {
            slots[id] = slot;
            is_assigned[id] = true;
         }
      }
   }

   /* assign slots for ids without affinities */
   for (unsigned id = 0; id < ctx.interferences.size(); id++) {
      if (is_assigned[id] || !ctx.is_reloaded[id] || ctx.interferences[id].first.type() != type)
         continue;

      add_interferences(ctx, is_assigned, slots, slots_used, id);

      unsigned slot =
         find_available_slot(slots_used, ctx.wave_size, ctx.interferences[id].first.size(),
                             type == RegType::sgpr, num_slots);

      slots[id] = slot;
      is_assigned[id] = true;
   }

   *num_slots = slots_used.size();
}
1529
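/* Maps each spill id to a concrete slot and lowers the p_spill/p_reload
 * pseudo instructions: VGPR contents are stored to/loaded from scratch memory
 * via MUBUF instructions, while SGPR contents are kept in lanes of long-lived
 * linear VGPRs. */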
void
assign_spill_slots(spill_ctx& ctx, unsigned spills_to_vgpr)
{
   std::vector<uint32_t> slots(ctx.interferences.size());
   std::vector<bool> is_assigned(ctx.interferences.size());

   /* first, handle affinities: affine spill ids share a slot, so if any one
    * of them is reloaded, all of them have to be kept */
   for (std::vector<uint32_t>& vec : ctx.affinities) {
      for (unsigned i = 0; i < vec.size(); i++) {
         for (unsigned j = i + 1; j < vec.size(); j++) {
            assert(vec[i] != vec[j]);
            bool reloaded = ctx.is_reloaded[vec[i]] || ctx.is_reloaded[vec[j]];
            ctx.is_reloaded[vec[i]] = reloaded;
            ctx.is_reloaded[vec[j]] = reloaded;
         }
      }
   }
   for (ASSERTED uint32_t i = 0; i < ctx.interferences.size(); i++)
      for (ASSERTED uint32_t id : ctx.interferences[i].second)
         assert(i != id);

   /* for each spill slot, assign as many spill ids as possible */
   unsigned sgpr_spill_slots = 0, vgpr_spill_slots = 0;
   assign_spill_slots_helper(ctx, RegType::sgpr, is_assigned, slots, &sgpr_spill_slots);
   assign_spill_slots_helper(ctx, RegType::vgpr, is_assigned, slots, &vgpr_spill_slots);

   for (unsigned id = 0; id < is_assigned.size(); id++)
      assert(is_assigned[id] || !ctx.is_reloaded[id]);

   for (std::vector<uint32_t>& vec : ctx.affinities) {
      for (unsigned i = 0; i < vec.size(); i++) {
         for (unsigned j = i + 1; j < vec.size(); j++) {
            assert(is_assigned[vec[i]] == is_assigned[vec[j]]);
            if (!is_assigned[vec[i]])
               continue;
            assert(ctx.is_reloaded[vec[i]] == ctx.is_reloaded[vec[j]]);
            assert(ctx.interferences[vec[i]].first.type() ==
                   ctx.interferences[vec[j]].first.type());
            assert(slots[vec[i]] == slots[vec[j]]);
         }
      }
   }

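   /* slot -> linear VGPR mapping for SGPR spills: slot / wave_size selects the
    * backing VGPR, slot % wave_size the lane within it, so each linear VGPR
    * holds wave_size spilled SGPRs */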
   /* hope we didn't mess up */
   std::vector<Temp> vgpr_spill_temps((sgpr_spill_slots + ctx.wave_size - 1) / ctx.wave_size);
   assert(vgpr_spill_temps.size() <= spills_to_vgpr);

   /* replace pseudo instructions with actual hardware instructions */
   Temp scratch_offset = ctx.program->scratch_offset, scratch_rsrc = Temp();
   unsigned last_top_level_block_idx = 0;
   std::vector<bool> reload_in_loop(vgpr_spill_temps.size());
   for (Block& block : ctx.program->blocks) {

      /* after loops, we insert a user if there was a reload inside the loop */
      if (block.loop_nest_depth == 0) {
         int end_vgprs = 0;
         for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
            if (reload_in_loop[i])
               end_vgprs++;
         }

         if (end_vgprs > 0) {
            aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(
               aco_opcode::p_end_linear_vgpr, Format::PSEUDO, end_vgprs, 0)};
            int k = 0;
            for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
               if (reload_in_loop[i])
                  destr->operands[k++] = Operand(vgpr_spill_temps[i]);
               reload_in_loop[i] = false;
            }
            /* find insertion point */
            std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
            while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
               ++it;
            block.instructions.insert(it, std::move(destr));
         }
      }

      if (block.kind & block_kind_top_level && !block.linear_preds.empty()) {
         last_top_level_block_idx = block.index;

         /* check if any spilled variables use a created linear vgpr, otherwise destroy them */
         for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
            if (vgpr_spill_temps[i] == Temp())
               continue;

            bool can_destroy = true;
            for (std::pair<Temp, uint32_t> pair : ctx.spills_entry[block.index]) {

               if (ctx.interferences[pair.second].first.type() == RegType::sgpr &&
                   slots[pair.second] / ctx.wave_size == i) {
                  can_destroy = false;
                  break;
               }
            }
            if (can_destroy)
               vgpr_spill_temps[i] = Temp();
         }
      }

      std::vector<aco_ptr<Instruction>>::iterator it;
      std::vector<aco_ptr<Instruction>> instructions;
      instructions.reserve(block.instructions.size());
      Builder bld(ctx.program, &instructions);
      for (it = block.instructions.begin(); it != block.instructions.end(); ++it) {

         if ((*it)->opcode == aco_opcode::p_spill) {
            uint32_t spill_id = (*it)->operands[1].constantValue();

            if (!ctx.is_reloaded[spill_id]) {
               /* never reloaded, so don't spill */
            } else if (!is_assigned[spill_id]) {
               unreachable("No spill slot assigned for spill id");
            } else if (ctx.interferences[spill_id].first.type() == RegType::vgpr) {
               /* spill vgpr */
               ctx.program->config->spilled_vgprs += (*it)->operands[0].size();
               uint32_t spill_slot = slots[spill_id];
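               /* the MUBUF immediate offset field is 12 bits, so if the
                * per-lane base offset plus the spill region would exceed it,
                * fold the base into the SGPR scratch offset instead */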
               bool add_offset_to_sgpr =
                  ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size +
                     vgpr_spill_slots * 4 >
                  4096;
               unsigned base_offset =
                  add_offset_to_sgpr
                     ? 0
                     : ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size;

               /* check if the scratch resource descriptor already exists */
               if (scratch_rsrc == Temp()) {
                  unsigned offset =
                     add_offset_to_sgpr ? ctx.program->config->scratch_bytes_per_wave : 0;
                  scratch_rsrc = load_scratch_resource(
                     ctx, scratch_offset,
                     last_top_level_block_idx == block.index
                        ? instructions
                        : ctx.program->blocks[last_top_level_block_idx].instructions,
                     offset, last_top_level_block_idx == block.index);
               }

               unsigned offset = base_offset + spill_slot * 4;
               aco_opcode opcode = aco_opcode::buffer_store_dword;
               assert((*it)->operands[0].isTemp());
               Temp temp = (*it)->operands[0].getTemp();
               assert(temp.type() == RegType::vgpr && !temp.is_linear());
               if (temp.size() > 1) {
                  Instruction* split{create_instruction<Pseudo_instruction>(
                     aco_opcode::p_split_vector, Format::PSEUDO, 1, temp.size())};
                  split->operands[0] = Operand(temp);
                  for (unsigned i = 0; i < temp.size(); i++)
                     split->definitions[i] = bld.def(v1);
                  bld.insert(split);
                  for (unsigned i = 0; i < temp.size(); i++) {
                     Instruction* instr =
                        bld.mubuf(opcode, scratch_rsrc, Operand(v1), scratch_offset,
                                  split->definitions[i].getTemp(), offset + i * 4, false, true);
                     instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
                  }
               } else {
                  Instruction* instr = bld.mubuf(opcode, scratch_rsrc, Operand(v1), scratch_offset,
                                                 temp, offset, false, true);
                  instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
               }
            } else {
               ctx.program->config->spilled_sgprs += (*it)->operands[0].size();

               uint32_t spill_slot = slots[spill_id];

               /* check if the linear vgpr already exists */
               if (vgpr_spill_temps[spill_slot / ctx.wave_size] == Temp()) {
                  Temp linear_vgpr = ctx.program->allocateTmp(v1.as_linear());
                  vgpr_spill_temps[spill_slot / ctx.wave_size] = linear_vgpr;
                  aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(
                     aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
                  create->definitions[0] = Definition(linear_vgpr);
                  /* find the right place to insert this definition */
                  if (last_top_level_block_idx == block.index) {
                     /* insert right before the current instruction */
                     instructions.emplace_back(std::move(create));
                  } else {
                     assert(last_top_level_block_idx < block.index);
                     /* insert before the branch at last top level block */
                     std::vector<aco_ptr<Instruction>>& block_instrs =
                        ctx.program->blocks[last_top_level_block_idx].instructions;
                     block_instrs.insert(std::prev(block_instrs.end()), std::move(create));
                  }
               }

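               /* the p_spill that remains here has a constant lane operand and
                * is later turned into a v_writelane_b32 into the linear VGPR
                * by lower_to_hw_instr() */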
               /* spill sgpr: just add the vgpr temp to operands */
               Pseudo_instruction* spill =
                  create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 3, 0);
               spill->operands[0] = Operand(vgpr_spill_temps[spill_slot / ctx.wave_size]);
               spill->operands[1] = Operand::c32(spill_slot % ctx.wave_size);
               spill->operands[2] = (*it)->operands[0];
               instructions.emplace_back(aco_ptr<Instruction>(spill));
            }

         } else if ((*it)->opcode == aco_opcode::p_reload) {
            uint32_t spill_id = (*it)->operands[0].constantValue();
            assert(ctx.is_reloaded[spill_id]);

            if (!is_assigned[spill_id]) {
               unreachable("No spill slot assigned for spill id");
            } else if (ctx.interferences[spill_id].first.type() == RegType::vgpr) {
               /* reload vgpr */
               uint32_t spill_slot = slots[spill_id];
               bool add_offset_to_sgpr =
                  ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size +
                     vgpr_spill_slots * 4 >
                  4096;
               unsigned base_offset =
                  add_offset_to_sgpr
                     ? 0
                     : ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size;

               /* check if the scratch resource descriptor already exists */
               if (scratch_rsrc == Temp()) {
                  unsigned offset =
                     add_offset_to_sgpr ? ctx.program->config->scratch_bytes_per_wave : 0;
                  scratch_rsrc = load_scratch_resource(
                     ctx, scratch_offset,
                     last_top_level_block_idx == block.index
                        ? instructions
                        : ctx.program->blocks[last_top_level_block_idx].instructions,
                     offset, last_top_level_block_idx == block.index);
               }

               unsigned offset = base_offset + spill_slot * 4;
               aco_opcode opcode = aco_opcode::buffer_load_dword;
               Definition def = (*it)->definitions[0];
               if (def.size() > 1) {
                  Instruction* vec{create_instruction<Pseudo_instruction>(
                     aco_opcode::p_create_vector, Format::PSEUDO, def.size(), 1)};
                  vec->definitions[0] = def;
                  for (unsigned i = 0; i < def.size(); i++) {
                     Temp tmp = bld.tmp(v1);
                     vec->operands[i] = Operand(tmp);
                     Instruction* instr =
                        bld.mubuf(opcode, Definition(tmp), scratch_rsrc, Operand(v1),
                                  scratch_offset, offset + i * 4, false, true);
                     instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
                  }
                  bld.insert(vec);
               } else {
                  Instruction* instr = bld.mubuf(opcode, def, scratch_rsrc, Operand(v1),
                                                 scratch_offset, offset, false, true);
                  instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
               }
            } else {
               uint32_t spill_slot = slots[spill_id];
               reload_in_loop[spill_slot / ctx.wave_size] = block.loop_nest_depth > 0;

               /* check if the linear vgpr already exists */
               if (vgpr_spill_temps[spill_slot / ctx.wave_size] == Temp()) {
                  Temp linear_vgpr = ctx.program->allocateTmp(v1.as_linear());
                  vgpr_spill_temps[spill_slot / ctx.wave_size] = linear_vgpr;
                  aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(
                     aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
                  create->definitions[0] = Definition(linear_vgpr);
                  /* find the right place to insert this definition */
                  if (last_top_level_block_idx == block.index) {
                     /* insert right before the current instruction */
                     instructions.emplace_back(std::move(create));
                  } else {
                     assert(last_top_level_block_idx < block.index);
                     /* insert before the branch at last top level block */
                     std::vector<aco_ptr<Instruction>>& block_instrs =
                        ctx.program->blocks[last_top_level_block_idx].instructions;
                     block_instrs.insert(std::prev(block_instrs.end()), std::move(create));
                  }
               }

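               /* the remaining p_reload is later lowered to a v_readlane_b32
                * from the linear VGPR by lower_to_hw_instr() */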
               /* reload sgpr: just add the vgpr temp to operands */
               Pseudo_instruction* reload = create_instruction<Pseudo_instruction>(
                  aco_opcode::p_reload, Format::PSEUDO, 2, 1);
               reload->operands[0] = Operand(vgpr_spill_temps[spill_slot / ctx.wave_size]);
               reload->operands[1] = Operand::c32(spill_slot % ctx.wave_size);
               reload->definitions[0] = (*it)->definitions[0];
               instructions.emplace_back(aco_ptr<Instruction>(reload));
            }
         } else if (!ctx.unused_remats.count(it->get())) {
            instructions.emplace_back(std::move(*it));
         }
      }
      block.instructions = std::move(instructions);
   }

   /* update required scratch memory */
   ctx.program->config->scratch_bytes_per_wave +=
      align(vgpr_spill_slots * 4 * ctx.program->wave_size, 1024);
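   /* each VGPR spill slot is one dword per lane, i.e. 4 * wave_size bytes per
    * wave; the added size is rounded up to the 1024-byte granularity used for
    * scratch allocation */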

   /* SSA elimination inserts copies for logical phis right before p_logical_end,
    * so if a linear vgpr is used between that p_logical_end and the branch,
    * we need to ensure logical phis don't choose a definition which aliases
    * the linear vgpr.
    * TODO: Moving the spills and reloads to before p_logical_end might produce
    * slightly better code. */
   for (Block& block : ctx.program->blocks) {
      /* loop exits are already handled */
      if (block.logical_preds.size() <= 1)
         continue;

      bool has_logical_phis = false;
      for (aco_ptr<Instruction>& instr : block.instructions) {
         if (instr->opcode == aco_opcode::p_phi) {
            has_logical_phis = true;
            break;
         } else if (instr->opcode != aco_opcode::p_linear_phi) {
            break;
         }
      }
      if (!has_logical_phis)
         continue;

      std::set<Temp> vgprs;
      for (unsigned pred_idx : block.logical_preds) {
         Block& pred = ctx.program->blocks[pred_idx];
         for (int i = pred.instructions.size() - 1; i >= 0; i--) {
            aco_ptr<Instruction>& pred_instr = pred.instructions[i];
            if (pred_instr->opcode == aco_opcode::p_logical_end) {
               break;
            } else if (pred_instr->opcode == aco_opcode::p_spill ||
                       pred_instr->opcode == aco_opcode::p_reload) {
               vgprs.insert(pred_instr->operands[0].getTemp());
            }
         }
      }
      if (!vgprs.size())
         continue;

      aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(
         aco_opcode::p_end_linear_vgpr, Format::PSEUDO, vgprs.size(), 0)};
      int k = 0;
      for (Temp tmp : vgprs) {
         destr->operands[k++] = Operand(tmp);
      }
      /* find insertion point */
      std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
      while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
         ++it;
      block.instructions.insert(it, std::move(destr));
   }
}

} /* end namespace */

void
spill(Program* program, live& live_vars)
{
   program->config->spilled_vgprs = 0;
   program->config->spilled_sgprs = 0;

   program->progress = CompilationProgress::after_spilling;

   /* no spilling when register pressure is low enough */
   if (program->num_waves > 0)
      return;

   /* lower to CSSA before spilling to ensure correctness w.r.t. phis */
   lower_to_cssa(program, live_vars);

   /* calculate target register demand */
   const RegisterDemand demand = program->max_reg_demand; /* current max */
   const uint16_t sgpr_limit = get_addr_sgpr_from_waves(program, program->min_waves);
   const uint16_t vgpr_limit = get_addr_vgpr_from_waves(program, program->min_waves);
   uint16_t extra_vgprs = 0;
   uint16_t extra_sgprs = 0;

   /* calculate extra VGPRs required for spilling SGPRs */
   if (demand.sgpr > sgpr_limit) {
      unsigned sgpr_spills = demand.sgpr - sgpr_limit;
      extra_vgprs = DIV_ROUND_UP(sgpr_spills, program->wave_size) + 1;
   }
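   /* e.g. with wave_size == 64 and 70 SGPRs over the limit:
    * extra_vgprs = DIV_ROUND_UP(70, 64) + 1 = 3 linear VGPRs, holding one
    * spilled SGPR per lane plus one extra VGPR */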
   /* add extra SGPRs required for spilling VGPRs */
   if (demand.vgpr + extra_vgprs > vgpr_limit) {
      extra_sgprs = 5; /* scratch_rsrc (s4) + scratch_offset (s1) */
      if (demand.sgpr + extra_sgprs > sgpr_limit) {
         /* re-calculate in case something has changed */
         unsigned sgpr_spills = demand.sgpr + extra_sgprs - sgpr_limit;
         extra_vgprs = DIV_ROUND_UP(sgpr_spills, program->wave_size) + 1;
      }
   }
   /* the spiller has to target the following register demand */
   const RegisterDemand target(vgpr_limit - extra_vgprs, sgpr_limit - extra_sgprs);

   /* initialize ctx */
   spill_ctx ctx(target, program, live_vars.register_demand);
   compute_global_next_uses(ctx);
   get_rematerialize_info(ctx);

   /* create spills and reloads */
   for (unsigned i = 0; i < program->blocks.size(); i++)
      spill_block(ctx, i);

   /* assign spill slots and DCE rematerialized code */
   assign_spill_slots(ctx, extra_vgprs);

   /* update live variable information */
   live_vars = live_var_analysis(program);

   assert(program->num_waves > 0);
}

} // namespace aco