/*
 * Copyright © 2021 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include <algorithm>
#include <array>
#include <bitset>
#include <vector>

namespace aco {
namespace {

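/* Number of tracked registers: SGPRs use physical register indices 0-255 and
 * VGPRs use 256-511 (see the asserts in save_reg_writes). */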
constexpr const size_t max_reg_cnt = 512;

struct Idx {
   bool operator==(const Idx& other) const { return block == other.block && instr == other.instr; }
   bool operator!=(const Idx& other) const { return !operator==(other); }

   bool found() const { return block != UINT32_MAX; }

   uint32_t block;
   uint32_t instr;
};

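/* Sentinel values, distinguished by the instr field; block == UINT32_MAX
 * makes found() return false for all of them. */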
Idx not_written_in_block{UINT32_MAX, 0};
Idx clobbered{UINT32_MAX, 1};
Idx const_or_undef{UINT32_MAX, 2};
Idx written_by_multiple_instrs{UINT32_MAX, 3};

struct pr_opt_ctx {
   Program* program;
   Block* current_block;
   uint32_t current_instr_idx;
   std::vector<uint16_t> uses;
   std::vector<std::array<Idx, max_reg_cnt>> instr_idx_by_regs;

   void reset_block(Block* block)
   {
      current_block = block;
      current_instr_idx = 0;
      if ((block->kind & block_kind_loop_header) || block->linear_preds.empty()) {
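         /* Blocks without linear predecessors (the entry block) have nothing
          * to inherit, and loop headers can also be entered through their
          * back-edge, which hasn't been processed yet; start with a clean
          * slate in both cases. */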
         std::fill(instr_idx_by_regs[block->index].begin(), instr_idx_by_regs[block->index].end(),
                   not_written_in_block);
      } else {
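         /* Merge the per-register state of the linear predecessors: a register
          * keeps its last-writer Idx only when all predecessors agree on it,
          * otherwise it's treated as not written in this block. */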
         unsigned first_pred = block->linear_preds[0];
         for (unsigned i = 0; i < max_reg_cnt; i++) {
            bool all_same = std::all_of(
               std::next(block->linear_preds.begin()), block->linear_preds.end(),
               [&](unsigned pred)
               { return instr_idx_by_regs[pred][i] == instr_idx_by_regs[first_pred][i]; });

            if (all_same)
               instr_idx_by_regs[block->index][i] = instr_idx_by_regs[first_pred][i];
            else
               instr_idx_by_regs[block->index][i] = not_written_in_block;
         }
      }
   }

   Instruction* get(Idx idx) { return program->blocks[idx.block].instructions[idx.instr].get(); }
};

void
save_reg_writes(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   for (const Definition& def : instr->definitions) {
      assert(def.regClass().type() != RegType::sgpr || def.physReg().reg() <= 255);
      assert(def.regClass().type() != RegType::vgpr || def.physReg().reg() >= 256);

      unsigned dw_size = DIV_ROUND_UP(def.bytes(), 4u);
      unsigned r = def.physReg().reg();
      Idx idx{ctx.current_block->index, ctx.current_instr_idx};

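      /* Subdword writes can't be tracked precisely, so conservatively mark
       * the affected registers as clobbered. */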
      if (def.regClass().is_subdword())
         idx = clobbered;

      assert((r + dw_size) <= max_reg_cnt);
      assert(def.size() == dw_size || def.regClass().is_subdword());
      std::fill(ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r,
                ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r + dw_size, idx);
   }
}

Idx
last_writer_idx(pr_opt_ctx& ctx, PhysReg physReg, RegClass rc)
{
   /* Verify that all of the operand's registers are written by the same instruction. */
   assert(physReg.reg() < max_reg_cnt);
   Idx instr_idx = ctx.instr_idx_by_regs[ctx.current_block->index][physReg.reg()];
   unsigned dw_size = DIV_ROUND_UP(rc.bytes(), 4u);
   unsigned r = physReg.reg();
   bool all_same =
      std::all_of(ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r,
                  ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r + dw_size,
                  [instr_idx](Idx i) { return i == instr_idx; });

   return all_same ? instr_idx : written_by_multiple_instrs;
}

Idx
last_writer_idx(pr_opt_ctx& ctx, const Operand& op)
{
   if (op.isConstant() || op.isUndefined())
      return const_or_undef;

   assert(op.physReg().reg() < max_reg_cnt);
   Idx instr_idx = ctx.instr_idx_by_regs[ctx.current_block->index][op.physReg().reg()];

#ifndef NDEBUG
   /* Debug mode: verify that all of the operand's registers were written by the same
    * instruction. */
   instr_idx = last_writer_idx(ctx, op.physReg(), op.regClass());
   assert(instr_idx != written_by_multiple_instrs);
#endif

   return instr_idx;
}

bool
is_clobbered_since(pr_opt_ctx& ctx, PhysReg reg, RegClass rc, const Idx& idx)
{
   /* If we didn't find an instruction, assume that the register is clobbered. */
   if (!idx.found())
      return true;

   /* TODO: We currently can't keep track of subdword registers. */
   if (rc.is_subdword())
      return true;

   unsigned begin_reg = reg.reg();
   unsigned end_reg = begin_reg + rc.size();
   unsigned current_block_idx = ctx.current_block->index;

   for (unsigned r = begin_reg; r < end_reg; ++r) {
      Idx& i = ctx.instr_idx_by_regs[current_block_idx][r];
      if (i == clobbered || i == written_by_multiple_instrs)
         return true;
      else if (i == not_written_in_block)
         continue;

      assert(i.found());

      if (i.block > idx.block || (i.block == idx.block && i.instr > idx.instr))
         return true;
   }

   return false;
}

template <typename T>
bool
is_clobbered_since(pr_opt_ctx& ctx, const T& t, const Idx& idx)
{
   return is_clobbered_since(ctx, t.physReg(), t.regClass(), idx);
}

void
try_apply_branch_vcc(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We are looking for the following pattern:
    *
    * vcc = ...                      ; last_vcc_wr
    * sX, scc = s_and_bXX vcc, exec  ; op0_instr
    * (...vcc and exec must not be clobbered in between...)
    * s_cbranch_XX scc               ; instr
    *
    * If possible, the above is optimized into:
    *
    * vcc = ...                      ; last_vcc_wr
    * s_cbranch_XX vcc               ; instr modified to use vcc
    */

   /* Don't try to optimize this on GFX6-7 because SMEM may corrupt the vccz bit. */
   if (ctx.program->chip_class < GFX8)
      return;

   if (instr->format != Format::PSEUDO_BRANCH || instr->operands.size() == 0 ||
       instr->operands[0].physReg() != scc)
      return;

   Idx op0_instr_idx = last_writer_idx(ctx, instr->operands[0]);
   Idx last_vcc_wr_idx = last_writer_idx(ctx, vcc, ctx.program->lane_mask);

   /* We need to make sure:
    * - the instructions that wrote the operand register and VCC are both found
    * - the operand register used by the branch and VCC were both written in the current block
    * - EXEC hasn't been clobbered since the last VCC write
    * - VCC hasn't been clobbered since the operand register was written
    *   (i.e. the last VCC writer precedes the op0 writer)
    */
   if (!op0_instr_idx.found() || !last_vcc_wr_idx.found() ||
       op0_instr_idx.block != ctx.current_block->index ||
       last_vcc_wr_idx.block != ctx.current_block->index ||
       is_clobbered_since(ctx, exec, ctx.program->lane_mask, last_vcc_wr_idx) ||
       is_clobbered_since(ctx, vcc, ctx.program->lane_mask, op0_instr_idx))
      return;

   Instruction* op0_instr = ctx.get(op0_instr_idx);
   Instruction* last_vcc_wr = ctx.get(last_vcc_wr_idx);

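   /* The pattern only matches when op0 is the AND of VCC with EXEC, and VCC
    * was written by a VOPC instruction (a per-lane comparison). */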
   if ((op0_instr->opcode != aco_opcode::s_and_b64 /* wave64 */ &&
        op0_instr->opcode != aco_opcode::s_and_b32 /* wave32 */) ||
       op0_instr->operands[0].physReg() != vcc || op0_instr->operands[1].physReg() != exec ||
       !last_vcc_wr->isVOPC())
      return;

   assert(last_vcc_wr->definitions[0].tempId() == op0_instr->operands[0].tempId());

   /* Reduce the uses of the SCC def */
   ctx.uses[instr->operands[0].tempId()]--;
   /* Use VCC instead of SCC in the branch */
   instr->operands[0] = op0_instr->operands[0];
}

void
try_optimize_scc_nocompare(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We are looking for the following pattern:
    *
    * s_bfe_u32 s0, s3, 0x40018  ; outputs SGPR and SCC if the SGPR != 0
    * s_cmp_eq_i32 s0, 0         ; comparison between the SGPR and 0
    * s_cbranch_scc0 BB3         ; use the result of the comparison, e.g. branch or cselect
    *
    * If possible, the above is optimized into:
    *
    * s_bfe_u32 s0, s3, 0x40018  ; original instruction
    * s_cbranch_scc1 BB3         ; modified to use SCC directly rather than the SGPR with comparison
    *
    */

   if (!instr->isSALU() && !instr->isBranch())
      return;

   if (instr->isSOPC() &&
       (instr->opcode == aco_opcode::s_cmp_eq_u32 || instr->opcode == aco_opcode::s_cmp_eq_i32 ||
        instr->opcode == aco_opcode::s_cmp_lg_u32 || instr->opcode == aco_opcode::s_cmp_lg_i32 ||
        instr->opcode == aco_opcode::s_cmp_eq_u64 || instr->opcode == aco_opcode::s_cmp_lg_u64) &&
       (instr->operands[0].constantEquals(0) || instr->operands[1].constantEquals(0)) &&
       (instr->operands[0].isTemp() || instr->operands[1].isTemp())) {
      /* Make sure the constant is always in operand 1 */
      if (instr->operands[0].isConstant())
         std::swap(instr->operands[0], instr->operands[1]);

      if (ctx.uses[instr->operands[0].tempId()] > 1)
         return;

      /* Make sure both SCC and Operand 0 are written by the same instruction. */
      Idx wr_idx = last_writer_idx(ctx, instr->operands[0]);
      Idx sccwr_idx = last_writer_idx(ctx, scc, s1);
      if (!wr_idx.found() || wr_idx != sccwr_idx)
         return;

      Instruction* wr_instr = ctx.get(wr_idx);
      if (!wr_instr->isSALU() || wr_instr->definitions.size() < 2 ||
          wr_instr->definitions[1].physReg() != scc)
         return;

      /* Look for instructions which set SCC := (D != 0) */
      switch (wr_instr->opcode) {
      case aco_opcode::s_bfe_i32:
      case aco_opcode::s_bfe_i64:
      case aco_opcode::s_bfe_u32:
      case aco_opcode::s_bfe_u64:
      case aco_opcode::s_and_b32:
      case aco_opcode::s_and_b64:
      case aco_opcode::s_andn2_b32:
      case aco_opcode::s_andn2_b64:
      case aco_opcode::s_or_b32:
      case aco_opcode::s_or_b64:
      case aco_opcode::s_orn2_b32:
      case aco_opcode::s_orn2_b64:
      case aco_opcode::s_xor_b32:
      case aco_opcode::s_xor_b64:
      case aco_opcode::s_not_b32:
      case aco_opcode::s_not_b64:
      case aco_opcode::s_nor_b32:
      case aco_opcode::s_nor_b64:
      case aco_opcode::s_xnor_b32:
      case aco_opcode::s_xnor_b64:
      case aco_opcode::s_nand_b32:
      case aco_opcode::s_nand_b64:
      case aco_opcode::s_lshl_b32:
      case aco_opcode::s_lshl_b64:
      case aco_opcode::s_lshr_b32:
      case aco_opcode::s_lshr_b64:
      case aco_opcode::s_ashr_i32:
      case aco_opcode::s_ashr_i64:
      case aco_opcode::s_abs_i32:
      case aco_opcode::s_absdiff_i32: break;
      default: return;
      }

      /* Use the SCC def from wr_instr */
      ctx.uses[instr->operands[0].tempId()]--;
      instr->operands[0] = Operand(wr_instr->definitions[1].getTemp(), scc);
      ctx.uses[instr->operands[0].tempId()]++;

      /* Set the opcode and operand to 32-bit */
      instr->operands[1] = Operand::zero();
      instr->opcode =
         (instr->opcode == aco_opcode::s_cmp_eq_u32 || instr->opcode == aco_opcode::s_cmp_eq_i32 ||
          instr->opcode == aco_opcode::s_cmp_eq_u64)
            ? aco_opcode::s_cmp_eq_u32
            : aco_opcode::s_cmp_lg_u32;
   } else if ((instr->format == Format::PSEUDO_BRANCH && instr->operands.size() == 1 &&
               instr->operands[0].physReg() == scc) ||
              instr->opcode == aco_opcode::s_cselect_b32) {

      /* For cselect, operand 2 is the SCC condition */
      unsigned scc_op_idx = 0;
      if (instr->opcode == aco_opcode::s_cselect_b32) {
         scc_op_idx = 2;
      }

      Idx wr_idx = last_writer_idx(ctx, instr->operands[scc_op_idx]);
      if (!wr_idx.found())
         return;

      Instruction* wr_instr = ctx.get(wr_idx);

      /* Check if we found the pattern above. */
      if (wr_instr->opcode != aco_opcode::s_cmp_eq_u32 &&
          wr_instr->opcode != aco_opcode::s_cmp_lg_u32)
         return;
      if (wr_instr->operands[0].physReg() != scc)
         return;
      if (!wr_instr->operands[1].constantEquals(0))
         return;

      /* The optimization can be unsafe when there are other users. */
      if (ctx.uses[instr->operands[scc_op_idx].tempId()] > 1)
         return;

      if (wr_instr->opcode == aco_opcode::s_cmp_eq_u32) {
         /* Flip the meaning of the instruction to correctly use the SCC. */
         if (instr->format == Format::PSEUDO_BRANCH)
            instr->opcode = instr->opcode == aco_opcode::p_cbranch_z ? aco_opcode::p_cbranch_nz
                                                                     : aco_opcode::p_cbranch_z;
         else if (instr->opcode == aco_opcode::s_cselect_b32)
            std::swap(instr->operands[0], instr->operands[1]);
         else
            unreachable(
               "scc_nocompare optimization is only implemented for p_cbranch and s_cselect");
      }

      /* Use the SCC def from the original instruction, not the comparison */
      ctx.uses[instr->operands[scc_op_idx].tempId()]--;
      instr->operands[scc_op_idx] = wr_instr->operands[0];
   }
}

void
try_combine_dpp(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We are looking for the following pattern:
    *
    * v_mov_dpp vA, vB, ...      ; move instruction with DPP
    * v_xxx vC, vA, ...          ; current instr that uses the result from the move
    *
    * If possible, the above is optimized into:
    *
    * v_xxx_dpp vC, vB, ...      ; current instr modified to use DPP directly
    *
    */

   if (!instr->isVALU() || instr->isDPP() || !can_use_DPP(instr, false))
      return;

   for (unsigned i = 0; i < MIN2(2, instr->operands.size()); i++) {
      Idx op_instr_idx = last_writer_idx(ctx, instr->operands[i]);
      if (!op_instr_idx.found())
         continue;

      Instruction* mov = ctx.get(op_instr_idx);
      if (mov->opcode != aco_opcode::v_mov_b32 || !mov->isDPP())
         continue;

      /* If we aren't going to remove the v_mov_b32, we have to ensure that it doesn't overwrite
       * its own operand before we use it.
       */
      if (mov->definitions[0].physReg() == mov->operands[0].physReg() &&
          (!mov->definitions[0].tempId() || ctx.uses[mov->definitions[0].tempId()] > 1))
         continue;

      /* Don't propagate DPP if the source register is overwritten since the move. */
      if (is_clobbered_since(ctx, mov->operands[0], op_instr_idx))
         continue;

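      /* The DPP source must be in operand 0; if the mov's result feeds
       * operand 1 instead, the operands have to be swappable. */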
      if (i && !can_swap_operands(instr, &instr->opcode))
         continue;

      /* anything else doesn't make sense in SSA */
      assert(mov->dpp().row_mask == 0xf && mov->dpp().bank_mask == 0xf);

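      /* The current instruction stops using the mov's result; if the mov
       * survives, its source gains an extra use through the new DPP operand. */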
      if (--ctx.uses[mov->definitions[0].tempId()])
         ctx.uses[mov->operands[0].tempId()]++;

      convert_to_DPP(instr);

      DPP_instruction* dpp = &instr->dpp();
      if (i) {
         std::swap(dpp->operands[0], dpp->operands[1]);
         std::swap(dpp->neg[0], dpp->neg[1]);
         std::swap(dpp->abs[0], dpp->abs[1]);
      }
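      /* Read the mov's source directly and merge the mov's DPP control and
       * input modifiers into the current instruction. */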
      dpp->operands[0] = mov->operands[0];
      dpp->dpp_ctrl = mov->dpp().dpp_ctrl;
      dpp->bound_ctrl = true;
      dpp->neg[0] ^= mov->dpp().neg[0] && !dpp->abs[0];
      dpp->abs[0] |= mov->dpp().abs[0];
      return;
   }
}

void
process_instruction(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   try_apply_branch_vcc(ctx, instr);

   try_optimize_scc_nocompare(ctx, instr);

   try_combine_dpp(ctx, instr);

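   /* Only record register writes if the instruction still exists; deleted
    * instructions are swept by the cleanup pass in optimize_postRA. */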
   if (instr)
      save_reg_writes(ctx, instr);

   ctx.current_instr_idx++;
}

} // namespace

void
optimize_postRA(Program* program)
{
   pr_opt_ctx ctx;
   ctx.program = program;
   ctx.uses = dead_code_analysis(program);
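   /* One register-writer table per block, so that reset_block can merge the
    * tables of a block's linear predecessors. */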
   ctx.instr_idx_by_regs.resize(program->blocks.size());

   /* Forward pass
    * Goes through each instruction exactly once, and can transform
    * instructions or adjust the use counts of temps.
    */
   for (auto& block : program->blocks) {
      ctx.reset_block(&block);

      for (aco_ptr<Instruction>& instr : block.instructions)
         process_instruction(ctx, instr);
   }

   /* Cleanup pass
    * Gets rid of instructions which are manually deleted or
    * no longer have any uses.
    */
   for (auto& block : program->blocks) {
      auto new_end = std::remove_if(block.instructions.begin(), block.instructions.end(),
                                    [&ctx](const aco_ptr<Instruction>& instr)
                                    { return !instr || is_dead(ctx.uses, instr.get()); });
      block.instructions.resize(new_end - block.instructions.begin());
   }
}

} // namespace aco