/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir/nir_builder.h"
#include "nir_constant_expressions.h"
#include "nir_control_flow.h"
#include "nir_loop_analyze.h"

static nir_ssa_def *clone_alu_and_replace_src_defs(nir_builder *b,
                                                   const nir_alu_instr *alu,
                                                   nir_ssa_def **src_defs);

/**
 * Returns the single block that jumps back to the loop header.  Assumes
 * there is exactly one such block.
 */
static nir_block *
find_continue_block(nir_loop *loop)
{
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   assert(header_block->predecessors->entries == 2);

   set_foreach(header_block->predecessors, pred_entry) {
      if (pred_entry->key != prev_block)
         return (nir_block *)pred_entry->key;
   }

   unreachable("Continue block not found!");
}

/**
 * Does a phi have one constant value from outside a loop and one from inside?
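 *
 * For example (a hypothetical sketch in the style of the IR examples below,
 * not taken from a real shader), such a phi might look like:
 *
 *    vec1 32 ssa_0 = load_const (0x00000000)
 *    vec1 32 ssa_1 = load_const (0xffffffff)
 *    ...
 *    vec1 32 ssa_3 = phi block_0: ssa_0, block_7: ssa_1
 *
 * With entry_block being block_0, this yields *entry_val = false and
 * *continue_val = true.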
 */
static bool
phi_has_constant_from_outside_and_one_from_inside_loop(nir_phi_instr *phi,
                                                       const nir_block *entry_block,
                                                       bool *entry_val,
                                                       bool *continue_val)
{
   /* We already know we have exactly one continue */
   assert(exec_list_length(&phi->srcs) == 2);

   *entry_val = false;
   *continue_val = false;

   nir_foreach_phi_src(src, phi) {
      if (!nir_src_is_const(src->src))
         return false;

      if (src->pred != entry_block) {
         *continue_val = nir_src_as_bool(src->src);
      } else {
         *entry_val = nir_src_as_bool(src->src);
      }
   }

   return true;
}

/**
 * This optimization detects if statements at the tops of loops where the
 * condition is a phi node of two constants and moves half of the if to above
 * the loop and the other half of the if to the end of the loop.  A simple for
 * loop "for (int i = 0; i < 4; i++)", when run through the SPIR-V front-end,
 * ends up looking something like this:
 *
 *    vec1 32 ssa_0 = load_const (0x00000000)
 *    vec1 32 ssa_1 = load_const (0xffffffff)
 *    loop {
 *       block block_1:
 *       vec1 32 ssa_2 = phi block_0: ssa_0, block_7: ssa_5
 *       vec1 32 ssa_3 = phi block_0: ssa_0, block_7: ssa_1
 *       if ssa_3 {
 *          block block_2:
 *          vec1 32 ssa_4 = load_const (0x00000001)
 *          vec1 32 ssa_5 = iadd ssa_2, ssa_4
 *       } else {
 *          block block_3:
 *       }
 *       block block_4:
 *       vec1 32 ssa_6 = load_const (0x00000004)
 *       vec1 32 ssa_7 = ilt ssa_5, ssa_6
 *       if ssa_7 {
 *          block block_5:
 *       } else {
 *          block block_6:
 *          break
 *       }
 *       block block_7:
 *    }
 *
 * This turns it into something like this:
 *
 *    // Stuff from block 1
 *    // Stuff from block 3
 *    loop {
 *       block block_1:
 *       vec1 32 ssa_2 = phi block_0: ssa_0, block_7: ssa_5
 *       vec1 32 ssa_6 = load_const (0x00000004)
 *       vec1 32 ssa_7 = ilt ssa_2, ssa_6
 *       if ssa_7 {
 *          block block_5:
 *       } else {
 *          block block_6:
 *          break
 *       }
 *       block block_7:
 *       // Stuff from block 1
 *       // Stuff from block 2
 *       vec1 32 ssa_4 = load_const (0x00000001)
 *       vec1 32 ssa_5 = iadd ssa_2, ssa_4
 *    }
 */
static bool
opt_peel_loop_initial_if(nir_loop *loop)
{
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *const prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   /* It would be insane if this were not true */
   assert(_mesa_set_search(header_block->predecessors, prev_block));

   /* The loop must have exactly one continue block which could be a block
    * ending in a continue instruction or the "natural" continue from the
    * last block in the loop back to the top.
    */
   if (header_block->predecessors->entries != 2)
      return false;

   nir_cf_node *if_node = nir_cf_node_next(&header_block->cf_node);
   if (!if_node || if_node->type != nir_cf_node_if)
      return false;

   nir_if *nif = nir_cf_node_as_if(if_node);
   assert(nif->condition.is_ssa);

   nir_ssa_def *cond = nif->condition.ssa;
   if (cond->parent_instr->type != nir_instr_type_phi)
      return false;

   nir_phi_instr *cond_phi = nir_instr_as_phi(cond->parent_instr);
   if (cond->parent_instr->block != header_block)
      return false;

   bool entry_val = false, continue_val = false;
   if (!phi_has_constant_from_outside_and_one_from_inside_loop(cond_phi,
                                                               prev_block,
                                                               &entry_val,
                                                               &continue_val))
      return false;

   /* If they both execute or both don't execute, this is a job for
    * nir_dead_cf, not this pass.
    */
   if ((entry_val && continue_val) || (!entry_val && !continue_val))
      return false;

   struct exec_list *continue_list, *entry_list;
   if (continue_val) {
      continue_list = &nif->then_list;
      entry_list = &nif->else_list;
   } else {
      continue_list = &nif->else_list;
      entry_list = &nif->then_list;
   }

   /* We want to be moving the contents of entry_list to above the loop so it
    * can't contain any break or continue instructions.
    */
   foreach_list_typed(nir_cf_node, cf_node, node, entry_list) {
      nir_foreach_block_in_cf_node(block, cf_node) {
         nir_instr *last_instr = nir_block_last_instr(block);
         if (last_instr && last_instr->type == nir_instr_type_jump)
            return false;
      }
   }

   /* We're about to re-arrange a bunch of blocks so make sure that we don't
    * have deref uses which cross block boundaries.  We don't want a deref
    * accidentally ending up in a phi.
    */
   nir_rematerialize_derefs_in_use_blocks_impl(
      nir_cf_node_get_function(&loop->cf_node));

   /* Before we do anything, convert the loop to LCSSA.  We're about to
    * replace a bunch of SSA defs with registers and this will prevent any of
    * it from leaking outside the loop.
    */
   nir_convert_loop_to_lcssa(loop);

   nir_block *after_if_block =
      nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));

   /* Get rid of phis in the header block since we will be duplicating it */
   nir_lower_phis_to_regs_block(header_block);
   /* Get rid of phis after the if since dominance will change */
   nir_lower_phis_to_regs_block(after_if_block);

   /* Get rid of SSA defs in the pieces we're about to move around */
   nir_lower_ssa_defs_to_regs_block(header_block);
   nir_foreach_block_in_cf_node(block, &nif->cf_node)
      nir_lower_ssa_defs_to_regs_block(block);

   nir_cf_list header, tmp;
   nir_cf_extract(&header, nir_before_block(header_block),
                  nir_after_block(header_block));

   nir_cf_list_clone(&tmp, &header, &loop->cf_node, NULL);
   nir_cf_reinsert(&tmp, nir_before_cf_node(&loop->cf_node));
   nir_cf_extract(&tmp, nir_before_cf_list(entry_list),
                  nir_after_cf_list(entry_list));
   nir_cf_reinsert(&tmp, nir_before_cf_node(&loop->cf_node));

   nir_cf_reinsert(&header,
                   nir_after_block_before_jump(find_continue_block(loop)));

   bool continue_list_jumps =
      nir_block_ends_in_jump(exec_node_data(nir_block,
                                            exec_list_get_tail(continue_list),
                                            cf_node.node));

   nir_cf_extract(&tmp, nir_before_cf_list(continue_list),
                  nir_after_cf_list(continue_list));

   /* Get the continue block again as the previous reinsert might have
    * removed the block.  Also, if both the continue list and the continue
    * block end in jump instructions, remove the jump from the latter, as it
    * will not be executed if we insert the continue list before it.
    */

   nir_block *continue_block = find_continue_block(loop);

   if (continue_list_jumps) {
      nir_instr *last_instr = nir_block_last_instr(continue_block);
      if (last_instr && last_instr->type == nir_instr_type_jump)
         nir_instr_remove(last_instr);
   }

   nir_cf_reinsert(&tmp,
                   nir_after_block_before_jump(continue_block));

   nir_cf_node_remove(&nif->cf_node);

   return true;
}

static bool
alu_instr_is_comparison(const nir_alu_instr *alu)
{
   switch (alu->op) {
   case nir_op_flt32:
   case nir_op_fge32:
   case nir_op_feq32:
   case nir_op_fneu32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_ieq32:
   case nir_op_ine32:
      return true;
   default:
      return nir_alu_instr_is_comparison(alu);
   }
}

static bool
alu_instr_is_type_conversion(const nir_alu_instr *alu)
{
   return nir_op_infos[alu->op].num_inputs == 1 &&
          nir_op_infos[alu->op].output_type != nir_op_infos[alu->op].input_types[0];
}

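/**
 * Is this a bcsel whose sources are all trivial SSA values defined in the
 * same block, with a condition phi that selects only constants?
 *
 * A sketch of an accepted instruction (hypothetical SSA numbering, modeled
 * on the opt_split_alu_of_phi() example below):
 *
 *    vec1 32 ssa_13 = phi block_0: ssa_10, block_4: ssa_16
 *    ...
 *    vec1 32 ssa_15 = b32csel ssa_13, ssa_14, ssa_12
 *
 * The condition (first source) must be a phi from this block whose sources
 * are all constants.  If allow_non_phi_src is set, at most one of the two
 * value sources may be something other than a phi from this block.
 */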
static bool
is_trivial_bcsel(const nir_instr *instr, bool allow_non_phi_src)
{
   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *const bcsel = nir_instr_as_alu(instr);
   if (bcsel->op != nir_op_bcsel &&
       bcsel->op != nir_op_b32csel &&
       bcsel->op != nir_op_fcsel)
      return false;

   for (unsigned i = 0; i < 3; i++) {
      if (!nir_alu_src_is_trivial_ssa(bcsel, i) ||
          bcsel->src[i].src.ssa->parent_instr->block != instr->block)
         return false;

      if (bcsel->src[i].src.ssa->parent_instr->type != nir_instr_type_phi) {
         /* opt_split_alu_of_phi() is able to peel that src from the loop */
         if (i == 0 || !allow_non_phi_src)
            return false;
         allow_non_phi_src = false;
      }
   }

   nir_foreach_phi_src(src, nir_instr_as_phi(bcsel->src[0].src.ssa->parent_instr)) {
      if (!nir_src_is_const(src->src))
         return false;
   }

   return true;
}

/**
 * Splits ALU instructions that have a source that is a phi node
 *
 * ALU instructions in the header block of a loop that meet the following
 * criteria can be split.
 *
 * - The loop has no continue instructions other than the "natural" continue
 *   at the bottom of the loop.
 *
 * - At least one source of the instruction is a phi node from the header block.
 *
 * - Any non-phi sources of the ALU instruction come from a block that
 *   dominates the block before the loop.  The most common failure mode for
 *   this check is sources that are generated in the loop header block.
 *
 * - The phi node selects a constant or undef from the block before the loop,
 *   or the only ALU user is a trivial bcsel that gets removed by peeling the
 *   ALU.
 *
 * The split process splits the original ALU instruction into two, one at the
 * bottom of the loop and one at the block before the loop.  The instruction
 * before the loop computes the value on the first iteration, and the
 * instruction at the bottom computes the value on the second, third, and so
 * on.  A new phi node is added to the header block that selects either the
 * instruction before the loop or the one at the end, and uses of the original
 * instruction are replaced by this phi.
 *
 * The splitting transforms a loop like:
 *
 *    vec1 32 ssa_8 = load_const (0x00000001)
 *    vec1 32 ssa_10 = load_const (0x00000000)
 *    // succs: block_1
 *    loop {
 *       block block_1:
 *       // preds: block_0 block_4
 *       vec1 32 ssa_11 = phi block_0: ssa_10, block_4: ssa_15
 *       vec1 32 ssa_12 = phi block_0: ssa_1, block_4: ssa_15
 *       vec1 32 ssa_13 = phi block_0: ssa_10, block_4: ssa_16
 *       vec1 32 ssa_14 = iadd ssa_11, ssa_8
 *       vec1 32 ssa_15 = b32csel ssa_13, ssa_14, ssa_12
 *       ...
 *       // succs: block_1
 *    }
 *
 * into:
 *
 *    vec1 32 ssa_8 = load_const (0x00000001)
 *    vec1 32 ssa_10 = load_const (0x00000000)
 *    vec1 32 ssa_22 = iadd ssa_10, ssa_8
 *    // succs: block_1
 *    loop {
 *       block block_1:
 *       // preds: block_0 block_4
 *       vec1 32 ssa_11 = phi block_0: ssa_10, block_4: ssa_15
 *       vec1 32 ssa_12 = phi block_0: ssa_1, block_4: ssa_15
 *       vec1 32 ssa_13 = phi block_0: ssa_10, block_4: ssa_16
 *       vec1 32 ssa_21 = phi block_0: ssa_22, block_4: ssa_20
 *       vec1 32 ssa_15 = b32csel ssa_13, ssa_21, ssa_12
 *       ...
 *       vec1 32 ssa_20 = iadd ssa_15, ssa_8
 *       // succs: block_1
 *    }
 */
static bool
opt_split_alu_of_phi(nir_builder *b, nir_loop *loop)
{
   bool progress = false;
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *const prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   /* It would be insane if this were not true */
   assert(_mesa_set_search(header_block->predecessors, prev_block));

   /* The loop must have exactly one continue block which could be a block
    * ending in a continue instruction or the "natural" continue from the
    * last block in the loop back to the top.
    */
   if (header_block->predecessors->entries != 2)
      return false;

   nir_block *continue_block = find_continue_block(loop);
   if (continue_block == header_block)
      return false;

   nir_foreach_instr_safe(instr, header_block) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *const alu = nir_instr_as_alu(instr);

      /* nir_op_vec{2,3,4} and nir_op_mov are excluded because they can easily
       * lead to infinite optimization loops.  Splitting comparisons can lead
       * to loop unrolling not recognizing loop terminators, and type
       * conversions also lead to regressions.
       */
      if (nir_op_is_vec(alu->op) ||
          alu_instr_is_comparison(alu) ||
          alu_instr_is_type_conversion(alu))
         continue;

      bool has_phi_src_from_prev_block = false;
      bool all_non_phi_exist_in_prev_block = true;
      bool is_prev_result_undef = true;
      bool is_prev_result_const = true;
      nir_ssa_def *prev_srcs[8];     // FINISHME: Array size?
      nir_ssa_def *continue_srcs[8]; // FINISHME: Array size?

      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         nir_instr *const src_instr = alu->src[i].src.ssa->parent_instr;

         /* If the source is a phi in the loop header block, then the
          * prev_srcs and continue_srcs will come from the different sources
          * of the phi.
          */
         if (src_instr->type == nir_instr_type_phi &&
             src_instr->block == header_block) {
            nir_phi_instr *const phi = nir_instr_as_phi(src_instr);

            /* Only strictly need to NULL out the pointers when the assertions
             * (below) are compiled in.  Debugging a NULL pointer deref in the
             * wild is easier than debugging a random pointer deref, so set
             * NULL unconditionally just to be safe.
             */
            prev_srcs[i] = NULL;
            continue_srcs[i] = NULL;

            nir_foreach_phi_src(src_of_phi, phi) {
               if (src_of_phi->pred == prev_block) {
                  if (src_of_phi->src.ssa->parent_instr->type !=
                      nir_instr_type_ssa_undef) {
                     is_prev_result_undef = false;
                  }

                  if (src_of_phi->src.ssa->parent_instr->type !=
                      nir_instr_type_load_const) {
                     is_prev_result_const = false;
                  }

                  prev_srcs[i] = src_of_phi->src.ssa;
                  has_phi_src_from_prev_block = true;
               } else
                  continue_srcs[i] = src_of_phi->src.ssa;
            }

            assert(prev_srcs[i] != NULL);
            assert(continue_srcs[i] != NULL);
         } else {
            /* If the source is not a phi (or a phi in a block other than the
             * loop header), then the value must exist in prev_block.
             */
            if (!nir_block_dominates(src_instr->block, prev_block)) {
               all_non_phi_exist_in_prev_block = false;
               break;
            }

            prev_srcs[i] = alu->src[i].src.ssa;
            continue_srcs[i] = alu->src[i].src.ssa;
         }
      }

      if (!has_phi_src_from_prev_block || !all_non_phi_exist_in_prev_block)
         continue;

      if (!is_prev_result_undef && !is_prev_result_const) {
         /* check if the only user is a trivial bcsel */
         if (!list_is_empty(&alu->dest.dest.ssa.if_uses) ||
             !list_is_singular(&alu->dest.dest.ssa.uses))
            continue;

         nir_src *use = list_first_entry(&alu->dest.dest.ssa.uses, nir_src, use_link);
         if (!is_trivial_bcsel(use->parent_instr, true))
            continue;
      }

      /* Split ALU of Phi */
      b->cursor = nir_after_block(prev_block);
      nir_ssa_def *prev_value = clone_alu_and_replace_src_defs(b, alu, prev_srcs);

      /* Make a copy of the original ALU instruction.  Replace the sources
       * of the new instruction that read a phi with an undef source from
       * prev_block with the non-undef source of that phi.
       *
       * Insert the new instruction at the end of the continue block.
       */
      b->cursor = nir_after_block_before_jump(continue_block);

      nir_ssa_def *const alu_copy =
         clone_alu_and_replace_src_defs(b, alu, continue_srcs);

      /* Make a new phi node that selects a value from prev_block and the
       * result of the new instruction from continue_block.
       */
      nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
      nir_phi_instr_add_src(phi, prev_block, nir_src_for_ssa(prev_value));
      nir_phi_instr_add_src(phi, continue_block, nir_src_for_ssa(alu_copy));

      nir_ssa_dest_init(&phi->instr, &phi->dest,
                        alu_copy->num_components, alu_copy->bit_size, NULL);

      b->cursor = nir_after_phis(header_block);
      nir_builder_instr_insert(b, &phi->instr);

      /* Modify all readers of the original ALU instruction to read the
       * result of the phi.
       */
      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
                               &phi->dest.ssa);

      /* Since the original ALU instruction no longer has any readers, just
       * remove it.
       */
      nir_instr_remove_v(&alu->instr);
      nir_instr_free(&alu->instr);

      progress = true;
   }

   return progress;
}

/**
 * Simplify a bcsel whose sources are all phi nodes from the loop header block
 *
 * bcsel instructions in a loop that meet the following criteria can be
 * converted to phi nodes:
 *
 * - The loop has no continue instructions other than the "natural" continue
 *   at the bottom of the loop.
 *
 * - All of the sources of the bcsel are phi nodes in the header block of the
 *   loop.
 *
 * - The phi node representing the condition of the bcsel instruction chooses
 *   only constant values.
 *
 * The constant value from the condition will select one of the other sources
 * when entered from outside the loop and the remaining source when entered
 * from the continue block.  Since each of these sources is also a phi node in
 * the header block, the value of the phi node can be "evaluated."  These
 * evaluated phi nodes provide the sources for a new phi node.  All users of
 * the bcsel result are updated to use the phi node result.
 *
 * The replacement transforms loops like:
 *
 *    vec1 32 ssa_7 = undefined
 *    vec1 32 ssa_8 = load_const (0x00000001)
 *    vec1 32 ssa_9 = load_const (0x000000c8)
 *    vec1 32 ssa_10 = load_const (0x00000000)
 *    // succs: block_1
 *    loop {
 *       block block_1:
 *       // preds: block_0 block_4
 *       vec1 32 ssa_11 = phi block_0: ssa_1, block_4: ssa_14
 *       vec1 32 ssa_12 = phi block_0: ssa_10, block_4: ssa_15
 *       vec1 32 ssa_13 = phi block_0: ssa_7, block_4: ssa_25
 *       vec1 32 ssa_14 = b32csel ssa_12, ssa_13, ssa_11
 *       vec1 32 ssa_16 = ige32 ssa_14, ssa_9
 *       ...
 *       vec1 32 ssa_15 = load_const (0xffffffff)
 *       ...
 *       vec1 32 ssa_25 = iadd ssa_14, ssa_8
 *       // succs: block_1
 *    }
 *
 * into:
 *
 *    vec1 32 ssa_7 = undefined
 *    vec1 32 ssa_8 = load_const (0x00000001)
 *    vec1 32 ssa_9 = load_const (0x000000c8)
 *    vec1 32 ssa_10 = load_const (0x00000000)
 *    // succs: block_1
 *    loop {
 *       block block_1:
 *       // preds: block_0 block_4
 *       vec1 32 ssa_11 = phi block_0: ssa_1, block_4: ssa_14
 *       vec1 32 ssa_12 = phi block_0: ssa_10, block_4: ssa_15
 *       vec1 32 ssa_13 = phi block_0: ssa_7, block_4: ssa_25
 *       vec1 32 ssa_26 = phi block_0: ssa_1, block_4: ssa_25
 *       vec1 32 ssa_16 = ige32 ssa_26, ssa_9
 *       ...
 *       vec1 32 ssa_15 = load_const (0xffffffff)
 *       ...
 *       vec1 32 ssa_25 = iadd ssa_26, ssa_8
 *       // succs: block_1
 *    }
 *
 * \note
 * It may be possible to modify this function to not require a phi node as the
 * source of the bcsel that is selected when entering from outside the loop.
 * The only restriction is that the source must be generated outside the loop
 * (since it will become the source of a phi node in the header block of the
 * loop).
 */
static bool
opt_simplify_bcsel_of_phi(nir_builder *b, nir_loop *loop)
{
   bool progress = false;
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *const prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   /* It would be insane if this were not true */
   assert(_mesa_set_search(header_block->predecessors, prev_block));

   /* The loop must have exactly one continue block which could be a block
    * ending in a continue instruction or the "natural" continue from the
    * last block in the loop back to the top.
    */
   if (header_block->predecessors->entries != 2)
      return false;

   /* We can move any bcsel that is guaranteed to execute on every iteration
    * of a loop.  For now this is accomplished by only taking bcsels from the
    * header_block.  In the future, this could be expanded to include any
    * bcsel that must come before any break.
    *
    * For more details, see
    * https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/170#note_110305
    */
   nir_foreach_instr_safe(instr, header_block) {
      if (!is_trivial_bcsel(instr, false))
         continue;

      nir_alu_instr *const bcsel = nir_instr_as_alu(instr);
      nir_phi_instr *const cond_phi =
         nir_instr_as_phi(bcsel->src[0].src.ssa->parent_instr);

      bool entry_val = false, continue_val = false;
      if (!phi_has_constant_from_outside_and_one_from_inside_loop(cond_phi,
                                                                  prev_block,
                                                                  &entry_val,
                                                                  &continue_val))
         continue;

      /* If they both execute or both don't execute, this is a job for
       * nir_dead_cf, not this pass.
       */
      if ((entry_val && continue_val) || (!entry_val && !continue_val))
         continue;

      const unsigned entry_src = entry_val ? 1 : 2;
      const unsigned continue_src = entry_val ? 2 : 1;

      /* Create a new phi node that selects the value for prev_block from
       * the bcsel source that is selected by entry_val and the value for
       * continue_block from the other bcsel source.  Both sources have
       * already been verified to be phi nodes.
       */
      nir_block *continue_block = find_continue_block(loop);
      nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
      nir_phi_instr_add_src(phi, prev_block,
                            nir_phi_get_src_from_block(nir_instr_as_phi(bcsel->src[entry_src].src.ssa->parent_instr),
                                                       prev_block)->src);

      nir_phi_instr_add_src(phi, continue_block,
                            nir_phi_get_src_from_block(nir_instr_as_phi(bcsel->src[continue_src].src.ssa->parent_instr),
                                                       continue_block)->src);

      nir_ssa_dest_init(&phi->instr,
                        &phi->dest,
                        nir_dest_num_components(bcsel->dest.dest),
                        nir_dest_bit_size(bcsel->dest.dest),
                        NULL);

      b->cursor = nir_after_phis(header_block);
      nir_builder_instr_insert(b, &phi->instr);

      /* Modify all readers of the bcsel instruction to read the result of
       * the phi.
       */
      nir_ssa_def_rewrite_uses(&bcsel->dest.dest.ssa,
                               &phi->dest.ssa);

      /* Since the original bcsel instruction no longer has any readers,
       * just remove it.
       */
      nir_instr_remove_v(&bcsel->instr);
      nir_instr_free(&bcsel->instr);

      progress = true;
   }

   return progress;
}

static bool
is_block_empty(nir_block *block)
{
   return nir_cf_node_is_last(&block->cf_node) &&
          exec_list_is_empty(&block->instr_list);
}

static bool
nir_block_ends_in_continue(nir_block *block)
{
   if (exec_list_is_empty(&block->instr_list))
      return false;

   nir_instr *instr = nir_block_last_instr(block);
   return instr->type == nir_instr_type_jump &&
          nir_instr_as_jump(instr)->type == nir_jump_continue;
}

/**
 * This optimization turns:
 *
 *    loop {
 *       ...
 *       if (cond) {
 *          do_work_1();
 *          continue;
 *       } else {
 *       }
 *       do_work_2();
 *    }
 *
 * into:
 *
 *    loop {
 *       ...
 *       if (cond) {
 *          do_work_1();
 *          continue;
 *       } else {
 *          do_work_2();
 *       }
 *    }
 *
 * The continue should then be removed by nir_opt_trivial_continues() and the
 * loop can potentially be unrolled.
 *
 * Note: Unless the function param aggressive_last_continue is true,
 * do_work_2() is only ever blocks and nested loops.  We avoid nesting other
 * if-statements in the branch as this can result in increased register
 * pressure, and in the i965 driver it causes a large amount of spilling in
 * shader-db.  For RADV, however, nesting these if-statements allows further
 * continues to be removed and provides a significant FPS boost in Doom,
 * which is why we have opted for this special bool to enable more aggressive
 * optimisations.
 * TODO: The GCM pass solves most of the spilling regressions in i965, if it
 * is ever enabled we should consider removing the aggressive_last_continue
 * param.
 */
static bool
opt_if_loop_last_continue(nir_loop *loop, bool aggressive_last_continue)
{
   nir_if *nif = NULL;
   bool then_ends_in_continue = false;
   bool else_ends_in_continue = false;

   /* Scan the control flow of the loop from the last to the first node
    * looking for an if-statement we can optimise.
    */
   nir_block *last_block = nir_loop_last_block(loop);
   nir_cf_node *if_node = nir_cf_node_prev(&last_block->cf_node);
   while (if_node) {
      if (if_node->type == nir_cf_node_if) {
         nif = nir_cf_node_as_if(if_node);
         nir_block *then_block = nir_if_last_then_block(nif);
         nir_block *else_block = nir_if_last_else_block(nif);

         then_ends_in_continue = nir_block_ends_in_continue(then_block);
         else_ends_in_continue = nir_block_ends_in_continue(else_block);

         /* If both branches end in a jump do nothing, this should be handled
          * by nir_opt_dead_cf().
          */
         if ((then_ends_in_continue || nir_block_ends_in_break(then_block)) &&
             (else_ends_in_continue || nir_block_ends_in_break(else_block)))
            return false;
         /* If a continue was found, stop scanning and attempt the
          * optimisation.  When not being aggressive, also stop at the first
          * if-statement regardless.
          */
         if (then_ends_in_continue || else_ends_in_continue ||
             !aggressive_last_continue)
            break;
      }

      if_node = nir_cf_node_prev(if_node);
   }

   /* If we didn't find an if to optimise return */
   if (!nif || (!then_ends_in_continue && !else_ends_in_continue))
      return false;

   /* If there is nothing after the if-statement we bail */
   if (&nif->cf_node == nir_cf_node_prev(&last_block->cf_node) &&
       exec_list_is_empty(&last_block->instr_list))
      return false;

   /* Move the last block of the loop inside the last if-statement */
   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_after_cf_node(if_node),
                  nir_after_block(last_block));
   if (then_ends_in_continue)
      nir_cf_reinsert(&tmp, nir_after_cf_list(&nif->else_list));
   else
      nir_cf_reinsert(&tmp, nir_after_cf_list(&nif->then_list));

   /* In order to avoid running nir_lower_regs_to_ssa_impl() every time an if
    * opt makes progress we leave nir_opt_trivial_continues() to remove the
    * continue now that the end of the loop has been simplified.
    */

   return true;
}

/* Walk all the phis in the block immediately following the if statement and
 * swap the blocks.
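 *
 * For example (hypothetical SSA numbering), a phi such as
 *
 *    vec1 32 ssa_9 = phi old_then_block: ssa_1, old_else_block: ssa_2
 *
 * becomes
 *
 *    vec1 32 ssa_9 = phi new_then_block: ssa_1, new_else_block: ssa_2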
 */
static void
rewrite_phi_predecessor_blocks(nir_if *nif,
                               nir_block *old_then_block,
                               nir_block *old_else_block,
                               nir_block *new_then_block,
                               nir_block *new_else_block)
{
   nir_block *after_if_block =
      nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));

   nir_foreach_instr(instr, after_if_block) {
      if (instr->type != nir_instr_type_phi)
         continue;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      foreach_list_typed(nir_phi_src, src, node, &phi->srcs) {
         if (src->pred == old_then_block) {
            src->pred = new_then_block;
         } else if (src->pred == old_else_block) {
            src->pred = new_else_block;
         }
      }
   }
}

/**
 * This optimization turns:
 *
 *    if (cond) {
 *    } else {
 *       do_work();
 *    }
 *
 * into:
 *
 *    if (!cond) {
 *       do_work();
 *    } else {
 *    }
 */
static bool
opt_if_simplification(nir_builder *b, nir_if *nif)
{
   /* Only simplify if the then block is empty and the else block is not. */
   if (!is_block_empty(nir_if_first_then_block(nif)) ||
       is_block_empty(nir_if_first_else_block(nif)))
      return false;

   /* Make sure the condition is a comparison operation. */
   nir_instr *src_instr = nif->condition.ssa->parent_instr;
   if (src_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu_instr = nir_instr_as_alu(src_instr);
   if (!nir_alu_instr_is_comparison(alu_instr))
      return false;

   /* Insert the inverted instruction and rewrite the condition. */
   b->cursor = nir_after_instr(&alu_instr->instr);

   nir_ssa_def *new_condition =
      nir_inot(b, &alu_instr->dest.dest.ssa);

   nir_if_rewrite_condition(nif, nir_src_for_ssa(new_condition));

   /* Grab pointers to the last then/else blocks for fixing up the phis. */
   nir_block *then_block = nir_if_last_then_block(nif);
   nir_block *else_block = nir_if_last_else_block(nif);

   if (nir_block_ends_in_jump(else_block)) {
      /* Even though this if statement has a jump on one side, we may still have
       * phis afterwards.  Single-source phis can be produced by loop unrolling
       * or dead control-flow passes and are perfectly legal.  Run a quick phi
       * removal on the block after the if to clean up any such phis.
       */
      nir_block *const next_block =
         nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));
      nir_opt_remove_phis_block(next_block);
   }

   rewrite_phi_predecessor_blocks(nif, then_block, else_block, else_block,
                                  then_block);

   /* Finally, move the else block to the then block. */
   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_cf_list(&nif->else_list),
                  nir_after_cf_list(&nif->else_list));
   nir_cf_reinsert(&tmp, nir_before_cf_list(&nif->then_list));

   return true;
}

/**
 * This optimization simplifies potential loop terminators which then allows
 * other passes such as opt_if_simplification() and loop unrolling to progress
 * further:
 *
 *    if (cond) {
 *       ... then block instructions ...
 *    } else {
 *       ...
 *       break;
 *    }
 *
 * into:
 *
 *    if (cond) {
 *    } else {
 *       ...
 *       break;
 *    }
 *    ... then block instructions ...
 */
static bool
opt_if_loop_terminator(nir_if *nif)
{
   nir_block *break_blk = NULL;
   nir_block *continue_from_blk = NULL;
   bool continue_from_then = true;

   nir_block *last_then = nir_if_last_then_block(nif);
   nir_block *last_else = nir_if_last_else_block(nif);

   if (nir_block_ends_in_break(last_then)) {
      break_blk = last_then;
      continue_from_blk = last_else;
      continue_from_then = false;
   } else if (nir_block_ends_in_break(last_else)) {
      break_blk = last_else;
      continue_from_blk = last_then;
   }

   /* Continue if the if-statement contained no jumps at all */
   if (!break_blk)
      return false;

   /* If the continue from block is empty then return as there is nothing to
    * move.
    */
   nir_block *first_continue_from_blk = continue_from_then ?
      nir_if_first_then_block(nif) :
      nir_if_first_else_block(nif);
   if (is_block_empty(first_continue_from_blk))
      return false;

   if (nir_block_ends_in_jump(continue_from_blk))
      return false;

   /* Even though this if statement has a jump on one side, we may still have
    * phis afterwards.  Single-source phis can be produced by loop unrolling
    * or dead control-flow passes and are perfectly legal.  Run a quick phi
    * removal on the block after the if to clean up any such phis.
    */
   nir_opt_remove_phis_block(nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));

   /* Finally, move the continue from branch after the if-statement. */
   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
                  nir_after_block(continue_from_blk));
   nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));

   return true;
}

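/* The condition of an if-statement has a known value at any point dominated
 * by one of its branches: true inside the then branch and false inside the
 * else branch.
 */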
static bool
evaluate_if_condition(nir_if *nif, nir_cursor cursor, bool *value)
{
   nir_block *use_block = nir_cursor_current_block(cursor);
   if (nir_block_dominates(nir_if_first_then_block(nif), use_block)) {
      *value = true;
      return true;
   } else if (nir_block_dominates(nir_if_first_else_block(nif), use_block)) {
      *value = false;
      return true;
   } else {
      return false;
   }
}

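/* Clone an ALU instruction, replacing each source with the corresponding
 * SSA def from src_defs while keeping the per-source swizzle and modifiers,
 * and insert the clone at the builder's current cursor.
 */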
static nir_ssa_def *
clone_alu_and_replace_src_defs(nir_builder *b, const nir_alu_instr *alu,
                               nir_ssa_def **src_defs)
{
   nir_alu_instr *nalu = nir_alu_instr_create(b->shader, alu->op);
   nalu->exact = alu->exact;

   nir_ssa_dest_init(&nalu->instr, &nalu->dest.dest,
                     alu->dest.dest.ssa.num_components,
                     alu->dest.dest.ssa.bit_size, NULL);

   nalu->dest.saturate = alu->dest.saturate;
   nalu->dest.write_mask = alu->dest.write_mask;

   for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
      assert(alu->src[i].src.is_ssa);
      nalu->src[i].src = nir_src_for_ssa(src_defs[i]);
      nalu->src[i].negate = alu->src[i].negate;
      nalu->src[i].abs = alu->src[i].abs;
      memcpy(nalu->src[i].swizzle, alu->src[i].swizzle,
             sizeof(nalu->src[i].swizzle));
   }

   nir_builder_instr_insert(b, &nalu->instr);

   return &nalu->dest.dest.ssa;
}

/*
 * This propagates if condition evaluation down the chain of some alu
 * instructions.  For example, by checking the use of some of the following
 * alu instructions we can eventually replace ssa_107 with NIR_TRUE.
 *
 *    loop {
 *       block block_1:
 *       vec1 32 ssa_85 = load_const (0x00000002)
 *       vec1 32 ssa_86 = ieq ssa_48, ssa_85
 *       vec1 32 ssa_87 = load_const (0x00000001)
 *       vec1 32 ssa_88 = ieq ssa_48, ssa_87
 *       vec1 32 ssa_89 = ior ssa_86, ssa_88
 *       vec1 32 ssa_90 = ieq ssa_48, ssa_0
 *       vec1 32 ssa_91 = ior ssa_89, ssa_90
 *       if ssa_86 {
 *          block block_2:
 *          ...
 *          break
 *       } else {
 *          block block_3:
 *       }
 *       block block_4:
 *       if ssa_88 {
 *          block block_5:
 *          ...
 *          break
 *       } else {
 *          block block_6:
 *       }
 *       block block_7:
 *       if ssa_90 {
 *          block block_8:
 *          ...
 *          break
 *       } else {
 *          block block_9:
 *       }
 *       block block_10:
 *       vec1 32 ssa_107 = inot ssa_91
 *       if ssa_107 {
 *          block block_11:
 *          break
 *       } else {
 *          block block_12:
 *       }
 *    }
 */
static bool
propagate_condition_eval(nir_builder *b, nir_if *nif, nir_src *use_src,
                         nir_src *alu_use, nir_alu_instr *alu,
                         bool is_if_condition)
{
   bool bool_value;
   b->cursor = nir_before_src(alu_use, is_if_condition);
   if (!evaluate_if_condition(nif, b->cursor, &bool_value))
      return false;

   nir_ssa_def *def[NIR_MAX_VEC_COMPONENTS] = {0};
   for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
      if (alu->src[i].src.ssa == use_src->ssa) {
         def[i] = nir_imm_bool(b, bool_value);
      } else {
         def[i] = alu->src[i].src.ssa;
      }
   }

   nir_ssa_def *nalu = clone_alu_and_replace_src_defs(b, alu, def);

   /* Rewrite use to use new alu instruction */
   nir_src new_src = nir_src_for_ssa(nalu);

   if (is_if_condition)
      nir_if_rewrite_condition(alu_use->parent_if, new_src);
   else
      nir_instr_rewrite_src(alu_use->parent_instr, alu_use, new_src);

   return true;
}

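/* An if-condition value can usefully be substituted into these boolean ALU
 * ops, and into the condition (first) source of a bcsel.
 */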
static bool
can_propagate_through_alu(nir_src *src)
{
   if (src->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(src->parent_instr);
   switch (alu->op) {
   case nir_op_ior:
   case nir_op_iand:
   case nir_op_inot:
   case nir_op_b2i32:
      return true;
   case nir_op_bcsel:
      return src == &alu->src[0].src;
   default:
      return false;
   }
}

static bool
evaluate_condition_use(nir_builder *b, nir_if *nif, nir_src *use_src,
                       bool is_if_condition)
{
   bool progress = false;

   b->cursor = nir_before_src(use_src, is_if_condition);

   bool bool_value;
   if (evaluate_if_condition(nif, b->cursor, &bool_value)) {
      /* Rewrite use to use const */
      nir_src imm_src = nir_src_for_ssa(nir_imm_bool(b, bool_value));
      if (is_if_condition)
         nir_if_rewrite_condition(use_src->parent_if, imm_src);
      else
         nir_instr_rewrite_src(use_src->parent_instr, use_src, imm_src);

      progress = true;
   }

   if (!is_if_condition && can_propagate_through_alu(use_src)) {
      nir_alu_instr *alu = nir_instr_as_alu(use_src->parent_instr);

      nir_foreach_use_safe(alu_use, &alu->dest.dest.ssa) {
         progress |= propagate_condition_eval(b, nif, use_src, alu_use, alu,
                                              false);
      }

      nir_foreach_if_use_safe(alu_use, &alu->dest.dest.ssa) {
         progress |= propagate_condition_eval(b, nif, use_src, alu_use, alu,
                                              true);
      }
   }

   return progress;
}

static bool
opt_if_evaluate_condition_use(nir_builder *b, nir_if *nif)
{
   bool progress = false;

   /* Evaluate any uses of the if condition inside the if branches */
   assert(nif->condition.is_ssa);
   nir_foreach_use_safe(use_src, nif->condition.ssa) {
      progress |= evaluate_condition_use(b, nif, use_src, false);
   }

   nir_foreach_if_use_safe(use_src, nif->condition.ssa) {
      if (use_src->parent_if != nif)
         progress |= evaluate_condition_use(b, nif, use_src, true);
   }

   return progress;
}

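/* Rewrite uses of scalar's component inside one branch of the if (the else
 * branch when invert is set, the then branch otherwise) to read the
 * equivalent component of new_scalar instead.
 */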
static bool
rewrite_comp_uses_within_if(nir_builder *b, nir_if *nif, bool invert,
                            nir_ssa_scalar scalar, nir_ssa_scalar new_scalar)
{
   bool progress = false;

   nir_block *first = invert ? nir_if_first_else_block(nif) : nir_if_first_then_block(nif);
   nir_block *last = invert ? nir_if_last_else_block(nif) : nir_if_last_then_block(nif);

   nir_ssa_def *new_ssa = NULL;
   nir_foreach_use_safe(use, scalar.def) {
      if (use->parent_instr->block->index < first->index ||
          use->parent_instr->block->index > last->index)
         continue;

      /* Only rewrite users which use only the new component.  This is to avoid a
       * situation where copy propagation will undo the rewrite and we risk an infinite
       * loop.
       *
       * We could rewrite users which use a mix of the old and new components, but if
       * nir_src_components_read() is incomplete, then we risk the new component actually
       * being unused and some optimization later undoing the rewrite.
       */
      if (nir_src_components_read(use) != BITFIELD64_BIT(scalar.comp))
         continue;

      if (!new_ssa) {
         b->cursor = nir_before_cf_node(&nif->cf_node);
         new_ssa = nir_channel(b, new_scalar.def, new_scalar.comp);
         if (scalar.def->num_components > 1) {
            nir_ssa_def *vec = nir_ssa_undef(b, scalar.def->num_components, scalar.def->bit_size);
            new_ssa = nir_vector_insert_imm(b, vec, new_ssa, scalar.comp);
         }
      }

      nir_instr_rewrite_src_ssa(use->parent_instr, use, new_ssa);
      progress = true;
   }

   return progress;
}

/*
 * This optimization turns:
 *
 *    if (a == (b=readfirstlane(a)))
 *       use(a)
 *    if (c == (d=load_const))
 *       use(c)
 *
 * into:
 *
 *    if (a == (b=readfirstlane(a)))
 *       use(b)
 *    if (c == (d=load_const))
 *       use(d)
 */
static bool
opt_if_rewrite_uniform_uses(nir_builder *b, nir_if *nif, nir_ssa_scalar cond, bool accept_ine)
{
   bool progress = false;

   if (!nir_ssa_scalar_is_alu(cond))
      return false;

   nir_op op = nir_ssa_scalar_alu_op(cond);
   if (op == nir_op_iand) {
      progress |= opt_if_rewrite_uniform_uses(b, nif, nir_ssa_scalar_chase_alu_src(cond, 0), false);
      progress |= opt_if_rewrite_uniform_uses(b, nif, nir_ssa_scalar_chase_alu_src(cond, 1), false);
      return progress;
   }

   if (op != nir_op_ieq && (op != nir_op_ine || !accept_ine))
      return false;

   for (unsigned i = 0; i < 2; i++) {
      nir_ssa_scalar src_uni = nir_ssa_scalar_chase_alu_src(cond, i);
      nir_ssa_scalar src_div = nir_ssa_scalar_chase_alu_src(cond, !i);

      if (src_uni.def->parent_instr->type == nir_instr_type_load_const && src_div.def != src_uni.def)
         return rewrite_comp_uses_within_if(b, nif, op == nir_op_ine, src_div, src_uni);

      if (src_uni.def->parent_instr->type != nir_instr_type_intrinsic)
         continue;
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src_uni.def->parent_instr);
      if (intrin->intrinsic != nir_intrinsic_read_first_invocation &&
          (intrin->intrinsic != nir_intrinsic_reduce || nir_intrinsic_cluster_size(intrin)))
         continue;

      nir_ssa_scalar intrin_src = {intrin->src[0].ssa, src_uni.comp};
      nir_ssa_scalar resolved_intrin_src = nir_ssa_scalar_resolved(intrin_src.def, intrin_src.comp);

      if (resolved_intrin_src.comp != src_div.comp || resolved_intrin_src.def != src_div.def)
         continue;

      progress |= rewrite_comp_uses_within_if(b, nif, op == nir_op_ine, resolved_intrin_src, src_uni);
      if (intrin_src.comp != resolved_intrin_src.comp || intrin_src.def != resolved_intrin_src.def)
         progress |= rewrite_comp_uses_within_if(b, nif, op == nir_op_ine, intrin_src, src_uni);

      return progress;
   }

   return false;
}

static void
simple_merge_if(nir_if *dest_if, nir_if *src_if, bool dest_if_then,
                bool src_if_then)
{
   /* Now merge the if branch */
   nir_block *dest_blk = dest_if_then ? nir_if_last_then_block(dest_if)
                                      : nir_if_last_else_block(dest_if);

   struct exec_list *list = src_if_then ? &src_if->then_list
                                        : &src_if->else_list;

   nir_cf_list if_cf_list;
   nir_cf_extract(&if_cf_list, nir_before_cf_list(list),
                  nir_after_cf_list(list));
   nir_cf_reinsert(&if_cf_list, nir_after_block(dest_blk));
}

static bool
opt_if_merge(nir_if *nif)
{
   bool progress = false;

   nir_block *next_blk = nir_cf_node_cf_tree_next(&nif->cf_node);
   if (!next_blk || !nif->condition.is_ssa)
      return false;

   nir_if *next_if = nir_block_get_following_if(next_blk);
   if (!next_if || !next_if->condition.is_ssa)
      return false;

   /* Here we merge two consecutive ifs that have the same condition e.g:
    *
    *    if ssa_12 {
    *       ...
    *    } else {
    *       ...
    *    }
    *    if ssa_12 {
    *       ...
    *    } else {
    *       ...
    *    }
    *
    * Note: This only merges if-statements when the block between them is
    * empty.  The reason we don't try to merge ifs that just have phis between
    * them is because this can result in increased register pressure.  For
    * example when merging if ladders created by indirect indexing.
    */
   if (nif->condition.ssa == next_if->condition.ssa &&
       exec_list_is_empty(&next_blk->instr_list)) {

      /* This optimization isn't made to work in this case and
       * opt_if_evaluate_condition_use will optimize it later.
       */
      if (nir_block_ends_in_jump(nir_if_last_then_block(nif)) ||
          nir_block_ends_in_jump(nir_if_last_else_block(nif)))
         return false;

      simple_merge_if(nif, next_if, true, true);
      simple_merge_if(nif, next_if, false, false);

      nir_block *new_then_block = nir_if_last_then_block(nif);
      nir_block *new_else_block = nir_if_last_else_block(nif);

      nir_block *old_then_block = nir_if_last_then_block(next_if);
      nir_block *old_else_block = nir_if_last_else_block(next_if);

      /* Rewrite the predecessor block for any phis following the second
       * if-statement.
       */
      rewrite_phi_predecessor_blocks(next_if, old_then_block,
                                     old_else_block,
                                     new_then_block,
                                     new_else_block);

      /* Move phis after merged if to avoid them being deleted when we remove
       * the merged if-statement.
       */
      nir_block *after_next_if_block =
         nir_cf_node_as_block(nir_cf_node_next(&next_if->cf_node));

      nir_foreach_instr_safe(instr, after_next_if_block) {
         if (instr->type != nir_instr_type_phi)
            break;

         exec_node_remove(&instr->node);
         exec_list_push_tail(&next_blk->instr_list, &instr->node);
         instr->block = next_blk;
      }

      nir_cf_node_remove(&next_if->cf_node);

      progress = true;
   }

   return progress;
}

static bool
opt_if_cf_list(nir_builder *b, struct exec_list *cf_list,
               bool aggressive_last_continue)
{
   bool progress = false;
   foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block:
         break;

      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(cf_node);
         progress |= opt_if_cf_list(b, &nif->then_list,
                                    aggressive_last_continue);
         progress |= opt_if_cf_list(b, &nif->else_list,
                                    aggressive_last_continue);
         progress |= opt_if_loop_terminator(nif);
         progress |= opt_if_merge(nif);
         progress |= opt_if_simplification(b, nif);
         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cf_node);
         progress |= opt_if_cf_list(b, &loop->body,
                                    aggressive_last_continue);
         progress |= opt_simplify_bcsel_of_phi(b, loop);
         progress |= opt_if_loop_last_continue(loop,
                                               aggressive_last_continue);
         break;
      }

      case nir_cf_node_function:
         unreachable("Invalid cf type");
      }
   }

   return progress;
}

static bool
opt_peel_loop_initial_if_cf_list(struct exec_list *cf_list)
{
   bool progress = false;
   foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block:
         break;

      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(cf_node);
         progress |= opt_peel_loop_initial_if_cf_list(&nif->then_list);
         progress |= opt_peel_loop_initial_if_cf_list(&nif->else_list);
         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cf_node);
         progress |= opt_peel_loop_initial_if_cf_list(&loop->body);
         progress |= opt_peel_loop_initial_if(loop);
         break;
      }

      case nir_cf_node_function:
         unreachable("Invalid cf type");
      }
   }

   return progress;
}

/**
 * These optimisations depend on nir_metadata_block_index and therefore must
 * not do anything to cause the metadata to become invalid.
 */
static bool
opt_if_safe_cf_list(nir_builder *b, struct exec_list *cf_list)
{
   bool progress = false;
   foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block:
         break;

      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(cf_node);
         progress |= opt_if_safe_cf_list(b, &nif->then_list);
         progress |= opt_if_safe_cf_list(b, &nif->else_list);
         progress |= opt_if_evaluate_condition_use(b, nif);
         nir_ssa_scalar cond = nir_ssa_scalar_resolved(nif->condition.ssa, 0);
         progress |= opt_if_rewrite_uniform_uses(b, nif, cond, true);
         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cf_node);
         progress |= opt_if_safe_cf_list(b, &loop->body);
         progress |= opt_split_alu_of_phi(b, loop);
         break;
      }

      case nir_cf_node_function:
         unreachable("Invalid cf type");
      }
   }

   return progress;
}

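/**
 * Optimize if-statements and related loop control flow in each function of
 * the shader.
 *
 * A minimal usage sketch (hypothetical driver code; running this pass in a
 * fixed-point loop alongside cleanup passes such as nir_opt_dead_cf() is a
 * common pattern, not something this file mandates):
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_if(shader, false);
 *       progress |= nir_opt_dead_cf(shader);
 *    } while (progress);
 */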
bool
nir_opt_if(nir_shader *shader, bool aggressive_last_continue)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl == NULL)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_metadata_require(function->impl, nir_metadata_block_index |
                                           nir_metadata_dominance);
      progress |= opt_if_safe_cf_list(&b, &function->impl->body);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);

      bool preserve = true;

      if (opt_if_cf_list(&b, &function->impl->body, aggressive_last_continue)) {
         preserve = false;
         progress = true;
      }

      if (opt_peel_loop_initial_if_cf_list(&function->impl->body)) {
         preserve = false;
         progress = true;

         /* If that made progress, we're no longer really in SSA form.  We
          * need to convert registers back into SSA defs and clean up SSA defs
          * that don't dominate their uses.
          */
         nir_lower_regs_to_ssa_impl(function->impl);
      }

      if (preserve) {
         nir_metadata_preserve(function->impl, nir_metadata_all);
      } else {
         nir_metadata_preserve(function->impl, nir_metadata_none);
      }
   }

   return progress;
}