/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/dag.h"
#include "util/u_math.h"

#include "ir3.h"
#include "ir3_compiler.h"

#ifdef DEBUG
#define SCHED_DEBUG (ir3_shader_debug & IR3_DBG_SCHEDMSGS)
#else
#define SCHED_DEBUG 0
#endif
#define d(fmt, ...) \
   do { \
      if (SCHED_DEBUG) { \
         mesa_logi("SCHED: " fmt, ##__VA_ARGS__); \
      } \
   } while (0)

#define di(instr, fmt, ...) \
   do { \
      if (SCHED_DEBUG) { \
         struct log_stream *stream = mesa_log_streami(); \
         mesa_log_stream_printf(stream, "SCHED: " fmt ": ", ##__VA_ARGS__); \
         ir3_print_instr_stream(stream, instr); \
         mesa_log_stream_destroy(stream); \
      } \
   } while (0)

/*
 * Instruction Scheduling:
 *
 * A block-level pre-RA scheduler, which works by creating a DAG of
 * instruction dependencies, and heuristically picking a DAG head
 * (instruction with no unscheduled dependencies).
 *
 * Where possible, it tries to pick instructions that avoid nop delay
 * slots, but it will prefer to pick instructions that reduce (or do
 * not increase) the number of live values.
 *
 * If the only possible choices are instructions that increase the
 * number of live values, it will try to pick the one with the earliest
 * consumer (based on pre-sched program order).
 *
 * There are a few special cases that need to be handled, since sched
 * is currently independent of register allocation. Usages of address
 * register (a0.x) or predicate register (p0.x) must be serialized. Ie.
 * if you have two pairs of instructions that write the same special
 * register and then read it, then those pairs cannot be interleaved.
 * To solve this, when we are in such a scheduling "critical section",
 * and we encounter a conflicting write to a special register, we try
 * to schedule any remaining instructions that use that value first.
 *
 * TODO we can detect too-large live_values here.. would be a good place
 * to "spill" cheap things, like move from uniform/immed. (Constructing
 * a list of ssa def consumers before the sched pass would make this
 * easier. Also, in general it might be best not to re-use load_immed
 * across blocks.)
 *
 * TODO we can use (abs)/(neg) src modifiers in a lot of cases to reduce
 * the # of immediates in play (or at least that would help with
 * dEQP-GLES31.functional.ubo.random.all_per_block_buffers.*).. probably
 * do this in a nir pass that inserts fneg/etc? The cp pass should fold
 * these into src modifiers..
 */
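
/* A note on the overall selection order (see choose_instr()): meta
 * instructions go first (choose_instr_prio), then instructions that
 * decrease or do not increase pressure, first with sync-deferral and
 * then without (choose_instr_dec w/ defer=true, then defer=false), and
 * only as a last resort instructions that increase pressure
 * (choose_instr_inc).
 */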

struct ir3_sched_ctx {
   struct ir3_block *block; /* the current block */
   struct dag *dag;

   struct list_head unscheduled_list; /* unscheduled instructions */
   struct ir3_instruction *scheduled; /* last scheduled instr */
   struct ir3_instruction *addr0;     /* current a0.x user, if any */
   struct ir3_instruction *addr1;     /* current a1.x user, if any */
   struct ir3_instruction *pred;      /* current p0.x user, if any */

   struct ir3_instruction *split; /* most-recently-split a0/a1/p0 producer */

   int remaining_kills;
   int remaining_tex;

   bool error;

   unsigned ip;

   int sy_delay;
   int ss_delay;

   /* We order the scheduled (sy)/(ss) producers, and keep track of the
    * index of the last waited on instruction, so we can know which
    * instructions are still outstanding (and therefore would require us to
    * wait for all outstanding instructions before scheduling a use).
    */
   int sy_index, first_outstanding_sy_index;
   int ss_index, first_outstanding_ss_index;
};

struct ir3_sched_node {
   struct dag_node dag; /* must be first for util_dynarray_foreach */
   struct ir3_instruction *instr;

   unsigned delay;
   unsigned max_delay;

   unsigned sy_index;
   unsigned ss_index;

   /* For a ready instruction, the earliest possible ip that it could be
    * scheduled.
    */
   unsigned earliest_ip;

   /* For instructions that are a meta:collect src, once we schedule
    * the first src of the collect, the entire vecN is live (at least
    * from the PoV of the first RA pass.. the 2nd scalar pass can fill
    * in some of the gaps, but often not all). So we want to help out
    * RA, and realize that as soon as we schedule the first collect
    * src, there is no penalty to schedule the remainder (ie. they
    * don't make additional values live). In fact we'd prefer to
    * schedule the rest ASAP to minimize the live range of the vecN.
    *
    * For instructions that are the src of a collect, we track the
    * corresponding collect, and mark them as partially live as soon
    * as any one of the src's is scheduled.
    */
   struct ir3_instruction *collect;
   bool partially_live;

   /* Is this instruction a direct or indirect dependency for a kill?
    * If so, we should prioritize it when possible
    */
   bool kill_path;

   /* This node represents a shader output. A semi-common pattern in
    * shaders is something along the lines of:
    *
    *    fragcolor.w = 1.0
    *
    * Which we'd prefer to schedule as late as possible, since it
    * produces a live value that is never killed/consumed. So detect
    * outputs up-front, and avoid scheduling them unless they reduce
    * register pressure (or at least are neutral).
    */
   bool output;
};

#define foreach_sched_node(__n, __list) \
   list_for_each_entry (struct ir3_sched_node, __n, __list, dag.link)

static void sched_node_init(struct ir3_sched_ctx *ctx,
                            struct ir3_instruction *instr);
static void sched_node_add_dep(struct ir3_instruction *instr,
                               struct ir3_instruction *src, int i);

static bool
is_scheduled(struct ir3_instruction *instr)
{
   return !!(instr->flags & IR3_INSTR_MARK);
}

/* check_src_cond() passing an ir3_sched_ctx. */
static bool
sched_check_src_cond(struct ir3_instruction *instr,
                     bool (*cond)(struct ir3_instruction *,
                                  struct ir3_sched_ctx *),
                     struct ir3_sched_ctx *ctx)
{
   foreach_ssa_src (src, instr) {
      /* meta:split/collect aren't real instructions, the thing that
       * we actually care about is *their* srcs
       */
      if ((src->opc == OPC_META_SPLIT) || (src->opc == OPC_META_COLLECT)) {
         if (sched_check_src_cond(src, cond, ctx))
            return true;
      } else {
         if (cond(src, ctx))
            return true;
      }
   }

   return false;
}

/* Is this a sy producer that hasn't been waited on yet? */

static bool
is_outstanding_sy(struct ir3_instruction *instr, struct ir3_sched_ctx *ctx)
{
   if (!is_sy_producer(instr))
      return false;

   /* The sched node is only valid within the same block, we cannot
    * really say anything about src's from other blocks
    */
   if (instr->block != ctx->block)
      return true;

   struct ir3_sched_node *n = instr->data;
   return n->sy_index >= ctx->first_outstanding_sy_index;
}

static bool
is_outstanding_ss(struct ir3_instruction *instr, struct ir3_sched_ctx *ctx)
{
   if (!is_ss_producer(instr))
      return false;

   /* The sched node is only valid within the same block, we cannot
    * really say anything about src's from other blocks
    */
   if (instr->block != ctx->block)
      return true;

   struct ir3_sched_node *n = instr->data;
   return n->ss_index >= ctx->first_outstanding_ss_index;
}

static unsigned
cycle_count(struct ir3_instruction *instr)
{
   if (instr->opc == OPC_META_COLLECT) {
      /* Assume that only immed/const sources produce moves */
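      /* (e.g. a collect with two immed/const srcs and two GPR srcs will
       * expand to two movs, so it counts as 2)
       */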
      unsigned n = 0;
      foreach_src (src, instr) {
         if (src->flags & (IR3_REG_IMMED | IR3_REG_CONST))
            n++;
      }
      return n;
   } else if (is_meta(instr)) {
      return 0;
   } else {
      return 1;
   }
}

static void
schedule(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
   debug_assert(ctx->block == instr->block);

   /* remove from depth list:
    */
   list_delinit(&instr->node);

   if (writes_addr0(instr)) {
      debug_assert(ctx->addr0 == NULL);
      ctx->addr0 = instr;
   }

   if (writes_addr1(instr)) {
      debug_assert(ctx->addr1 == NULL);
      ctx->addr1 = instr;
   }

   if (writes_pred(instr)) {
      debug_assert(ctx->pred == NULL);
      ctx->pred = instr;
   }

   instr->flags |= IR3_INSTR_MARK;

   di(instr, "schedule");

   list_addtail(&instr->node, &instr->block->instr_list);
   ctx->scheduled = instr;

   if (is_kill_or_demote(instr)) {
      assert(ctx->remaining_kills > 0);
      ctx->remaining_kills--;
   }

   struct ir3_sched_node *n = instr->data;

   /* If this instruction is a meta:collect src, mark the remaining
    * collect srcs as partially live.
    */
   if (n->collect) {
      foreach_ssa_src (src, n->collect) {
         if (src->block != instr->block)
            continue;
         struct ir3_sched_node *sn = src->data;
         sn->partially_live = true;
      }
   }

   bool counts_for_delay = is_alu(instr) || is_flow(instr);

   /* TODO: switch to "cycles". For now try to match ir3_delay. */
   unsigned delay_cycles = counts_for_delay ? 1 + instr->repeat : 0;

   /* We insert any nop's needed to get to earliest_ip, then advance
    * delay_cycles by scheduling the instruction.
    */
   ctx->ip = MAX2(ctx->ip, n->earliest_ip) + delay_cycles;

   util_dynarray_foreach (&n->dag.edges, struct dag_edge, edge) {
      unsigned delay = (unsigned)(uintptr_t)edge->data;
      struct ir3_sched_node *child =
         container_of(edge->child, struct ir3_sched_node, dag);
      child->earliest_ip = MAX2(child->earliest_ip, ctx->ip + delay);
   }

   dag_prune_head(ctx->dag, &n->dag);

   unsigned cycles = cycle_count(instr);

   if (is_ss_producer(instr)) {
      ctx->ss_delay = soft_ss_delay(instr);
      n->ss_index = ctx->ss_index++;
   } else if (!is_meta(instr) &&
              sched_check_src_cond(instr, is_outstanding_ss, ctx)) {
      ctx->ss_delay = 0;
      ctx->first_outstanding_ss_index = ctx->ss_index;
   } else if (ctx->ss_delay > 0) {
      ctx->ss_delay -= MIN2(cycles, ctx->ss_delay);
   }

   if (is_sy_producer(instr)) {
      /* NOTE that this isn't an attempt to hide texture fetch latency,
       * but an attempt to hide the cost of switching to another warp.
       * If we can, we'd like to try to schedule another texture fetch
       * before scheduling something that would sync.
       */
      ctx->sy_delay = soft_sy_delay(instr, ctx->block->shader);
      assert(ctx->remaining_tex > 0);
      ctx->remaining_tex--;
      n->sy_index = ctx->sy_index++;
   } else if (!is_meta(instr) &&
              sched_check_src_cond(instr, is_outstanding_sy, ctx)) {
      ctx->sy_delay = 0;
      ctx->first_outstanding_sy_index = ctx->sy_index;
   } else if (ctx->sy_delay > 0) {
      ctx->sy_delay -= MIN2(cycles, ctx->sy_delay);
   }
}

struct ir3_sched_notes {
   /* there is at least one kill which could be scheduled, except
    * for unscheduled bary.f's:
    */
   bool blocked_kill;
   /* there is at least one instruction that could be scheduled,
    * except for conflicting address/predicate register usage:
    */
   bool addr0_conflict, addr1_conflict, pred_conflict;
};

/* could an instruction be scheduled if specified ssa src was scheduled? */
static bool
could_sched(struct ir3_instruction *instr, struct ir3_instruction *src)
{
   foreach_ssa_src (other_src, instr) {
      /* if dependency not scheduled, we aren't ready yet: */
      if ((src != other_src) && !is_scheduled(other_src)) {
         return false;
      }
   }
   return true;
}

/* Check if instruction is ok to schedule. Make sure it is not blocked
 * by use of addr/predicate register, etc.
 */
static bool
check_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
            struct ir3_instruction *instr)
{
   debug_assert(!is_scheduled(instr));

   if (instr == ctx->split) {
      /* Don't schedule instructions created by splitting an a0.x/a1.x/p0.x
       * write until another "normal" instruction has been scheduled.
       */
      return false;
   }

   if (ctx->remaining_kills && (is_tex(instr) || is_mem(instr))) {
      /* avoid texture/memory access if we have unscheduled kills
       * that could make the expensive operation unnecessary. By
       * definition, if there are remaining kills, and this instr
       * is not a dependency of a kill, there are other instructions
       * that we can choose from.
       */
      struct ir3_sched_node *n = instr->data;
      if (!n->kill_path)
         return false;
   }

   /* For instructions that write the address register, we need to
    * make sure there is at least one instruction that uses the
    * addr value and is otherwise ready to schedule.
    *
    * NOTE if any instructions use pred register and have other
    * src args, we would need to do the same for writes_pred()..
    */
   if (writes_addr0(instr)) {
      struct ir3 *ir = instr->block->shader;
      bool ready = false;
      for (unsigned i = 0; (i < ir->a0_users_count) && !ready; i++) {
         struct ir3_instruction *indirect = ir->a0_users[i];
         if (!indirect)
            continue;
         if (indirect->address->def != instr->dsts[0])
            continue;
         ready = could_sched(indirect, instr);
      }

      /* nothing could be scheduled, so keep looking: */
      if (!ready)
         return false;
   }

   if (writes_addr1(instr)) {
      struct ir3 *ir = instr->block->shader;
      bool ready = false;
      for (unsigned i = 0; (i < ir->a1_users_count) && !ready; i++) {
         struct ir3_instruction *indirect = ir->a1_users[i];
         if (!indirect)
            continue;
         if (indirect->address->def != instr->dsts[0])
            continue;
         ready = could_sched(indirect, instr);
      }

      /* nothing could be scheduled, so keep looking: */
      if (!ready)
         return false;
   }

   /* if this is a write to address/predicate register, and that
    * register is currently in use, we need to defer until it is
    * free:
    */
   if (writes_addr0(instr) && ctx->addr0) {
      debug_assert(ctx->addr0 != instr);
      notes->addr0_conflict = true;
      return false;
   }

   if (writes_addr1(instr) && ctx->addr1) {
      debug_assert(ctx->addr1 != instr);
      notes->addr1_conflict = true;
      return false;
   }

   if (writes_pred(instr) && ctx->pred) {
      debug_assert(ctx->pred != instr);
      notes->pred_conflict = true;
      return false;
   }

   /* if the instruction is a kill, we need to ensure *every*
    * bary.f is scheduled. The hw seems unhappy if the thread
    * gets killed before the end-input (ei) flag is hit.
    *
    * We could do this by adding each bary.f instruction as
    * virtual ssa src for the kill instruction. But we have
    * fixed length instr->srcs[].
    *
    * TODO we could handle this by false-deps now, probably.
    */
   if (is_kill_or_demote(instr)) {
      struct ir3 *ir = instr->block->shader;

      for (unsigned i = 0; i < ir->baryfs_count; i++) {
         struct ir3_instruction *baryf = ir->baryfs[i];
         if (baryf->flags & IR3_INSTR_UNUSED)
            continue;
         if (!is_scheduled(baryf)) {
            notes->blocked_kill = true;
            return false;
         }
      }
   }

   return true;
}

/* Find the instr->ip of the closest use of an instruction, in
 * pre-sched order. This isn't going to be the same as post-sched
 * order, but it is a reasonable approximation to avoid scheduling
 * instructions *too* early. This is mostly to prevent bad behavior
 * in cases where we have a large number of possible instructions
 * to choose from, to avoid creating too much parallelism (ie.
 * blowing up register pressure)
 *
 * See
 * dEQP-GLES31.functional.atomic_counter.layout.reverse_offset.inc_dec.8_counters_5_calls_1_thread
 */
static int
nearest_use(struct ir3_instruction *instr)
{
   unsigned nearest = ~0;
   foreach_ssa_use (use, instr)
      if (!is_scheduled(use))
         nearest = MIN2(nearest, use->ip);

   /* slight hack.. this heuristic tends to push bary.f's to later
    * in the shader, closer to their uses. But we actually would
    * prefer to get these scheduled earlier, to unlock varying
    * storage for more VS jobs:
    */
   if (is_input(instr))
      nearest /= 2;

   return nearest;
}

static bool
is_only_nonscheduled_use(struct ir3_instruction *instr,
                         struct ir3_instruction *use)
{
   foreach_ssa_use (other_use, instr) {
      if (other_use != use && !is_scheduled(other_use))
         return false;
   }

   return true;
}

static unsigned
new_regs(struct ir3_instruction *instr)
{
   unsigned regs = 0;

   foreach_dst (dst, instr) {
      if (!is_dest_gpr(dst))
         continue;
      regs += reg_elems(dst);
   }

   return regs;
}

/* find net change to live values if instruction were scheduled: */
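/* (e.g. an instr that writes one reg and is the last unscheduled use of a
 * vec4 would have new_live=1 and freed_live=4, ie. live_effect=-3)
 */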
static int
live_effect(struct ir3_instruction *instr)
{
   struct ir3_sched_node *n = instr->data;
   int new_live =
      (n->partially_live || !instr->uses || instr->uses->entries == 0)
         ? 0
         : new_regs(instr);
   int freed_live = 0;

   /* if we schedule something that causes a vecN to be live,
    * then count all its other components too:
    */
   if (n->collect)
      new_live *= n->collect->srcs_count;

   foreach_ssa_src_n (src, n, instr) {
      if (__is_false_dep(instr, n))
         continue;

      if (instr->block != src->block)
         continue;

      if (is_only_nonscheduled_use(src, instr))
         freed_live += new_regs(src);
   }

   return new_live - freed_live;
}

/* Determine if this is an instruction that we'd prefer not to schedule
 * yet, in order to avoid an (ss)/(sy) sync. This is limited by the
 * ss_delay/sy_delay counters (ie. the more cycles it has been since
 * the last SFU/tex, the less costly a sync would be) and by the number
 * of outstanding SFU/tex instructions, to prevent a blowup in register
 * pressure.
 */
static bool
should_defer(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
   if (ctx->ss_delay) {
      if (sched_check_src_cond(instr, is_outstanding_ss, ctx))
         return true;
   }

   /* We mostly just want to try to schedule another texture fetch
    * before scheduling something that would (sy) sync, so we can
    * limit this rule to cases where there are remaining texture
    * fetches
    */
   if (ctx->sy_delay && ctx->remaining_tex) {
      if (sched_check_src_cond(instr, is_outstanding_sy, ctx))
         return true;
   }

   /* Avoid scheduling too many outstanding texture or sfu instructions at
    * once by deferring further tex/SFU instructions. This both prevents
    * stalls when the queue of texture/sfu instructions becomes too large,
    * and prevents unacceptably large increases in register pressure from too
    * many outstanding texture instructions.
    */
   if (ctx->sy_index - ctx->first_outstanding_sy_index >= 8 &&
       is_sy_producer(instr))
      return true;

   if (ctx->ss_index - ctx->first_outstanding_ss_index >= 8 &&
       is_ss_producer(instr))
      return true;

   return false;
}

static struct ir3_sched_node *choose_instr_inc(struct ir3_sched_ctx *ctx,
                                               struct ir3_sched_notes *notes,
                                               bool defer, bool avoid_output);

enum choose_instr_dec_rank {
   DEC_NEUTRAL,
   DEC_NEUTRAL_READY,
   DEC_FREED,
   DEC_FREED_READY,
};
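
/* Note: the enum values are ordered so that a numerically larger rank is
 * always preferred; choose_instr_dec() (and choose_instr_inc() with its
 * own rank enum below) compares ranks with a plain '>'.
 */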

static const char *
dec_rank_name(enum choose_instr_dec_rank rank)
{
   switch (rank) {
   case DEC_NEUTRAL:
      return "neutral";
   case DEC_NEUTRAL_READY:
      return "neutral+ready";
   case DEC_FREED:
      return "freed";
   case DEC_FREED_READY:
      return "freed+ready";
   default:
      return NULL;
   }
}

static unsigned
node_delay(struct ir3_sched_ctx *ctx, struct ir3_sched_node *n)
{
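   /* ie. the number of nops schedule() would need to insert before this
    * node if it were scheduled now:
    */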
   return MAX2(n->earliest_ip, ctx->ip) - ctx->ip;
}

/**
 * Chooses an instruction to schedule using the Goodman/Hsu (1988) CSR (Code
 * Scheduling for Register pressure) heuristic.
 *
 * Only handles the case of choosing instructions that reduce register
 * pressure or are neutral.
 */
static struct ir3_sched_node *
choose_instr_dec(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
                 bool defer)
{
   const char *mode = defer ? "-d" : "";
   struct ir3_sched_node *chosen = NULL;
   enum choose_instr_dec_rank chosen_rank = DEC_NEUTRAL;

   foreach_sched_node (n, &ctx->dag->heads) {
      if (defer && should_defer(ctx, n->instr))
         continue;

      unsigned d = node_delay(ctx, n);

      int live = live_effect(n->instr);
      if (live > 0)
         continue;

      if (!check_instr(ctx, notes, n->instr))
         continue;

      enum choose_instr_dec_rank rank;
      if (live < 0) {
         /* Prioritize instrs which free up regs and can be scheduled with no
          * delay.
          */
         if (d == 0)
            rank = DEC_FREED_READY;
         else
            rank = DEC_FREED;
      } else {
         /* Contra the paper, pick a leader with no effect on used regs. This
          * may open up new opportunities, as otherwise a single-operand instr
          * consuming a value will tend to block freeing that value.
          * This had a massive effect on reducing spilling on V3D.
          *
          * XXX: Should this prioritize ready?
          */
         if (d == 0)
            rank = DEC_NEUTRAL_READY;
         else
            rank = DEC_NEUTRAL;
      }

      /* Prefer higher-ranked instructions, or in the case of a rank tie, the
       * highest latency-to-end-of-program instruction.
       */
      if (!chosen || rank > chosen_rank ||
          (rank == chosen_rank && chosen->max_delay < n->max_delay)) {
         chosen = n;
         chosen_rank = rank;
      }
   }

   if (chosen) {
      di(chosen->instr, "dec%s: chose (%s)", mode, dec_rank_name(chosen_rank));
      return chosen;
   }

   return choose_instr_inc(ctx, notes, defer, true);
}

enum choose_instr_inc_rank {
   INC_DISTANCE,
   INC_DISTANCE_READY,
};

static const char *
inc_rank_name(enum choose_instr_inc_rank rank)
{
   switch (rank) {
   case INC_DISTANCE:
      return "distance";
   case INC_DISTANCE_READY:
      return "distance+ready";
   default:
      return NULL;
   }
}

/**
 * When we can't choose an instruction that reduces register pressure or
 * is neutral, we end up here to try and pick the least bad option.
 */
static struct ir3_sched_node *
choose_instr_inc(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
                 bool defer, bool avoid_output)
{
   const char *mode = defer ? "-d" : "";
   struct ir3_sched_node *chosen = NULL;
   enum choose_instr_inc_rank chosen_rank = INC_DISTANCE;

   /*
    * From here on out, we are picking something that increases
    * register pressure. So try to pick something which will
    * be consumed soon:
    */
   unsigned chosen_distance = 0;

   /* Out of the remaining ready set, pick the instruction whose result
    * is consumed soonest (preferring ones that are ready now):
    */
   foreach_sched_node (n, &ctx->dag->heads) {
      if (avoid_output && n->output)
         continue;

      if (defer && should_defer(ctx, n->instr))
         continue;

      if (!check_instr(ctx, notes, n->instr))
         continue;

      unsigned d = node_delay(ctx, n);

      enum choose_instr_inc_rank rank;
      if (d == 0)
         rank = INC_DISTANCE_READY;
      else
         rank = INC_DISTANCE;

      unsigned distance = nearest_use(n->instr);

      if (!chosen || rank > chosen_rank ||
          (rank == chosen_rank && distance < chosen_distance)) {
         chosen = n;
         chosen_distance = distance;
         chosen_rank = rank;
      }
   }

   if (chosen) {
      di(chosen->instr, "inc%s: chose (%s)", mode, inc_rank_name(chosen_rank));
      return chosen;
   }

   return NULL;
}

/* Handles instruction selection for instructions we want to prioritize
 * even if csp/csr would not pick them.
 */
static struct ir3_sched_node *
choose_instr_prio(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes)
{
   struct ir3_sched_node *chosen = NULL;

   foreach_sched_node (n, &ctx->dag->heads) {
      /*
       * - phi nodes and inputs must be scheduled first
       * - split should be scheduled first, so that the vector value is
       *   killed as soon as possible. RA cannot split up the vector and
       *   reuse components that have been killed until the whole vector
       *   has been killed.
       * - collect, on the other hand, should be treated as a "normal"
       *   instruction, and may add to register pressure if its sources are
       *   part of another vector or immediates.
       */
      if (!is_meta(n->instr) || n->instr->opc == OPC_META_COLLECT)
         continue;

      if (!chosen || (chosen->max_delay < n->max_delay))
         chosen = n;
   }

   if (chosen) {
      di(chosen->instr, "prio: chose (meta)");
      return chosen;
   }

   return NULL;
}

static void
dump_state(struct ir3_sched_ctx *ctx)
{
   if (!SCHED_DEBUG)
      return;

   foreach_sched_node (n, &ctx->dag->heads) {
      di(n->instr, "maxdel=%3d le=%d del=%u ", n->max_delay,
         live_effect(n->instr), node_delay(ctx, n));

      util_dynarray_foreach (&n->dag.edges, struct dag_edge, edge) {
         struct ir3_sched_node *child = (struct ir3_sched_node *)edge->child;

         di(child->instr, " -> (%d parents) ", child->dag.parent_count);
      }
   }
}

/* find instruction to schedule: */
static struct ir3_instruction *
choose_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes)
{
   struct ir3_sched_node *chosen;

   dump_state(ctx);

   chosen = choose_instr_prio(ctx, notes);
   if (chosen)
      return chosen->instr;

   chosen = choose_instr_dec(ctx, notes, true);
   if (chosen)
      return chosen->instr;

   chosen = choose_instr_dec(ctx, notes, false);
   if (chosen)
      return chosen->instr;

   chosen = choose_instr_inc(ctx, notes, false, false);
   if (chosen)
      return chosen->instr;

   return NULL;
}

static struct ir3_instruction *
split_instr(struct ir3_sched_ctx *ctx, struct ir3_instruction *orig_instr)
{
   struct ir3_instruction *new_instr = ir3_instr_clone(orig_instr);
   di(new_instr, "split instruction");
   sched_node_init(ctx, new_instr);
   return new_instr;
}

/* "spill" the address registers by remapping any unscheduled
 * instructions which depend on the current address register
 * to a clone of the instruction which wrote the address reg.
 */
static struct ir3_instruction *
split_addr(struct ir3_sched_ctx *ctx, struct ir3_instruction **addr,
           struct ir3_instruction **users, unsigned users_count)
{
   struct ir3_instruction *new_addr = NULL;
   unsigned i;

   debug_assert(*addr);

   for (i = 0; i < users_count; i++) {
      struct ir3_instruction *indirect = users[i];

      if (!indirect)
         continue;

      /* skip instructions already scheduled: */
      if (is_scheduled(indirect))
         continue;

      /* remap remaining instructions using current addr
       * to new addr:
       */
      if (indirect->address->def == (*addr)->dsts[0]) {
         if (!new_addr) {
            new_addr = split_instr(ctx, *addr);
            /* original addr is scheduled, but new one isn't: */
            new_addr->flags &= ~IR3_INSTR_MARK;
         }
         indirect->address->def = new_addr->dsts[0];
         /* don't need to remove old dag edge since old addr is
          * already scheduled:
          */
         sched_node_add_dep(indirect, new_addr, 0);
         di(indirect, "new address");
      }
   }

   /* all remaining indirects remapped to new addr: */
   *addr = NULL;

   return new_addr;
}

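/* NOTE: the caller (sched_block) puts the clone returned by split_addr()/
 * split_pred() back on the unscheduled list, and records it in ctx->split
 * so the clone itself is not scheduled until another "normal" instruction
 * has gone first (see check_instr()).
 */
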
/* "spill" the predicate register by remapping any unscheduled
 * instructions which depend on the current predicate register
 * to a clone of the instruction which wrote the predicate reg.
 */
static struct ir3_instruction *
split_pred(struct ir3_sched_ctx *ctx)
{
   struct ir3 *ir;
   struct ir3_instruction *new_pred = NULL;
   unsigned i;

   debug_assert(ctx->pred);

   ir = ctx->pred->block->shader;

   for (i = 0; i < ir->predicates_count; i++) {
      struct ir3_instruction *predicated = ir->predicates[i];

      if (!predicated)
         continue;

      /* skip instructions already scheduled: */
      if (is_scheduled(predicated))
         continue;

      /* remap remaining instructions using current pred
       * to new pred:
       *
       * TODO is there ever a case when pred isn't first
       * (and only) src?
       */
      if (ssa(predicated->srcs[0]) == ctx->pred) {
         if (!new_pred) {
            new_pred = split_instr(ctx, ctx->pred);
            /* original pred is scheduled, but new one isn't: */
            new_pred->flags &= ~IR3_INSTR_MARK;
         }
         predicated->srcs[0]->instr = new_pred;
         /* don't need to remove old dag edge since old pred is
          * already scheduled:
          */
         sched_node_add_dep(predicated, new_pred, 0);
         di(predicated, "new predicate");
      }
   }

   if (ctx->block->condition == ctx->pred) {
      if (!new_pred) {
         new_pred = split_instr(ctx, ctx->pred);
         /* original pred is scheduled, but new one isn't: */
         new_pred->flags &= ~IR3_INSTR_MARK;
      }
      ctx->block->condition = new_pred;
      d("new branch condition");
   }

   /* all remaining predicated remapped to new pred: */
   ctx->pred = NULL;

   return new_pred;
}

static void
sched_node_init(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
   struct ir3_sched_node *n = rzalloc(ctx->dag, struct ir3_sched_node);

   dag_init_node(ctx->dag, &n->dag);

   n->instr = instr;
   instr->data = n;
}

static void
sched_node_add_dep(struct ir3_instruction *instr, struct ir3_instruction *src,
                   int i)
{
   /* don't consider dependencies in other blocks: */
   if (src->block != instr->block)
      return;

   /* we could have false-dep's that end up unused: */
   if (src->flags & IR3_INSTR_UNUSED) {
      debug_assert(__is_false_dep(instr, i));
      return;
   }

   struct ir3_sched_node *n = instr->data;
   struct ir3_sched_node *sn = src->data;

   /* If src is consumed by a collect, track that to realize that once
    * any of the collect srcs are live, we should hurry up and schedule
    * the rest.
    */
   if (instr->opc == OPC_META_COLLECT)
      sn->collect = instr;

   unsigned d_soft = ir3_delayslots(src, instr, i, true);
   unsigned d = ir3_delayslots(src, instr, i, false);

   /* delays from (ss) and (sy) are considered separately and more accurately
    * in the scheduling heuristic, so ignore them when calculating the ip of
    * instructions, but do consider them when prioritizing which instructions
    * to schedule.
    */
   dag_add_edge_max_data(&sn->dag, &n->dag, (uintptr_t)d);
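   /* the hard delay 'd' travels as the dag edge data, and is read back in
    * schedule() when propagating earliest_ip to this node's children.
    */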

   n->delay = MAX2(n->delay, d_soft);
}

static void
mark_kill_path(struct ir3_instruction *instr)
{
   struct ir3_sched_node *n = instr->data;

   if (n->kill_path) {
      return;
   }

   n->kill_path = true;

   foreach_ssa_src (src, instr) {
      if (src->block != instr->block)
         continue;
      mark_kill_path(src);
   }
}

/* Is it an output? */
static bool
is_output_collect(struct ir3_instruction *instr)
{
   if (instr->opc != OPC_META_COLLECT)
      return false;

   foreach_ssa_use (use, instr) {
      if (use->opc != OPC_END && use->opc != OPC_CHMASK)
         return false;
   }

   return true;
}

/* Is its only use as an output? */
static bool
is_output_only(struct ir3_instruction *instr)
{
   foreach_ssa_use (use, instr)
      if (!is_output_collect(use))
         return false;

   return true;
}

static void
sched_node_add_deps(struct ir3_instruction *instr)
{
   /* There's nothing to do for phi nodes, since they always go first. And
    * phi nodes can reference sources later in the same block, so handling
    * sources is not only unnecessary but could cause problems.
    */
   if (instr->opc == OPC_META_PHI)
      return;

   /* Since foreach_ssa_src() already handles false-dep's we can construct
    * the DAG easily in a single pass.
    */
   foreach_ssa_src_n (src, i, instr) {
      sched_node_add_dep(instr, src, i);
   }

   /* NOTE that all inputs must be scheduled before a kill, so
    * mark these to be prioritized as well:
    */
   if (is_kill_or_demote(instr) || is_input(instr)) {
      mark_kill_path(instr);
   }

   if (is_output_only(instr)) {
      struct ir3_sched_node *n = instr->data;
      n->output = true;
   }
}

static void
sched_dag_max_delay_cb(struct dag_node *node, void *state)
{
   struct ir3_sched_node *n = (struct ir3_sched_node *)node;
   uint32_t max_delay = 0;

   util_dynarray_foreach (&n->dag.edges, struct dag_edge, edge) {
      struct ir3_sched_node *child = (struct ir3_sched_node *)edge->child;
      max_delay = MAX2(child->max_delay, max_delay);
   }

   n->max_delay = MAX2(n->max_delay, max_delay + n->delay);
}

static void
sched_dag_init(struct ir3_sched_ctx *ctx)
{
   ctx->dag = dag_create(ctx);

   foreach_instr (instr, &ctx->unscheduled_list) {
      sched_node_init(ctx, instr);
      sched_node_add_deps(instr);
   }

   dag_traverse_bottom_up(ctx->dag, sched_dag_max_delay_cb, NULL);
}

static void
sched_dag_destroy(struct ir3_sched_ctx *ctx)
{
   ralloc_free(ctx->dag);
   ctx->dag = NULL;
}

static void
sched_block(struct ir3_sched_ctx *ctx, struct ir3_block *block)
{
   ctx->block = block;

   /* addr/pred writes are per-block: */
   ctx->addr0 = NULL;
   ctx->addr1 = NULL;
   ctx->pred = NULL;
   ctx->sy_delay = 0;
   ctx->ss_delay = 0;
   ctx->sy_index = ctx->first_outstanding_sy_index = 0;
   ctx->ss_index = ctx->first_outstanding_ss_index = 0;

   /* move all instructions to the unscheduled list, and
    * empty the block's instruction list (to which we will
    * be inserting).
    */
   list_replace(&block->instr_list, &ctx->unscheduled_list);
   list_inithead(&block->instr_list);

   sched_dag_init(ctx);

   ctx->remaining_kills = 0;
   ctx->remaining_tex = 0;
   foreach_instr_safe (instr, &ctx->unscheduled_list) {
      if (is_kill_or_demote(instr))
         ctx->remaining_kills++;
      if (is_sy_producer(instr))
         ctx->remaining_tex++;
   }

   /* First schedule all meta:input and meta:phi instructions, followed by
    * tex-prefetch. We want all of the instructions that load values into
    * registers before the shader starts to go before any other instructions.
    * But in particular we want inputs to come before prefetches. This is
    * because a FS's bary_ij input may not actually be live in the shader,
    * but it should not be scheduled on top of any other input (but can be
    * overwritten by a tex prefetch)
    *
    * Note: Because the first block cannot have predecessors, meta:input and
    * meta:phi cannot exist in the same block.
    */
   foreach_instr_safe (instr, &ctx->unscheduled_list)
      if (instr->opc == OPC_META_INPUT || instr->opc == OPC_META_PHI)
         schedule(ctx, instr);

   foreach_instr_safe (instr, &ctx->unscheduled_list)
      if (instr->opc == OPC_META_TEX_PREFETCH)
         schedule(ctx, instr);

   while (!list_is_empty(&ctx->unscheduled_list)) {
      struct ir3_sched_notes notes = {0};
      struct ir3_instruction *instr;

      instr = choose_instr(ctx, &notes);
      if (instr) {
         unsigned delay = node_delay(ctx, instr->data);
         d("delay=%u", delay);

         debug_assert(delay <= 6);

         schedule(ctx, instr);

         /* Since we've scheduled a "real" instruction, we can now
          * schedule any split instruction created by the scheduler again.
          */
         ctx->split = NULL;
      } else {
         struct ir3_instruction *new_instr = NULL;
         struct ir3 *ir = block->shader;

         /* nothing available to schedule.. if we are blocked on
          * address/predicate register conflict, then break the
          * deadlock by cloning the instruction that wrote that
          * reg:
          */
         if (notes.addr0_conflict) {
            new_instr =
               split_addr(ctx, &ctx->addr0, ir->a0_users, ir->a0_users_count);
         } else if (notes.addr1_conflict) {
            new_instr =
               split_addr(ctx, &ctx->addr1, ir->a1_users, ir->a1_users_count);
         } else if (notes.pred_conflict) {
            new_instr = split_pred(ctx);
         } else {
            d("unscheduled_list:");
            foreach_instr (instr, &ctx->unscheduled_list)
               di(instr, "unscheduled: ");
            debug_assert(0);
            ctx->error = true;
            return;
         }

         if (new_instr) {
            list_delinit(&new_instr->node);
            list_addtail(&new_instr->node, &ctx->unscheduled_list);
         }

         /* If we produced a new instruction, do not schedule it next to
          * guarantee progress.
          */
         ctx->split = new_instr;
      }
   }

   sched_dag_destroy(ctx);
}

int
ir3_sched(struct ir3 *ir)
{
   struct ir3_sched_ctx *ctx = rzalloc(NULL, struct ir3_sched_ctx);

   foreach_block (block, &ir->block_list) {
      foreach_instr (instr, &block->instr_list) {
         instr->data = NULL;
      }
   }

   ir3_count_instructions(ir);
   ir3_clear_mark(ir);
   ir3_find_ssa_uses(ir, ctx, false);

   foreach_block (block, &ir->block_list) {
      sched_block(ctx, block);
   }

   int ret = ctx->error ? -1 : 0;

   ralloc_free(ctx);

   return ret;
}

static unsigned
get_array_id(struct ir3_instruction *instr)
{
   /* The expectation is that there is only a single array
    * src or dst, ir3_cp should enforce this.
    */

   foreach_dst (dst, instr)
      if (dst->flags & IR3_REG_ARRAY)
         return dst->array.id;
   foreach_src (src, instr)
      if (src->flags & IR3_REG_ARRAY)
         return src->array.id;

   unreachable("this was unexpected");
}

/* does instruction 'prior' need to be scheduled before 'instr'? */
static bool
depends_on(struct ir3_instruction *instr, struct ir3_instruction *prior)
{
   /* TODO for dependencies that are related to a specific object, ie
    * a specific SSBO/image/array, we could relax this constraint to
    * make accesses to unrelated objects not depend on each other (at
    * least as long as not declared coherent)
    */
   if (((instr->barrier_class & IR3_BARRIER_EVERYTHING) &&
        prior->barrier_class) ||
       ((prior->barrier_class & IR3_BARRIER_EVERYTHING) &&
        instr->barrier_class))
      return true;

   if (instr->barrier_class & prior->barrier_conflict) {
      if (!(instr->barrier_class &
            ~(IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W))) {
         /* if only array barrier, then we can further limit false-deps
          * by considering the array-id, ie reads/writes to different
          * arrays do not depend on each other (no aliasing)
          */
         if (get_array_id(instr) != get_array_id(prior)) {
            return false;
         }
      }

      return true;
   }

   return false;
}

static void
add_barrier_deps(struct ir3_block *block, struct ir3_instruction *instr)
{
   struct list_head *prev = instr->node.prev;
   struct list_head *next = instr->node.next;

   /* add dependencies on previous instructions that must be scheduled
    * prior to the current instruction
    */
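   /* (we can stop at the first prior instruction with an identical
    * barrier_class, since it already carries deps on anything earlier
    * that conflicts, making the ordering transitive)
    */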
   while (prev != &block->instr_list) {
      struct ir3_instruction *pi =
         LIST_ENTRY(struct ir3_instruction, prev, node);

      prev = prev->prev;

      if (is_meta(pi))
         continue;

      if (instr->barrier_class == pi->barrier_class) {
         ir3_instr_add_dep(instr, pi);
         break;
      }

      if (depends_on(instr, pi))
         ir3_instr_add_dep(instr, pi);
   }

   /* add dependencies on this instruction to following instructions
    * that must be scheduled after the current instruction:
    */
   while (next != &block->instr_list) {
      struct ir3_instruction *ni =
         LIST_ENTRY(struct ir3_instruction, next, node);

      next = next->next;

      if (is_meta(ni))
         continue;

      if (instr->barrier_class == ni->barrier_class) {
         ir3_instr_add_dep(ni, instr);
         break;
      }

      if (depends_on(ni, instr))
         ir3_instr_add_dep(ni, instr);
   }
}

/* before scheduling a block, we need to add any necessary false-dependencies
 * to ensure that:
 *
 *  (1) barriers are scheduled in the right order wrt instructions related
 *      to the barrier
 *
 *  (2) reads that come before a write actually get scheduled before the
 *      write
 */
bool
ir3_sched_add_deps(struct ir3 *ir)
{
   bool progress = false;

   foreach_block (block, &ir->block_list) {
      foreach_instr (instr, &block->instr_list) {
         if (instr->barrier_class) {
            add_barrier_deps(block, instr);
            progress = true;
         }
      }
   }

   return progress;
}