/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "brw_fs.h"
#include "brw_fs_live_variables.h"
#include "brw_vec4.h"
#include "brw_cfg.h"
#include "brw_shader.h"

using namespace brw;

/** @file brw_fs_schedule_instructions.cpp
 *
 * List scheduling of FS instructions.
 *
 * The basic model of the list scheduler is to take a basic block,
 * compute a DAG of the dependencies (RAW ordering with latency, WAW
 * ordering with latency, WAR ordering), and make a list of the DAG heads.
 * Heuristically pick a DAG head, then put all the children that are
 * now DAG heads into the list of things to schedule.
 *
 * The heuristic is the important part.  We're trying to be cheap,
 * since actually computing the optimal scheduling is NP-complete.
 * What we do is track a "current clock".  When we schedule a node, we
 * update the earliest-unblocked clock time of its children, and
 * increment the clock.  Then, when trying to schedule, we just pick
 * the earliest-unblocked instruction to schedule.
 *
 * Note that often there will be many things which could execute
 * immediately, and there is a range of heuristic options to choose
 * from in picking among those.
 */
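
/* A small illustration of the model above (example values, not measured
 * data): given a block
 *
 *    A: send  -> feeds C, latency 200
 *    B: mul   -> feeds C, latency 14
 *    C: add A, B
 *
 * A and B start out as DAG heads.  Scheduling A at clock 0 marks C as
 * unblocked no earlier than clock 200; scheduling B next only raises that
 * to max(200, clock + 14).  C then becomes the sole head and is picked once
 * its earliest-unblocked time is the best on the candidate list.
 */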

static bool debug = false;

class instruction_scheduler;

class schedule_node : public exec_node
{
public:
   schedule_node(backend_instruction *inst, instruction_scheduler *sched);
   void set_latency_gen4();
   void set_latency_gen7(bool is_haswell);

   backend_instruction *inst;
   schedule_node **children;
   int *child_latency;
   int child_count;
   int parent_count;
   int child_array_size;
   int unblocked_time;
   int latency;

   /**
    * Which iteration of pushing groups of children onto the candidates list
    * this node was a part of.
    */
   unsigned cand_generation;

   /**
    * The sum of the instruction's latency and the maximum delay of its
    * children, or just the issue_time if it's a leaf node.
    */
   int delay;

   /**
    * Preferred exit node among the (direct or indirect) successors of this
    * node.  Among the scheduler nodes blocked by this node, this will be the
    * one that may cause earliest program termination, or NULL if none of the
    * successors is an exit node.
    */
   schedule_node *exit;
};

/**
 * Lower bound of the scheduling time after which one of the instructions
 * blocked by this node may lead to program termination.
 *
 * exit_unblocked_time() determines a strict partial ordering relation '«' on
 * the set of scheduler nodes as follows:
 *
 *   n « m <-> exit_unblocked_time(n) < exit_unblocked_time(m)
 *
 * which can be used to heuristically order nodes according to how early they
 * can unblock an exit node and lead to program termination.
 */
static inline int
exit_unblocked_time(const schedule_node *n)
{
   return n->exit ? n->exit->unblocked_time : INT_MAX;
}
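
/* For example (illustrative numbers): a node whose earliest reachable exit
 * unblocks at cycle 40 orders before one whose exit unblocks at cycle 90,
 * and a node with no exit among its successors compares as INT_MAX, i.e.
 * last.  Nodes with equal times are unordered, which is why '«' is only a
 * partial order.
 */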

void
schedule_node::set_latency_gen4()
{
   int chans = 8;
   int math_latency = 22;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
      this->latency = 1 * chans * math_latency;
      break;
   case SHADER_OPCODE_RSQ:
      this->latency = 2 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
      /* full precision log.  partial is 2. */
      this->latency = 3 * chans * math_latency;
      break;
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_EXP2:
      /* full precision.  partial is 3, same throughput. */
      this->latency = 4 * chans * math_latency;
      break;
   case SHADER_OPCODE_POW:
      this->latency = 8 * chans * math_latency;
      break;
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* minimum latency, max is 12 rounds. */
      this->latency = 5 * chans * math_latency;
      break;
   default:
      this->latency = 2;
      break;
   }
}
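
/* The Gen4 numbers above model math as a per-channel cost of
 * rounds * channels * the 22-cycle round latency: e.g. RCP works out to
 * 1 * 8 * 22 = 176 cycles, and POW to 8 * 8 * 22 = 1408 cycles.
 */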

void
schedule_node::set_latency_gen7(bool is_haswell)
{
   switch (inst->opcode) {
   case BRW_OPCODE_MAD:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 18 cycles on IVB, 16 on HSW
       *  (since the last two src operands are in different register banks):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       *
       * 20 cycles on IVB, 18 on HSW
       *  (since the last two src operands are in the same register bank):
       * mad(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = is_haswell ? 16 : 18;
      break;

   case BRW_OPCODE_LRP:
      /* 2 cycles
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 3 cycles on IVB, 4 on HSW
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       *
       * 16 cycles on IVB, 14 on HSW
       *  (since the last two src operands are in different register banks):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g3.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       *
       * 16 cycles
       *  (since the last two src operands are in the same register bank):
       * lrp(8) g4<1>F g2.2<4,4,1>F.x  g2<4,4,1>F.x g2.1<4,4,1>F.x { align16 WE_normal 1Q };
       * mov(8) null   g4<4,4,1>F                     { align16 WE_normal 1Q };
       */

      /* Our register allocator doesn't know about register banks, so use the
       * higher latency.
       */
      latency = 14;
      break;

   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      /* 2 cycles:
       * math inv(8) g4<1>F g2<0,1,0>F      null       { align1 WE_normal 1Q };
       *
       * 18 cycles:
       * math inv(8) g4<1>F g2<0,1,0>F      null       { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * Same for exp2, log2, rsq, sqrt, sin, cos.
       */
      latency = is_haswell ? 14 : 16;
      break;

   case SHADER_OPCODE_POW:
      /* 2 cycles:
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F  { align1 WE_normal 1Q };
       *
       * 26 cycles:
       * math pow(8) g4<1>F g2<0,1,0>F   g2.1<0,1,0>F  { align1 WE_normal 1Q };
       * mov(8)      null   g4<8,8,1>F                 { align1 WE_normal 1Q };
       */
      latency = is_haswell ? 22 : 24;
      break;

   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_LZ:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXL_LZ:
      /* 18 cycles:
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       *
       * 697 +/- 49 cycles (min 610, n=26):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * So our first texture load of the batchbuffer takes ~700 cycles,
       * since the caches are cold at that point.
       *
       * 840 +/- 92 cycles (min 720, n=25):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * On the second load, it takes just an extra ~140 cycles, and after
       * accounting for the 14 cycles of the MOV's latency, that makes ~130.
       *
       * 683 +/- 49 cycles (min = 602, n=47):
       * mov(8)  g115<1>F   0F                         { align1 WE_normal 1Q };
       * mov(8)  g114<1>F   0F                         { align1 WE_normal 1Q };
       * send(8) g4<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * send(8) g50<1>UW   g114<8,8,1>F
       *   sampler (10, 0, 0, 1) mlen 2 rlen 4         { align1 WE_normal 1Q };
       * mov(8)  null       g4<8,8,1>F                 { align1 WE_normal 1Q };
       *
       * The unit appears to be pipelined, since this matches up with the
       * cache-cold case, despite there being two loads here.  If you replace
       * the g4 in the MOV to null with g50, it's still 693 +/- 52 (n=39).
       *
       * So, take some number between the cache-hot 140 cycles and the
       * cache-cold 700 cycles.  No particular tuning was done on this.
       *
       * I haven't done significant testing of the non-TEX opcodes.  TXL at
       * least looked about the same as TEX.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_TXS:
      /* Testing textureSize(sampler2D, 0), one load was 420 +/- 41
       * cycles (n=15):
       * mov(8)   g114<1>UD  0D                        { align1 WE_normal 1Q };
       * send(8)  g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 1) mlen 1 rlen 4        { align1 WE_normal 1Q };
       * mov(16)  g6<1>F     g6<8,8,1>D                { align1 WE_normal 1Q };
       *
       * Two loads was 535 +/- 30 cycles (n=19):
       * mov(16)   g114<1>UD  0D                       { align1 WE_normal 1H };
       * send(16)  g6<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8        { align1 WE_normal 1H };
       * mov(16)   g114<1>UD  0D                       { align1 WE_normal 1H };
       * mov(16)   g6<1>F     g6<8,8,1>D               { align1 WE_normal 1H };
       * send(16)  g8<1>UW    g114<8,8,1>F
       *   sampler (10, 0, 10, 2) mlen 2 rlen 8        { align1 WE_normal 1H };
       * mov(16)   g8<1>F     g8<8,8,1>D               { align1 WE_normal 1H };
       * add(16)   g6<1>F     g6<8,8,1>F   g8<8,8,1>F  { align1 WE_normal 1H };
       *
       * Since the only caches that should matter are just the
       * instruction/state cache containing the surface state, assume that we
       * always have hot caches.
       */
      latency = 100;
      break;

   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      /* testing using varying-index pull constants:
       *
       * 16 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       *
       * ~480 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       *
       * ~620 cycles:
       * mov(8)  g4<1>D  g2.1<0,1,0>F                  { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       * send(8) g4<1>F  g4<8,8,1>D
       *   data (9, 2, 3) mlen 1 rlen 1                { align1 WE_normal 1Q };
       * mov(8)  null    g4<8,8,1>F                    { align1 WE_normal 1Q };
       *
       * So, if it's cache-hot, it's about 140.  If it's cache cold, it's
       * about 460.  We expect to mostly be cache hot, so pick something more
       * in that direction.
       */
      latency = 200;
      break;

   case SHADER_OPCODE_GEN7_SCRATCH_READ:
      /* Testing a load from offset 0, that had been previously written:
       *
       * send(8) g114<1>UW g0<8,8,1>F data (0, 0, 0) mlen 1 rlen 1 { align1 WE_normal 1Q };
       * mov(8)  null      g114<8,8,1>F { align1 WE_normal 1Q };
       *
       * The cycles spent seemed to be grouped around 40-50 (as low as 38),
       * then around 140.  Presumably this is cache hit vs miss.
       */
      latency = 50;
      break;

   case VEC4_OPCODE_UNTYPED_ATOMIC:
      /* See GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP */
      latency = 14000;
      break;

   case VEC4_OPCODE_UNTYPED_SURFACE_READ:
   case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
      /* See also GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ */
      latency = is_haswell ? 300 : 600;
      break;

   case SHADER_OPCODE_SEND:
      switch (inst->sfid) {
      case BRW_SFID_SAMPLER: {
         unsigned msg_type = (inst->desc >> 12) & 0x1f;
         switch (msg_type) {
         case GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO:
         case GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO:
            /* See also SHADER_OPCODE_TXS */
            latency = 100;
            break;

         default:
            /* See also SHADER_OPCODE_TEX */
            latency = 200;
            break;
         }
         break;
      }

      case GEN6_SFID_DATAPORT_RENDER_CACHE:
         switch ((inst->desc >> 14) & 0x1f) {
         case GEN7_DATAPORT_RC_TYPED_SURFACE_WRITE:
         case GEN7_DATAPORT_RC_TYPED_SURFACE_READ:
            /* See also SHADER_OPCODE_TYPED_SURFACE_READ */
            assert(!is_haswell);
            latency = 600;
            break;

         case GEN7_DATAPORT_RC_TYPED_ATOMIC_OP:
            /* See also SHADER_OPCODE_TYPED_ATOMIC */
            assert(!is_haswell);
            latency = 14000;
            break;

         case GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE:
            /* completely fabricated number */
            latency = 600;
            break;

         default:
            unreachable("Unknown render cache message");
         }
         break;

      case GEN7_SFID_DATAPORT_DATA_CACHE:
         switch ((inst->desc >> 14) & 0x1f) {
         case GEN7_DATAPORT_DC_DWORD_SCATTERED_READ:
         case GEN6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE:
         case HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_READ:
         case HSW_DATAPORT_DC_PORT0_BYTE_SCATTERED_WRITE:
            /* We have no data for this but assume it's roughly the same as
             * untyped surface read/write.
             */
            latency = 300;
            break;

         case GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ:
         case GEN7_DATAPORT_DC_UNTYPED_SURFACE_WRITE:
            /* Test code:
             *   mov(8)    g112<1>UD       0x00000000UD       { align1 WE_all 1Q };
             *   mov(1)    g112.7<1>UD     g1.7<0,1,0>UD      { align1 WE_all };
             *   mov(8)    g113<1>UD       0x00000000UD       { align1 WE_normal 1Q };
             *   send(8)   g4<1>UD         g112<8,8,1>UD
             *             data (38, 6, 5) mlen 2 rlen 1      { align1 WE_normal 1Q };
             *   .
             *   . [repeats 8 times]
             *   .
             *   mov(8)    g112<1>UD       0x00000000UD       { align1 WE_all 1Q };
             *   mov(1)    g112.7<1>UD     g1.7<0,1,0>UD      { align1 WE_all };
             *   mov(8)    g113<1>UD       0x00000000UD       { align1 WE_normal 1Q };
             *   send(8)   g4<1>UD         g112<8,8,1>UD
             *             data (38, 6, 5) mlen 2 rlen 1      { align1 WE_normal 1Q };
             *
             * Running it 100 times as fragment shader on a 128x128 quad
             * gives an average latency of 583 cycles per surface read,
             * standard deviation 0.9%.
             */
            assert(!is_haswell);
            latency = 600;
            break;

         case GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP:
            /* Test code:
             *   mov(8)    g112<1>ud       0x00000000ud       { align1 WE_all 1Q };
             *   mov(1)    g112.7<1>ud     g1.7<0,1,0>ud      { align1 WE_all };
             *   mov(8)    g113<1>ud       0x00000000ud       { align1 WE_normal 1Q };
             *   send(8)   g4<1>ud         g112<8,8,1>ud
             *             data (38, 5, 6) mlen 2 rlen 1      { align1 WE_normal 1Q };
             *
             * Running it 100 times as fragment shader on a 128x128 quad
             * gives an average latency of 13867 cycles per atomic op,
             * standard deviation 3%.  Note that this is a rather
             * pessimistic estimate; the actual latency in cases with few
             * collisions between threads and favorable pipelining has been
             * seen to be reduced by a factor of 100.
             */
            assert(!is_haswell);
            latency = 14000;
            break;

         default:
            unreachable("Unknown data cache message");
         }
         break;

      case HSW_SFID_DATAPORT_DATA_CACHE_1:
         switch ((inst->desc >> 14) & 0x1f) {
         case HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ:
         case HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_WRITE:
         case HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_READ:
         case HSW_DATAPORT_DC_PORT1_TYPED_SURFACE_WRITE:
         case GEN8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_WRITE:
         case GEN8_DATAPORT_DC_PORT1_A64_UNTYPED_SURFACE_READ:
         case GEN8_DATAPORT_DC_PORT1_A64_SCATTERED_WRITE:
         case GEN9_DATAPORT_DC_PORT1_A64_SCATTERED_READ:
            /* See also GEN7_DATAPORT_DC_UNTYPED_SURFACE_READ */
            latency = 300;
            break;

         case HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP:
         case HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP_SIMD4X2:
         case HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP_SIMD4X2:
         case HSW_DATAPORT_DC_PORT1_TYPED_ATOMIC_OP:
         case GEN9_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_FLOAT_OP:
         case GEN8_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_OP:
         case GEN9_DATAPORT_DC_PORT1_A64_UNTYPED_ATOMIC_FLOAT_OP:
            /* See also GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP */
            latency = 14000;
            break;

         default:
            unreachable("Unknown data cache message");
         }
         break;

      default:
         unreachable("Unknown SFID");
      }
      break;

   default:
      /* 2 cycles:
       * mul(8) g4<1>F g2<0,1,0>F      0.5F            { align1 WE_normal 1Q };
       *
       * 16 cycles:
       * mul(8) g4<1>F g2<0,1,0>F      0.5F            { align1 WE_normal 1Q };
       * mov(8) null   g4<8,8,1>F                      { align1 WE_normal 1Q };
       */
      latency = 14;
      break;
   }
}
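
/* A note on how the figures quoted above appear to have been measured: each
 * pair times an instruction alone (approximately its issue cost) against the
 * same sequence followed by a MOV that consumes the destination, so the
 * difference approximates the time until the result is available.
 */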

class instruction_scheduler {
public:
   instruction_scheduler(const backend_shader *s, int grf_count,
                         unsigned hw_reg_count, int block_count,
                         instruction_scheduler_mode mode):
      bs(s)
   {
      this->mem_ctx = ralloc_context(NULL);
      this->grf_count = grf_count;
      this->hw_reg_count = hw_reg_count;
      this->instructions.make_empty();
      this->post_reg_alloc = (mode == SCHEDULE_POST);
      this->mode = mode;
      if (!post_reg_alloc) {
         this->reg_pressure_in = rzalloc_array(mem_ctx, int, block_count);

         this->livein = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->livein[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                            BITSET_WORDS(grf_count));

         this->liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                             BITSET_WORDS(grf_count));

         this->hw_liveout = ralloc_array(mem_ctx, BITSET_WORD *, block_count);
         for (int i = 0; i < block_count; i++)
            this->hw_liveout[i] = rzalloc_array(mem_ctx, BITSET_WORD,
                                                BITSET_WORDS(hw_reg_count));

         this->written = rzalloc_array(mem_ctx, bool, grf_count);

         this->reads_remaining = rzalloc_array(mem_ctx, int, grf_count);

         this->hw_reads_remaining = rzalloc_array(mem_ctx, int, hw_reg_count);
      } else {
         this->reg_pressure_in = NULL;
         this->livein = NULL;
         this->liveout = NULL;
         this->hw_liveout = NULL;
         this->written = NULL;
         this->reads_remaining = NULL;
         this->hw_reads_remaining = NULL;
      }
   }

   ~instruction_scheduler()
   {
      ralloc_free(this->mem_ctx);
   }
   void add_barrier_deps(schedule_node *n);
   void add_dep(schedule_node *before, schedule_node *after, int latency);
   void add_dep(schedule_node *before, schedule_node *after);

   void run(cfg_t *cfg);
   void add_insts_from_block(bblock_t *block);
   void compute_delays();
   void compute_exits();
   virtual void calculate_deps() = 0;
   virtual schedule_node *choose_instruction_to_schedule() = 0;

   /**
    * Returns how many cycles it takes the instruction to issue.
    *
    * Instructions in gen hardware are handled one simd4 vector at a time,
    * with 1 cycle per vector dispatched.  Thus SIMD8 pixel shaders take 2
    * cycles to dispatch and SIMD16 (compressed) instructions take 4.
    */
   virtual int issue_time(backend_instruction *inst) = 0;

   virtual void count_reads_remaining(backend_instruction *inst) = 0;
   virtual void setup_liveness(cfg_t *cfg) = 0;
   virtual void update_register_pressure(backend_instruction *inst) = 0;
   virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;

   void schedule_instructions(bblock_t *block);

   void *mem_ctx;

   bool post_reg_alloc;
   int grf_count;
   unsigned hw_reg_count;
   int reg_pressure;
   int block_idx;
   exec_list instructions;
   const backend_shader *bs;

   instruction_scheduler_mode mode;

   /*
    * The register pressure at the beginning of each basic block.
    */

   int *reg_pressure_in;

   /*
    * The virtual GRFs whose range overlaps the beginning of each basic block.
    */

   BITSET_WORD **livein;

   /*
    * The virtual GRFs whose range overlaps the end of each basic block.
    */

   BITSET_WORD **liveout;

   /*
    * The hardware GRFs whose range overlaps the end of each basic block.
    */

   BITSET_WORD **hw_liveout;

   /*
    * Whether we've scheduled a write for this virtual GRF yet.
    */

   bool *written;

   /*
    * How many reads we haven't scheduled for this virtual GRF yet.
    */

   int *reads_remaining;

   /*
    * How many reads we haven't scheduled for this hardware GRF yet.
    */

   int *hw_reads_remaining;
};

class fs_instruction_scheduler : public instruction_scheduler
{
public:
   fs_instruction_scheduler(const fs_visitor *v, int grf_count, int hw_reg_count,
                            int block_count,
                            instruction_scheduler_mode mode);
   void calculate_deps();
   bool is_compressed(const fs_inst *inst);
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   const fs_visitor *v;

   void count_reads_remaining(backend_instruction *inst);
   void setup_liveness(cfg_t *cfg);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
};

fs_instruction_scheduler::fs_instruction_scheduler(const fs_visitor *v,
                                                   int grf_count, int hw_reg_count,
                                                   int block_count,
                                                   instruction_scheduler_mode mode)
   : instruction_scheduler(v, grf_count, hw_reg_count, block_count, mode),
     v(v)
{
}

static bool
is_src_duplicate(fs_inst *inst, int src)
{
   for (int i = 0; i < src; i++)
      if (inst->src[i].equals(inst->src[src]))
         return true;

   return false;
}
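
/* is_src_duplicate() keeps the read counting below from counting the same
 * value twice: e.g. for a hypothetical add(8) g6 g5 g5, the second g5
 * source duplicates the first, so g5 is counted as one outstanding read.
 */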

void
fs_instruction_scheduler::count_reads_remaining(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;

   if (!reads_remaining)
      return;

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF) {
         reads_remaining[inst->src[i].nr]++;
      } else if (inst->src[i].file == FIXED_GRF) {
         if (inst->src[i].nr >= hw_reg_count)
            continue;

         for (unsigned j = 0; j < regs_read(inst, i); j++)
            hw_reads_remaining[inst->src[i].nr + j]++;
      }
   }
}

void
fs_instruction_scheduler::setup_liveness(cfg_t *cfg)
{
   const fs_live_variables &live = v->live_analysis.require();

   /* First, compute liveness on a per-GRF level using the in/out sets from
    * liveness calculation.
    */
   for (int block = 0; block < cfg->num_blocks; block++) {
      for (int i = 0; i < live.num_vars; i++) {
         if (BITSET_TEST(live.block_data[block].livein, i)) {
            int vgrf = live.vgrf_from_var[i];
            if (!BITSET_TEST(livein[block], vgrf)) {
               reg_pressure_in[block] += v->alloc.sizes[vgrf];
               BITSET_SET(livein[block], vgrf);
            }
         }

         if (BITSET_TEST(live.block_data[block].liveout, i))
            BITSET_SET(liveout[block], live.vgrf_from_var[i]);
      }
   }

   /* Now, extend the live in/live out sets for when a range crosses a block
    * boundary, which matches what our register allocator/interference code
    * does to account for force_writemask_all and incompatible exec_masks.
    */
   for (int block = 0; block < cfg->num_blocks - 1; block++) {
      for (int i = 0; i < grf_count; i++) {
         if (live.vgrf_start[i] <= cfg->blocks[block]->end_ip &&
             live.vgrf_end[i] >= cfg->blocks[block + 1]->start_ip) {
            if (!BITSET_TEST(livein[block + 1], i)) {
                reg_pressure_in[block + 1] += v->alloc.sizes[i];
                BITSET_SET(livein[block + 1], i);
            }

            BITSET_SET(liveout[block], i);
         }
      }
   }

   int payload_last_use_ip[hw_reg_count];
   v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);

   for (unsigned i = 0; i < hw_reg_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      for (int block = 0; block < cfg->num_blocks; block++) {
         if (cfg->blocks[block]->start_ip <= payload_last_use_ip[i])
            reg_pressure_in[block]++;

         if (cfg->blocks[block]->end_ip <= payload_last_use_ip[i])
            BITSET_SET(hw_liveout[block], i);
      }
   }
}

void
fs_instruction_scheduler::update_register_pressure(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;

   if (!reads_remaining)
      return;

   if (inst->dst.file == VGRF) {
      written[inst->dst.nr] = true;
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF) {
         reads_remaining[inst->src[i].nr]--;
      } else if (inst->src[i].file == FIXED_GRF &&
                 inst->src[i].nr < hw_reg_count) {
         for (unsigned off = 0; off < regs_read(inst, i); off++)
            hw_reads_remaining[inst->src[i].nr + off]--;
      }
   }
}

int
fs_instruction_scheduler::get_register_pressure_benefit(backend_instruction *be)
{
   fs_inst *inst = (fs_inst *)be;
   int benefit = 0;

   if (inst->dst.file == VGRF) {
      if (!BITSET_TEST(livein[block_idx], inst->dst.nr) &&
          !written[inst->dst.nr])
         benefit -= v->alloc.sizes[inst->dst.nr];
   }

   for (int i = 0; i < inst->sources; i++) {
      if (is_src_duplicate(inst, i))
         continue;

      if (inst->src[i].file == VGRF &&
          !BITSET_TEST(liveout[block_idx], inst->src[i].nr) &&
          reads_remaining[inst->src[i].nr] == 1)
         benefit += v->alloc.sizes[inst->src[i].nr];

      if (inst->src[i].file == FIXED_GRF &&
          inst->src[i].nr < hw_reg_count) {
         for (unsigned off = 0; off < regs_read(inst, i); off++) {
            int reg = inst->src[i].nr + off;
            if (!BITSET_TEST(hw_liveout[block_idx], reg) &&
                hw_reads_remaining[reg] == 1) {
               benefit++;
            }
         }
      }
   }

   return benefit;
}
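
/* A worked example with hypothetical sizes: an instruction whose
 * two-register destination is neither live into the block nor written yet
 * scores -2, and if its source is the last remaining read of a
 * four-register VGRF that is not live out of the block it scores +4, for a
 * net benefit of +2 registers freed by scheduling it now.
 */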

class vec4_instruction_scheduler : public instruction_scheduler
{
public:
   vec4_instruction_scheduler(const vec4_visitor *v, int grf_count);
   void calculate_deps();
   schedule_node *choose_instruction_to_schedule();
   int issue_time(backend_instruction *inst);
   const vec4_visitor *v;

   void count_reads_remaining(backend_instruction *inst);
   void setup_liveness(cfg_t *cfg);
   void update_register_pressure(backend_instruction *inst);
   int get_register_pressure_benefit(backend_instruction *inst);
};

vec4_instruction_scheduler::vec4_instruction_scheduler(const vec4_visitor *v,
                                                       int grf_count)
   : instruction_scheduler(v, grf_count, 0, 0, SCHEDULE_POST),
     v(v)
{
}

void
vec4_instruction_scheduler::count_reads_remaining(backend_instruction *)
{
}

void
vec4_instruction_scheduler::setup_liveness(cfg_t *)
{
}

void
vec4_instruction_scheduler::update_register_pressure(backend_instruction *)
{
}

int
vec4_instruction_scheduler::get_register_pressure_benefit(backend_instruction *)
{
   return 0;
}

schedule_node::schedule_node(backend_instruction *inst,
                             instruction_scheduler *sched)
{
   const struct gen_device_info *devinfo = sched->bs->devinfo;

   this->inst = inst;
   this->child_array_size = 0;
   this->children = NULL;
   this->child_latency = NULL;
   this->child_count = 0;
   this->parent_count = 0;
   this->unblocked_time = 0;
   this->cand_generation = 0;
   this->delay = 0;
   this->exit = NULL;

   /* We can't measure Gen6 timings directly but expect them to be much
    * closer to Gen7 than Gen4.
    */
   if (!sched->post_reg_alloc)
      this->latency = 1;
   else if (devinfo->gen >= 6)
      set_latency_gen7(devinfo->is_haswell);
   else
      set_latency_gen4();
}
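
/* Note that before register allocation every node gets a uniform latency of
 * 1, so the pre-RA scheduling passes effectively optimize for register
 * pressure; the measured per-opcode latencies above only matter post-RA.
 */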

void
instruction_scheduler::add_insts_from_block(bblock_t *block)
{
   foreach_inst_in_block(backend_instruction, inst, block) {
      schedule_node *n = new(mem_ctx) schedule_node(inst, this);

      instructions.push_tail(n);
   }
}

/** Computation of the delay member of each node. */
void
instruction_scheduler::compute_delays()
{
   foreach_in_list_reverse(schedule_node, n, &instructions) {
      if (!n->child_count) {
         n->delay = issue_time(n->inst);
      } else {
         for (int i = 0; i < n->child_count; i++) {
            assert(n->children[i]->delay);
            n->delay = MAX2(n->delay, n->latency + n->children[i]->delay);
         }
      }
   }
}
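
/* Walking the list in reverse guarantees each child's delay is final before
 * its parents are visited.  With illustrative numbers: a leaf MOV with
 * issue_time 2 gets delay 2, and a parent with latency 14 feeding only that
 * leaf gets delay 14 + 2 = 16, the length of its critical path to the end
 * of the block.
 */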

void
instruction_scheduler::compute_exits()
{
   /* Calculate a lower bound of the scheduling time of each node in the
    * graph.  This is analogous to the node's critical path but calculated
    * from the top instead of from the bottom of the block.
    */
   foreach_in_list(schedule_node, n, &instructions) {
      for (int i = 0; i < n->child_count; i++) {
         n->children[i]->unblocked_time =
            MAX2(n->children[i]->unblocked_time,
                 n->unblocked_time + issue_time(n->inst) + n->child_latency[i]);
      }
   }

   /* Calculate the exit of each node by induction based on the exit nodes of
    * its children.  The preferred exit of a node is the one among the exit
    * nodes of its children which can be unblocked first according to the
    * optimistic unblocked time estimate calculated above.
    */
   foreach_in_list_reverse(schedule_node, n, &instructions) {
      n->exit = (n->inst->opcode == FS_OPCODE_DISCARD_JUMP ? n : NULL);

      for (int i = 0; i < n->child_count; i++) {
         if (exit_unblocked_time(n->children[i]) < exit_unblocked_time(n))
            n->exit = n->children[i]->exit;
      }
   }
}
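
/* In other words, the forward pass computes an optimistic earliest start
 * time for every node, and the backward pass points each node at the
 * FS_OPCODE_DISCARD_JUMP (if any) it could unblock soonest, which
 * choose_instruction_to_schedule() below uses to favor early program exits.
 */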

/**
 * Add a dependency between two instruction nodes.
 *
 * The @after node will be scheduled after @before.  We will try to
 * schedule it @latency cycles after @before, but no guarantees there.
 */
void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after,
                               int latency)
{
   if (!before || !after)
      return;

   assert(before != after);

   for (int i = 0; i < before->child_count; i++) {
      if (before->children[i] == after) {
         before->child_latency[i] = MAX2(before->child_latency[i], latency);
         return;
      }
   }

   if (before->child_array_size <= before->child_count) {
      if (before->child_array_size < 16)
         before->child_array_size = 16;
      else
         before->child_array_size *= 2;

      before->children = reralloc(mem_ctx, before->children,
                                  schedule_node *,
                                  before->child_array_size);
      before->child_latency = reralloc(mem_ctx, before->child_latency,
                                       int, before->child_array_size);
   }

   before->children[before->child_count] = after;
   before->child_latency[before->child_count] = latency;
   before->child_count++;
   after->parent_count++;
}
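
/* The children/child_latency arrays grow geometrically (16 entries, then
 * doubling), so adding an edge is amortized O(1) on top of the linear scan
 * above, which de-duplicates edges by keeping the maximum latency.
 */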

void
instruction_scheduler::add_dep(schedule_node *before, schedule_node *after)
{
   if (!before)
      return;

   add_dep(before, after, before->latency);
}

static bool
is_scheduling_barrier(const backend_instruction *inst)
{
   return inst->opcode == FS_OPCODE_PLACEHOLDER_HALT ||
          inst->is_control_flow() ||
          inst->has_side_effects();
}

/**
 * Sometimes we really want this node to execute after everything that
 * was before it and before everything that followed it.  This adds
 * the deps to do so.
 */
void
instruction_scheduler::add_barrier_deps(schedule_node *n)
{
   schedule_node *prev = (schedule_node *)n->prev;
   schedule_node *next = (schedule_node *)n->next;

   if (prev) {
      while (!prev->is_head_sentinel()) {
         add_dep(prev, n, 0);
         if (is_scheduling_barrier(prev->inst))
            break;
         prev = (schedule_node *)prev->prev;
      }
   }

   if (next) {
      while (!next->is_tail_sentinel()) {
         add_dep(n, next, 0);
         if (is_scheduling_barrier(next->inst))
            break;
         next = (schedule_node *)next->next;
      }
   }
}
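
/* Note the early-out above: once the walk reaches a previous barrier it can
 * stop, because that barrier already depends on everything on its far side,
 * so ordering with respect to those nodes is preserved transitively.
 */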

/* instruction scheduling needs to be aware of when an MRF write
 * actually writes 2 MRFs.
 */
bool
fs_instruction_scheduler::is_compressed(const fs_inst *inst)
{
   return inst->exec_size == 16;
}
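
/* A compressed (SIMD16) MRF write touches two message registers: reg and
 * reg + 1 normally, or reg and reg + 4 in the BRW_MRF_COMPR4 addressing
 * mode.  calculate_deps() below records the write against both registers so
 * readers of either half pick up the dependency.
 */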

void
fs_instruction_scheduler::calculate_deps()
{
   /* Pre-register-allocation, this tracks the last write per VGRF offset.
    * After register allocation, reg_offsets are gone and we track individual
    * GRF registers.
    */
   schedule_node **last_grf_write;
   schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
   schedule_node *last_conditional_mod[8] = {};
   schedule_node *last_accumulator_write = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;

   last_grf_write = (schedule_node **)calloc(grf_count * 16, sizeof(schedule_node *));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));

   /* top-to-bottom dependencies: RAW and WAW. */
   foreach_in_list(schedule_node, n, &instructions) {
      fs_inst *inst = (fs_inst *)n->inst;

      if (is_scheduling_barrier(inst))
         add_barrier_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(last_grf_write[inst->src[i].nr + r], n);
            } else {
               for (unsigned r = 0; r < regs_read(inst, i); r++) {
                  add_dep(last_grf_write[inst->src[i].nr * 16 +
                                         inst->src[i].offset / REG_SIZE + r], n);
               }
            }
         } else if (inst->src[i].file == FIXED_GRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(last_grf_write[inst->src[i].nr + r], n);
            } else {
               add_dep(last_fixed_grf_write, n);
            }
         } else if (inst->src[i].is_accumulator()) {
            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file == ARF) {
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(last_mrf_write[inst->base_mrf + i], n);
         }
      }

      if (const unsigned mask = inst->flags_read(v->devinfo)) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i))
               add_dep(last_conditional_mod[i], n);
         }
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(last_accumulator_write, n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == VGRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               add_dep(last_grf_write[inst->dst.nr + r], n);
               last_grf_write[inst->dst.nr + r] = n;
            }
         } else {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               add_dep(last_grf_write[inst->dst.nr * 16 +
                                      inst->dst.offset / REG_SIZE + r], n);
               last_grf_write[inst->dst.nr * 16 +
                              inst->dst.offset / REG_SIZE + r] = n;
            }
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~BRW_MRF_COMPR4;

         add_dep(last_mrf_write[reg], n);
         last_mrf_write[reg] = n;
         if (is_compressed(inst)) {
            if (inst->dst.nr & BRW_MRF_COMPR4)
               reg += 4;
            else
               reg++;
            add_dep(last_mrf_write[reg], n);
            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == FIXED_GRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++)
               last_grf_write[inst->dst.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            add_dep(last_mrf_write[inst->base_mrf + i], n);
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (const unsigned mask = inst->flags_written()) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i)) {
               add_dep(last_conditional_mod[i], n, 0);
               last_conditional_mod[i] = n;
            }
         }
      }

      if (inst->writes_accumulator_implicitly(v->devinfo) &&
          !inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      }
   }

   /* bottom-to-top dependencies: WAR */
   memset(last_grf_write, 0, sizeof(schedule_node *) * grf_count * 16);
   memset(last_mrf_write, 0, sizeof(last_mrf_write));
   memset(last_conditional_mod, 0, sizeof(last_conditional_mod));
   last_accumulator_write = NULL;
   last_fixed_grf_write = NULL;

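   /* WAR edges are added with latency 0 (2 for MRFs consumed by a send):
    * the earlier read only needs to issue before the overwrite executes;
    * it doesn't have to wait for the result.
    */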
   foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
      fs_inst *inst = (fs_inst *)n->inst;

      /* write-after-read deps. */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
            } else {
               for (unsigned r = 0; r < regs_read(inst, i); r++) {
                  add_dep(n, last_grf_write[inst->src[i].nr * 16 +
                                            inst->src[i].offset / REG_SIZE + r], 0);
               }
            }
         } else if (inst->src[i].file == FIXED_GRF) {
            if (post_reg_alloc) {
               for (unsigned r = 0; r < regs_read(inst, i); r++)
                  add_dep(n, last_grf_write[inst->src[i].nr + r], 0);
            } else {
               add_dep(n, last_fixed_grf_write, 0);
            }
         } else if (inst->src[i].is_accumulator()) {
            add_dep(n, last_accumulator_write, 0);
         } else if (inst->src[i].file == ARF) {
            add_barrier_deps(n);
         }
      }

      if (inst->base_mrf != -1) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
         }
      }

      if (const unsigned mask = inst->flags_read(v->devinfo)) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i))
               add_dep(n, last_conditional_mod[i]);
         }
      }

      if (inst->reads_accumulator_implicitly()) {
         add_dep(n, last_accumulator_write);
      }

      /* Update the things this instruction wrote, so earlier reads
       * can mark this as WAR dependency.
       */
      if (inst->dst.file == VGRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++)
               last_grf_write[inst->dst.nr + r] = n;
         } else {
            for (unsigned r = 0; r < regs_written(inst); r++) {
               last_grf_write[inst->dst.nr * 16 +
                              inst->dst.offset / REG_SIZE + r] = n;
            }
         }
      } else if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~BRW_MRF_COMPR4;

         last_mrf_write[reg] = n;

         if (is_compressed(inst)) {
            if (inst->dst.nr & BRW_MRF_COMPR4)
               reg += 4;
            else
               reg++;

            last_mrf_write[reg] = n;
         }
      } else if (inst->dst.file == FIXED_GRF) {
         if (post_reg_alloc) {
            for (unsigned r = 0; r < regs_written(inst); r++)
               last_grf_write[inst->dst.nr + r] = n;
         } else {
            last_fixed_grf_write = n;
         }
      } else if (inst->dst.is_accumulator()) {
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && inst->base_mrf != -1) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (const unsigned mask = inst->flags_written()) {
         assert(mask < (1 << ARRAY_SIZE(last_conditional_mod)));

         for (unsigned i = 0; i < ARRAY_SIZE(last_conditional_mod); i++) {
            if (mask & (1 << i))
               last_conditional_mod[i] = n;
         }
      }

      if (inst->writes_accumulator_implicitly(v->devinfo)) {
         last_accumulator_write = n;
      }
   }

   free(last_grf_write);
}

void
vec4_instruction_scheduler::calculate_deps()
{
   schedule_node *last_grf_write[grf_count];
   schedule_node *last_mrf_write[BRW_MAX_MRF(v->devinfo->gen)];
   schedule_node *last_conditional_mod = NULL;
   schedule_node *last_accumulator_write = NULL;
   /* Fixed HW registers are assumed to be separate from the virtual
    * GRFs, so they can be tracked separately.  We don't really write
    * to fixed GRFs much, so don't bother tracking them on a more
    * granular level.
    */
   schedule_node *last_fixed_grf_write = NULL;

   memset(last_grf_write, 0, sizeof(last_grf_write));
   memset(last_mrf_write, 0, sizeof(last_mrf_write));

   /* top-to-bottom dependencies: RAW and WAW. */
   foreach_in_list(schedule_node, n, &instructions) {
      vec4_instruction *inst = (vec4_instruction *)n->inst;

      if (is_scheduling_barrier(inst))
         add_barrier_deps(n);

      /* read-after-write deps. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF) {
            for (unsigned j = 0; j < regs_read(inst, i); ++j)
               add_dep(last_grf_write[inst->src[i].nr + j], n);
         } else if (inst->src[i].file == FIXED_GRF) {
            add_dep(last_fixed_grf_write, n);
         } else if (inst->src[i].is_accumulator()) {
            assert(last_accumulator_write);
            add_dep(last_accumulator_write, n);
         } else if (inst->src[i].file == ARF) {
            add_barrier_deps(n);
         }
      }

      if (inst->reads_g0_implicitly())
         add_dep(last_fixed_grf_write, n);

      if (!inst->is_send_from_grf()) {
         for (int i = 0; i < inst->mlen; i++) {
            /* It looks like the MRF regs are released in the send
             * instruction once it's sent, not when the result comes
             * back.
             */
            add_dep(last_mrf_write[inst->base_mrf + i], n);
         }
      }

      if (inst->reads_flag()) {
         assert(last_conditional_mod);
         add_dep(last_conditional_mod, n);
      }

      if (inst->reads_accumulator_implicitly()) {
         assert(last_accumulator_write);
         add_dep(last_accumulator_write, n);
      }

      /* write-after-write deps. */
      if (inst->dst.file == VGRF) {
         for (unsigned j = 0; j < regs_written(inst); ++j) {
            add_dep(last_grf_write[inst->dst.nr + j], n);
            last_grf_write[inst->dst.nr + j] = n;
         }
      } else if (inst->dst.file == MRF) {
         add_dep(last_mrf_write[inst->dst.nr], n);
         last_mrf_write[inst->dst.nr] = n;
      } else if (inst->dst.file == FIXED_GRF) {
         last_fixed_grf_write = n;
      } else if (inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
         add_barrier_deps(n);
      }

      if (inst->mlen > 0 && !inst->is_send_from_grf()) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            add_dep(last_mrf_write[inst->base_mrf + i], n);
            last_mrf_write[inst->base_mrf + i] = n;
         }
      }

      if (inst->writes_flag()) {
         add_dep(last_conditional_mod, n, 0);
         last_conditional_mod = n;
      }

      if (inst->writes_accumulator_implicitly(v->devinfo) &&
          !inst->dst.is_accumulator()) {
         add_dep(last_accumulator_write, n);
         last_accumulator_write = n;
      }
   }
1434 
1435    /* bottom-to-top dependencies: WAR */
1436    memset(last_grf_write, 0, sizeof(last_grf_write));
1437    memset(last_mrf_write, 0, sizeof(last_mrf_write));
1438    last_conditional_mod = NULL;
1439    last_accumulator_write = NULL;
1440    last_fixed_grf_write = NULL;
1441 
1442    foreach_in_list_reverse_safe(schedule_node, n, &instructions) {
1443       vec4_instruction *inst = (vec4_instruction *)n->inst;
1444 
1445       /* write-after-read deps. */
1446       for (int i = 0; i < 3; i++) {
1447          if (inst->src[i].file == VGRF) {
1448             for (unsigned j = 0; j < regs_read(inst, i); ++j)
1449                add_dep(n, last_grf_write[inst->src[i].nr + j]);
1450          } else if (inst->src[i].file == FIXED_GRF) {
1451             add_dep(n, last_fixed_grf_write);
1452          } else if (inst->src[i].is_accumulator()) {
1453             add_dep(n, last_accumulator_write);
1454          } else if (inst->src[i].file == ARF) {
1455             add_barrier_deps(n);
1456          }
1457       }
1458 
1459       if (!inst->is_send_from_grf()) {
1460          for (int i = 0; i < inst->mlen; i++) {
1461             /* It looks like the MRF regs are released in the send
1462              * instruction once it's sent, not when the result comes
1463              * back.
1464              */
1465             add_dep(n, last_mrf_write[inst->base_mrf + i], 2);
1466          }
1467       }
1468 
1469       if (inst->reads_flag()) {
1470          add_dep(n, last_conditional_mod);
1471       }
1472 
1473       if (inst->reads_accumulator_implicitly()) {
1474          add_dep(n, last_accumulator_write);
1475       }
1476 
1477       /* Update the things this instruction wrote, so earlier reads
1478        * can mark this as a WAR dependency.
1479        */
1480       if (inst->dst.file == VGRF) {
1481          for (unsigned j = 0; j < regs_written(inst); ++j)
1482             last_grf_write[inst->dst.nr + j] = n;
1483       } else if (inst->dst.file == MRF) {
1484          last_mrf_write[inst->dst.nr] = n;
1485       } else if (inst->dst.file == FIXED_GRF) {
1486          last_fixed_grf_write = n;
1487       } else if (inst->dst.is_accumulator()) {
1488          last_accumulator_write = n;
1489       } else if (inst->dst.file == ARF && !inst->dst.is_null()) {
1490          add_barrier_deps(n);
1491       }
1492 
1493       if (inst->mlen > 0 && !inst->is_send_from_grf()) {
1494          for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
1495             last_mrf_write[inst->base_mrf + i] = n;
1496          }
1497       }
1498 
1499       if (inst->writes_flag()) {
1500          last_conditional_mod = n;
1501       }
1502 
1503       if (inst->writes_accumulator_implicitly(v->devinfo)) {
1504          last_accumulator_write = n;
1505       }
1506    }
1507 }
1508 
1509 schedule_node *
1510 fs_instruction_scheduler::choose_instruction_to_schedule()
1511 {
1512    schedule_node *chosen = NULL;
1513 
1514    if (mode == SCHEDULE_PRE || mode == SCHEDULE_POST) {
1515       int chosen_time = 0;
1516 
1517       /* Of the instructions ready to execute or the closest to being ready,
1518        * choose the one most likely to unblock an early program exit, or
1519        * otherwise the oldest one.
1520        */
1521       foreach_in_list(schedule_node, n, &instructions) {
1522          if (!chosen ||
1523              exit_unblocked_time(n) < exit_unblocked_time(chosen) ||
1524              (exit_unblocked_time(n) == exit_unblocked_time(chosen) &&
1525               n->unblocked_time < chosen_time)) {
1526             chosen = n;
1527             chosen_time = n->unblocked_time;
1528          }
1529       }
1530    } else {
1531       /* Before register allocation, we don't care about the latencies of
1532        * instructions.  All we care about is reducing live intervals of
1533        * variables so that we can avoid register spilling, or get SIMD16
1534        * shaders, which naturally do a better job of hiding instruction
1535        * latency.
1536        */
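           /* The tie-breakers below are applied in priority order: a definite
            * register-pressure win first, then (in LIFO mode) recently unblocked
            * candidates and non-SEND instructions, then the largest delay to the
            * end of the program, then the earliest program exit, and finally
            * original program order.
            */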
1537       foreach_in_list(schedule_node, n, &instructions) {
1538          fs_inst *inst = (fs_inst *)n->inst;
1539 
1540          if (!chosen) {
1541             chosen = n;
1542             continue;
1543          }
1544 
1545          /* Most important: If we can definitely reduce register pressure, do
1546           * so immediately.
1547           */
1548          int register_pressure_benefit = get_register_pressure_benefit(n->inst);
1549          int chosen_register_pressure_benefit =
1550             get_register_pressure_benefit(chosen->inst);
1551 
1552          if (register_pressure_benefit > 0 &&
1553              register_pressure_benefit > chosen_register_pressure_benefit) {
1554             chosen = n;
1555             continue;
1556          } else if (chosen_register_pressure_benefit > 0 &&
1557                     (register_pressure_benefit <
1558                      chosen_register_pressure_benefit)) {
1559             continue;
1560          }
1561 
1562          if (mode == SCHEDULE_PRE_LIFO) {
1563             /* Prefer instructions that recently became available for
1564              * scheduling.  These are the things that are most likely to
1565              * (eventually) make a variable dead and reduce register pressure.
1566              * Typical register pressure estimates don't work for us because
1567              * most of our pressure comes from texturing, where no single
1568              * instruction to schedule will make a vec4 value dead.
1569              */
1570             if (n->cand_generation > chosen->cand_generation) {
1571                chosen = n;
1572                continue;
1573             } else if (n->cand_generation < chosen->cand_generation) {
1574                continue;
1575             }
1576 
1577             /* On MRF-using chips, prefer non-SEND instructions.  If we don't
1578              * do this, then because we prefer instructions that just became
1579              * candidates, we'll end up in a pattern of scheduling a SEND,
1580              * then the MRFs for the next SEND, then the next SEND, then the
1581              * MRFs, etc., without ever consuming the results of a send.
1582              */
1583             if (v->devinfo->gen < 7) {
1584                fs_inst *chosen_inst = (fs_inst *)chosen->inst;
1585 
1586                /* We use size_written > 4 * exec_size as our test for the kind
1587                 * of send instruction to avoid -- only sends generate many
1588                 * regs, and a single-result send is probably actually reducing
1589                 * register pressure.
1590                 */
1591                if (inst->size_written <= 4 * inst->exec_size &&
1592                    chosen_inst->size_written > 4 * chosen_inst->exec_size) {
1593                   chosen = n;
1594                   continue;
1595                } else if (inst->size_written > chosen_inst->size_written) {
1596                   continue;
1597                }
1598             }
1599          }
1600 
1601          /* For instructions pushed onto the candidates list at the same time,
1602           * prefer the one with the highest delay to the end of the program.
1603           * Its results are the most likely to be consumable first (such as
1604           * for a large tree of lowered UBO loads, which appear reversed in
1605           * the instruction stream with respect to when they can be consumed).
1606           */
1607          if (n->delay > chosen->delay) {
1608             chosen = n;
1609             continue;
1610          } else if (n->delay < chosen->delay) {
1611             continue;
1612          }
1613 
1614          /* Prefer the node most likely to unblock an early program exit.
1615           */
1616          if (exit_unblocked_time(n) < exit_unblocked_time(chosen)) {
1617             chosen = n;
1618             continue;
1619          } else if (exit_unblocked_time(n) > exit_unblocked_time(chosen)) {
1620             continue;
1621          }
1622 
1623          /* If all other metrics are equal, we prefer the first instruction in
1624           * the list (program execution order).
1625           */
1626       }
1627    }
1628 
1629    return chosen;
1630 }
1631 
1632 schedule_node *
1633 vec4_instruction_scheduler::choose_instruction_to_schedule()
1634 {
1635    schedule_node *chosen = NULL;
1636    int chosen_time = 0;
1637 
1638    /* Of the instructions ready to execute or the closest to being ready,
1639     * choose the oldest one.
1640     */
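     /* Unlike the FS scheduler, there are no pre-RA register-pressure
      * heuristics here; the oldest ready instruction always wins.
      */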
1641    foreach_in_list(schedule_node, n, &instructions) {
1642       if (!chosen || n->unblocked_time < chosen_time) {
1643          chosen = n;
1644          chosen_time = n->unblocked_time;
1645       }
1646    }
1647 
1648    return chosen;
1649 }
1650 
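     /* Estimated number of cycles the instruction occupies the issue stage: two
      * for a normal instruction, four for a compressed one, plus an extra
      * penalty (the size of the destination in registers) when
      * has_bank_conflict() flags a likely GRF bank conflict.  The penalty is
      * gated on v->grf_used, presumably so it only applies once registers have
      * actually been allocated.
      */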
1651 int
1652 fs_instruction_scheduler::issue_time(backend_instruction *inst0)
1653 {
1654    const fs_inst *inst = static_cast<fs_inst *>(inst0);
1655    const unsigned overhead = v->grf_used && has_bank_conflict(v->devinfo, inst) ?
1656       DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE) : 0;
1657    if (is_compressed(inst))
1658       return 4 + overhead;
1659    else
1660       return 2 + overhead;
1661 }
1662 
1663 int
1664 vec4_instruction_scheduler::issue_time(backend_instruction *)
1665 {
1666    /* We always execute as two vec4s in parallel. */
1667    return 2;
1668 }
1669 
1670 void
1671 instruction_scheduler::schedule_instructions(bblock_t *block)
1672 {
1673    const struct gen_device_info *devinfo = bs->devinfo;
1674    int time = 0;
1675    int instructions_to_schedule = block->end_ip - block->start_ip + 1;
1676 
1677    if (!post_reg_alloc)
1678       reg_pressure = reg_pressure_in[block->num];
1679    block_idx = block->num;
1680 
1681    /* Remove non-DAG heads from the list. */
1682    foreach_in_list_safe(schedule_node, n, &instructions) {
1683       if (n->parent_count != 0)
1684          n->remove();
1685    }
1686 
1687    unsigned cand_generation = 1;
1688    while (!instructions.is_empty()) {
1689       schedule_node *chosen = choose_instruction_to_schedule();
1690 
1691       /* Schedule this instruction. */
1692       assert(chosen);
1693       chosen->remove();
1694       chosen->inst->exec_node::remove();
1695       block->instructions.push_tail(chosen->inst);
1696       instructions_to_schedule--;
1697 
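           /* Keep the running register-pressure estimate current; this only
            * matters for the pre-RA scheduling passes.
            */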
1698       if (!post_reg_alloc) {
1699          reg_pressure -= get_register_pressure_benefit(chosen->inst);
1700          update_register_pressure(chosen->inst);
1701       }
1702 
1703       /* If we expected a delay for scheduling, then bump the clock to reflect
1704        * that.  In reality, the hardware will switch to another hyperthread
1705        * and may not return to dispatching our thread for a while even after
1706        * we're unblocked.  After this, we have the time when the chosen
1707        * instruction will start executing.
1708        */
1709       time = MAX2(time, chosen->unblocked_time);
1710 
1711       /* Update the clock for how soon an instruction could start after the
1712        * chosen one.
1713        */
1714       time += issue_time(chosen->inst);
1715 
1716       if (debug) {
1717          fprintf(stderr, "clock %4d, scheduled: ", time);
1718          bs->dump_instruction(chosen->inst);
1719          if (!post_reg_alloc)
1720             fprintf(stderr, "(register pressure %d)\n", reg_pressure);
1721       }
1722 
1723       /* Now that we've scheduled a new instruction, some of its
1724        * children can be promoted to the list of instructions ready to
1725        * be scheduled.  Update the children's unblocked time for this
1726        * DAG edge as we do so.
1727        */
1728       for (int i = chosen->child_count - 1; i >= 0; i--) {
1729          schedule_node *child = chosen->children[i];
1730 
1731          child->unblocked_time = MAX2(child->unblocked_time,
1732                                       time + chosen->child_latency[i]);
1733 
1734          if (debug) {
1735             fprintf(stderr, "\tchild %d, %d parents: ", i, child->parent_count);
1736             bs->dump_instruction(child->inst);
1737          }
1738 
1739          child->cand_generation = cand_generation;
1740          child->parent_count--;
1741          if (child->parent_count == 0) {
1742             if (debug) {
1743                fprintf(stderr, "\t\tnow available\n");
1744             }
1745             instructions.push_head(child);
1746          }
1747       }
1748       cand_generation++;
1749 
1750       /* Shared resource: the mathbox.  There's one mathbox per EU on Gen6+
1751        * but it's more limited pre-Gen6, so if we send something off to it then
1752        * the next math instruction isn't going to make progress until the first
1753        * is done.
1754        */
1755       if (devinfo->gen < 6 && chosen->inst->is_math()) {
1756          foreach_in_list(schedule_node, n, &instructions) {
1757             if (n->inst->is_math())
1758                n->unblocked_time = MAX2(n->unblocked_time,
1759                                         time + chosen->latency);
1760          }
1761       }
1762    }
1763 
1764    assert(instructions_to_schedule == 0);
1765 }
1766 
1767 void
1768 instruction_scheduler::run(cfg_t *cfg)
1769 {
1770    if (debug && !post_reg_alloc) {
1771       fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
1772               post_reg_alloc);
1773       bs->dump_instructions();
1774    }
1775 
1776    if (!post_reg_alloc)
1777       setup_liveness(cfg);
1778 
1779    foreach_block(block, cfg) {
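           /* reads_remaining and friends are only set up when register pressure
            * is being tracked; reset the per-block bookkeeping before scanning
            * this block.
            */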
1780       if (reads_remaining) {
1781          memset(reads_remaining, 0,
1782                 grf_count * sizeof(*reads_remaining));
1783          memset(hw_reads_remaining, 0,
1784                 hw_reg_count * sizeof(*hw_reads_remaining));
1785          memset(written, 0, grf_count * sizeof(*written));
1786 
1787          foreach_inst_in_block(fs_inst, inst, block)
1788             count_reads_remaining(inst);
1789       }
1790 
1791       add_insts_from_block(block);
1792 
1793       calculate_deps();
1794 
1795       compute_delays();
1796       compute_exits();
1797 
1798       schedule_instructions(block);
1799    }
1800 
1801    if (debug && !post_reg_alloc) {
1802       fprintf(stderr, "\nInstructions after scheduling (reg_alloc %d)\n",
1803               post_reg_alloc);
1804       bs->dump_instructions();
1805    }
1806 }
1807 
1808 void
1809 fs_visitor::schedule_instructions(instruction_scheduler_mode mode)
1810 {
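        /* After register allocation, schedule against the hardware registers
         * actually in use; before it, track every virtual GRF.
         */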
1811    int grf_count;
1812    if (mode == SCHEDULE_POST)
1813       grf_count = grf_used;
1814    else
1815       grf_count = alloc.count;
1816 
1817    fs_instruction_scheduler sched(this, grf_count, first_non_payload_grf,
1818                                   cfg->num_blocks, mode);
1819    sched.run(cfg);
1820 
1821    invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
1822 }
1823 
1824 void
1825 vec4_visitor::opt_schedule_instructions()
1826 {
1827    vec4_instruction_scheduler sched(this, prog_data->total_grf);
1828    sched.run(cfg);
1829 
1830    invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
1831 }
1832