/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _MDG_COMPILER_H
#define _MDG_COMPILER_H

#include "midgard.h"
#include "helpers.h"
#include "midgard_compile.h"
#include "midgard_ops.h"

#include "util/hash_table.h"
#include "util/u_dynarray.h"
#include "util/set.h"
#include "util/list.h"

#include "main/mtypes.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir.h"
#include "panfrost/util/pan_ir.h"
#include "panfrost/util/lcra.h"

/* Forward declare */
struct midgard_block;

/* Target types. Defaults to TARGET_GOTO (the type corresponding directly to
 * the hardware), hence why that must be zero. TARGET_DISCARD signals this
 * instruction is actually a discard op. */

#define TARGET_GOTO 0
#define TARGET_BREAK 1
#define TARGET_CONTINUE 2
#define TARGET_DISCARD 3
#define TARGET_TILEBUF_WAIT 4

typedef struct midgard_branch {
        /* If conditional, the condition is specified in r31.w */
        bool conditional;

        /* For conditionals, if this is true, we branch on FALSE. If false, we branch on TRUE. */
        bool invert_conditional;

        /* Branch targets: the start of a block, the start of a loop (continue), the end of a loop (break). Value is one of TARGET_ */
        unsigned target_type;

        /* The actual target */
        union {
                int target_block;
                int target_break;
                int target_continue;
        };
} midgard_branch;

#define PAN_WRITEOUT_C 1
#define PAN_WRITEOUT_Z 2
#define PAN_WRITEOUT_S 4

/* Generic in-memory data type representing a single logical instruction, rather
 * than a single instruction group. This is the preferred form for code gen.
 * Multiple midgard_instructions will later be combined during scheduling,
 * though this is not represented in this structure.  Its format bridges
 * the low-level binary representation with the higher level semantic meaning.
 *
 * Notably, it allows registers to be specified as block local SSA, for code
 * emitted before the register allocation pass.
 */

#define MIR_SRC_COUNT 4
#define MIR_VEC_COMPONENTS 16

typedef struct midgard_instruction {
        /* Must be first for casting */
        struct list_head link;

        unsigned type; /* ALU, load/store, texture */

        /* Instruction arguments represented as block-local SSA
         * indices, rather than registers. ~0 means unused. */
        unsigned src[MIR_SRC_COUNT];
        unsigned dest;

        /* vec16 swizzle, unpacked, per source */
        unsigned swizzle[MIR_SRC_COUNT][MIR_VEC_COMPONENTS];

        /* Types! */
        nir_alu_type src_types[MIR_SRC_COUNT];
        nir_alu_type dest_type;

        /* Packing ops have non-32-bit dest types even though they functionally
         * work at the 32-bit level; use this as a signal to disable copyprop.
         * We maybe need synthetic pack ops instead. */
        bool is_pack;

        /* Modifiers, depending on type */
        union {
                struct {
                        bool src_abs[MIR_SRC_COUNT];
                        bool src_neg[MIR_SRC_COUNT];
                };

                struct {
                        bool src_shift[MIR_SRC_COUNT];
                };
        };

        /* Out of the union for csel (could maybe be fixed..) */
        bool src_invert[MIR_SRC_COUNT];

        /* If the op supports it */
        enum midgard_roundmode roundmode;

        /* For textures: should helpers execute this instruction (instead of
         * just helping with derivatives)? Should helpers terminate after? */
        bool helper_terminate;
        bool helper_execute;

        /* I.e. (1 << alu_bit) */
        int unit;

        bool has_constants;
        midgard_constants constants;
        uint16_t inline_constant;
        bool has_blend_constant;
        bool has_inline_constant;

        bool compact_branch;
        uint8_t writeout;
        bool last_writeout;

        /* Masks in a saneish format. One bit per channel, not packed fancy.
         * Use this instead of the op specific ones, and switch over at emit
         * time */

        uint16_t mask;

        /* Hint for the register allocator not to spill the destination written
         * from this instruction (because it is a spill/unspill node itself).
         * Bitmask of spilled classes */

        unsigned no_spill;

        /* Generic hint for intra-pass use */
        bool hint;

        /* During scheduling, the backwards dependency graph
         * (DAG). nr_dependencies is the number of unscheduled
         * instructions that must still be scheduled after
         * (before) this instruction. dependents are which
         * instructions need to be scheduled before (after) this
         * instruction. */

        unsigned nr_dependencies;
        BITSET_WORD *dependents;

        /* Use this in conjunction with `type` */
        unsigned op;

        /* This refers to midgard_outmod_float or midgard_outmod_int.
         * In case of an ALU op, use midgard_is_integer_out_op() to know which
         * one is used.
         * If it's a texture op, it's always midgard_outmod_float. */
        unsigned outmod;

        union {
                midgard_load_store_word load_store;
                midgard_texture_word texture;

                midgard_branch branch;
        };
} midgard_instruction;

typedef struct midgard_block {
        pan_block base;

        bool scheduled;

        /* List of midgard_bundles emitted (after the scheduler has run) */
        struct util_dynarray bundles;

        /* Number of quadwords _actually_ emitted, as determined after scheduling */
        unsigned quadword_count;

        /* Indicates this is a fixed-function fragment epilogue block */
        bool epilogue;

        /* Are helper invocations required by this block? */
        bool helpers_in;
} midgard_block;

typedef struct midgard_bundle {
        /* Tag for the overall bundle */
        int tag;

        /* Instructions contained by the bundle. instruction_count <= 6 (vmul,
         * sadd, vadd, smul, vlut, branch) */
        int instruction_count;
        midgard_instruction *instructions[6];

        /* Bundle-wide ALU configuration */
        int padding;
        int control;
        bool has_embedded_constants;
        midgard_constants constants;
        bool has_blend_constant;
        bool last_writeout;
} midgard_bundle;

enum midgard_rt_id {
        MIDGARD_COLOR_RT0 = 0,
        MIDGARD_COLOR_RT1,
        MIDGARD_COLOR_RT2,
        MIDGARD_COLOR_RT3,
        MIDGARD_COLOR_RT4,
        MIDGARD_COLOR_RT5,
        MIDGARD_COLOR_RT6,
        MIDGARD_COLOR_RT7,
        MIDGARD_ZS_RT,
        MIDGARD_NUM_RTS,
};

typedef struct compiler_context {
        nir_shader *nir;
        gl_shader_stage stage;

        /* Is internally a blend shader? Depends on stage == FRAGMENT */
        bool is_blend;

        /* Render target number for a keyed blend shader. Depends on is_blend */
        unsigned blend_rt;

        /* Index to precolour to r0 for an input blend colour */
        unsigned blend_input;

        /* Index to precolour to r2 for a dual-source blend colour */
        unsigned blend_src1;

        /* Tracking for blend constant patching */
        int blend_constant_offset;

        /* Number of bytes used for Thread Local Storage */
        unsigned tls_size;

        /* Count of spills and fills for shaderdb */
        unsigned spills;
        unsigned fills;

        /* Current NIR function */
        nir_function *func;

        /* Allocated compiler temporary counter */
        unsigned temp_alloc;

        /* Unordered list of midgard_blocks */
        int block_count;
        struct list_head blocks;

        /* TODO merge with block_count? */
        unsigned block_source_count;

        /* List of midgard_instructions emitted for the current block */
        midgard_block *current_block;

        /* If non-NULL, the preset block to continue into when the current
         * block is finished; otherwise emit_block will create one */
        midgard_block *after_block;

        /* The current "depth" of the loop, for disambiguating breaks/continues
         * when using nested loops */
        int current_loop_depth;

        /* Total number of loops for shader-db */
        unsigned loop_count;

        /* Constants which have been loaded, for later inlining */
        struct hash_table_u64 *ssa_constants;

        int temp_count;
        int max_hash;

        /* Set of NIR indices that were already emitted as outmods */
        BITSET_WORD *already_emitted;

        /* Just the count of the max register used. Higher count => higher
         * register pressure */
        int work_registers;

        /* The number of uniforms allowable for the fast path */
        int uniform_cutoff;

        /* Count of instructions emitted from NIR overall, across all blocks */
        int instruction_count;

        /* Alpha ref value passed in */
        float alpha_ref;

        unsigned quadword_count;

        /* Bitmask of valid metadata */
        unsigned metadata;

        /* Model-specific quirk set */
        uint32_t quirks;

        /* Writeout instructions for each render target */
        midgard_instruction *writeout_branch[MIDGARD_NUM_RTS];

        struct panfrost_sysvals sysvals;
} compiler_context;

/* Per-block live_in/live_out */
#define MIDGARD_METADATA_LIVENESS (1 << 0)

/* Helpers for manipulating the above structures (forming the driver IR) */

/* Append instruction to end of current block */

static inline midgard_instruction *
mir_upload_ins(struct compiler_context *ctx, struct midgard_instruction ins)
{
        midgard_instruction *heap = ralloc(ctx, struct midgard_instruction);
        memcpy(heap, &ins, sizeof(ins));
        return heap;
}

static inline midgard_instruction *
emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);
        list_addtail(&u->link, &ctx->current_block->base.instructions);
        return u;
}
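
/* Note: `ins` is taken by value. mir_upload_ins() copies it into a heap
 * allocation ralloc'd against the compiler context, so callers may build
 * instructions on the stack and hand them off; the returned pointer is the
 * copy that actually lives in the block's instruction list. */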

static inline struct midgard_instruction *
mir_insert_instruction_before(struct compiler_context *ctx,
                              struct midgard_instruction *tag,
                              struct midgard_instruction ins)
{
        struct midgard_instruction *u = mir_upload_ins(ctx, ins);
        list_addtail(&u->link, &tag->link);
        return u;
}

static inline void
mir_remove_instruction(struct midgard_instruction *ins)
{
        list_del(&ins->link);
}

static inline midgard_instruction*
mir_prev_op(struct midgard_instruction *ins)
{
        return list_last_entry(&(ins->link), midgard_instruction, link);
}

static inline midgard_instruction*
mir_next_op(struct midgard_instruction *ins)
{
        return list_first_entry(&(ins->link), midgard_instruction, link);
}

#define mir_foreach_block(ctx, v) \
        list_for_each_entry(pan_block, v, &ctx->blocks, link)

#define mir_foreach_block_from(ctx, from, v) \
        list_for_each_entry_from(pan_block, v, &from->base, &ctx->blocks, link)

#define mir_foreach_instr_in_block(block, v) \
        list_for_each_entry(struct midgard_instruction, v, &block->base.instructions, link)
#define mir_foreach_instr_in_block_rev(block, v) \
        list_for_each_entry_rev(struct midgard_instruction, v, &block->base.instructions, link)

#define mir_foreach_instr_in_block_safe(block, v) \
        list_for_each_entry_safe(struct midgard_instruction, v, &block->base.instructions, link)

#define mir_foreach_instr_in_block_safe_rev(block, v) \
        list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->base.instructions, link)

#define mir_foreach_instr_in_block_from(block, v, from) \
        list_for_each_entry_from(struct midgard_instruction, v, from, &block->base.instructions, link)

#define mir_foreach_instr_in_block_from_rev(block, v, from) \
        list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->base.instructions, link)

#define mir_foreach_bundle_in_block(block, v) \
        util_dynarray_foreach(&block->bundles, midgard_bundle, v)

#define mir_foreach_bundle_in_block_rev(block, v) \
        util_dynarray_foreach_reverse(&block->bundles, midgard_bundle, v)

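/* Unlike the list-based iterators above, this macro declares the cursor `v`
 * and the index `i` in the enclosing scope, so it can be used at most once
 * per scope. */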
#define mir_foreach_instr_in_block_scheduled_rev(block, v) \
        midgard_instruction* v; \
        signed i = 0; \
        mir_foreach_bundle_in_block_rev(block, _bundle) \
                for (i = (_bundle->instruction_count - 1), v = _bundle->instructions[i]; \
                                i >= 0; \
                                --i, v = (i >= 0) ? _bundle->instructions[i] : NULL) \

#define mir_foreach_instr_global(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block(((midgard_block *) v_block), v)

#define mir_foreach_instr_global_safe(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block_safe(((midgard_block *) v_block), v)

/* Based on set_foreach, expanded with automatic type casts */

#define mir_foreach_predecessor(blk, v) \
        struct set_entry *_entry_##v; \
        struct midgard_block *v; \
        for (_entry_##v = _mesa_set_next_entry(blk->base.predecessors, NULL), \
                v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL);  \
                _entry_##v != NULL; \
                _entry_##v = _mesa_set_next_entry(blk->base.predecessors, _entry_##v), \
                v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL))

#define mir_foreach_src(ins, v) \
        for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
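
/* For illustration (sketch, with `old` and `new` as placeholder node indices),
 * a pass that rewrites one source index to another within a block could be
 * structured with these iterators as:
 *
 *    mir_foreach_instr_in_block(block, ins) {
 *            mir_foreach_src(ins, i) {
 *                    if (ins->src[i] == old)
 *                            ins->src[i] = new;
 *            }
 *    }
 */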

static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
{
        return list_last_entry(&block->base.instructions, struct midgard_instruction, link);
}

static inline midgard_block *
mir_get_block(compiler_context *ctx, int idx)
{
        struct list_head *lst = &ctx->blocks;

        while ((idx--) + 1)
                lst = lst->next;

        return (struct midgard_block *) lst;
}

static inline bool
mir_is_alu_bundle(midgard_bundle *bundle)
{
        return IS_ALU(bundle->tag);
}

static inline unsigned
make_compiler_temp(compiler_context *ctx)
{
        return (ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1;
}

static inline unsigned
make_compiler_temp_reg(compiler_context *ctx)
{
        return ((ctx->func->impl->reg_alloc + ctx->temp_alloc++) << 1) | PAN_IS_REG;
}

static inline unsigned
nir_ssa_index(nir_ssa_def *ssa)
{
        return (ssa->index << 1) | 0;
}

static inline unsigned
nir_src_index(compiler_context *ctx, nir_src *src)
{
        if (src->is_ssa)
                return nir_ssa_index(src->ssa);
        else {
                assert(!src->reg.indirect);
                return (src->reg.reg->index << 1) | PAN_IS_REG;
        }
}

static inline unsigned
nir_dest_index(nir_dest *dst)
{
        if (dst->is_ssa)
                return (dst->ssa.index << 1) | 0;
        else {
                assert(!dst->reg.indirect);
                return (dst->reg.reg->index << 1) | PAN_IS_REG;
        }
}
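
/* In other words, MIR node indices carry the NIR index in the upper bits and
 * use the low bit (PAN_IS_REG, from pan_ir.h) to separate NIR registers from
 * SSA values. For example, assuming PAN_IS_REG is the low bit, ssa_5 maps to
 * node 10 while register 5 maps to node 11, so the two namespaces never
 * collide, and compiler temporaries are allocated past ssa_alloc/reg_alloc. */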

/* MIR manipulation */

void mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle);
bool mir_single_use(compiler_context *ctx, unsigned value);
unsigned mir_use_count(compiler_context *ctx, unsigned value);
uint16_t mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node);
uint16_t mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i);
uint16_t mir_from_bytemask(uint16_t bytemask, unsigned bits);
uint16_t mir_bytemask(midgard_instruction *ins);
uint16_t mir_round_bytemask_up(uint16_t mask, unsigned bits);
void mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask);
signed mir_upper_override(midgard_instruction *ins, unsigned inst_size);
unsigned mir_components_for_type(nir_alu_type T);
unsigned max_bitsize_for_alu(midgard_instruction *ins);
midgard_reg_mode reg_mode_for_bitsize(unsigned bitsize);

/* MIR printing */

void mir_print_instruction(midgard_instruction *ins);
void mir_print_bundle(midgard_bundle *ctx);
void mir_print_block(midgard_block *block);
void mir_print_shader(compiler_context *ctx);
bool mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle);
bool mir_nontrivial_outmod(midgard_instruction *ins);

void mir_insert_instruction_before_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);
void mir_insert_instruction_after_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);
void mir_flip(midgard_instruction *ins);
void mir_compute_temp_count(compiler_context *ctx);

void mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset, bool is_shared);

/* 'Intrinsic' move for aliasing */

static inline midgard_instruction
v_mov(unsigned src, unsigned dest)
{
        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .mask = 0xF,
                .src = { ~0, src, ~0, ~0 },
                .src_types = { 0, nir_type_uint32 },
                .swizzle = SWIZZLE_IDENTITY,
                .dest = dest,
                .dest_type = nir_type_uint32,
                .op = midgard_alu_op_imov,
                .outmod = midgard_outmod_int_wrap
        };

        return ins;
}
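
/* Usage sketch (with `src` and `dest` as placeholder node indices): a copy
 * appended to the current block and narrowed to the low two components:
 *
 *    midgard_instruction *mov = emit_mir_instruction(ctx, v_mov(src, dest));
 *    mov->mask = 0x3;
 */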

/* Broad types of register classes so we can handle special
 * registers */

#define REG_CLASS_WORK          0
#define REG_CLASS_LDST          1
#define REG_CLASS_TEXR          3
#define REG_CLASS_TEXW          4

/* Like a move, but to thread local storage! */

static inline midgard_instruction
v_load_store_scratch(
                unsigned srcdest,
                unsigned index,
                bool is_store,
                unsigned mask)
{
        /* We index by 32-bit vec4s */
        unsigned byte = (index * 4 * 4);

        midgard_instruction ins = {
                .type = TAG_LOAD_STORE_4,
                .mask = mask,
                .dest_type = nir_type_uint32,
                .dest = ~0,
                .src = { ~0, ~0, ~0, ~0 },
                .swizzle = SWIZZLE_IDENTITY_4,
                .op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,
                .load_store = {
                        /* For register spilling - to thread local storage */
                        .arg_1 = 0xEA,
                        .arg_2 = 0x1E,
                },

                /* If we spill an unspill, RA goes into an infinite loop */
                .no_spill = (1 << REG_CLASS_WORK)
        };

        ins.constants.u32[0] = byte;

        if (is_store) {
                ins.src[0] = srcdest;
                ins.src_types[0] = nir_type_uint32;

                /* Ensure we are tightly swizzled so liveness analysis is
                 * correct */

                for (unsigned i = 0; i < 4; ++i) {
                        if (!(mask & (1 << i)))
                                ins.swizzle[0][i] = COMPONENT_X;
                }
        } else
                ins.dest = srcdest;

        return ins;
}
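
/* For example (sketch; `node`, `slot`, `def`, and `use` are placeholder names),
 * a spill/fill pair around a scheduled definition and a later use might be
 * emitted as:
 *
 *    midgard_instruction st = v_load_store_scratch(node, slot, true, 0xF);
 *    mir_insert_instruction_after_scheduled(ctx, block, def, st);
 *
 *    midgard_instruction ld = v_load_store_scratch(node, slot, false, 0xF);
 *    mir_insert_instruction_before_scheduled(ctx, block, use, ld);
 */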

static inline bool
mir_has_arg(midgard_instruction *ins, unsigned arg)
{
        if (!ins)
                return false;

        mir_foreach_src(ins, i) {
                if (ins->src[i] == arg)
                        return true;
        }

        return false;
}

/* Scheduling */

void midgard_schedule_program(compiler_context *ctx);

void mir_ra(compiler_context *ctx);
void mir_squeeze_index(compiler_context *ctx);
void mir_lower_special_reads(compiler_context *ctx);
void mir_liveness_ins_update(uint16_t *live, midgard_instruction *ins, unsigned max);
void mir_compute_liveness(compiler_context *ctx);
void mir_invalidate_liveness(compiler_context *ctx);
bool mir_is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src);

void mir_create_pipeline_registers(compiler_context *ctx);
void midgard_promote_uniforms(compiler_context *ctx);

void
midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr);

void
midgard_lower_derivatives(compiler_context *ctx, midgard_block *block);

bool mir_op_computes_derivatives(gl_shader_stage stage, unsigned op);

void mir_analyze_helper_terminate(compiler_context *ctx);
void mir_analyze_helper_requirements(compiler_context *ctx);

/* Final emission */

void emit_binary_bundle(
        compiler_context *ctx,
        midgard_block *block,
        midgard_bundle *bundle,
        struct util_dynarray *emission,
        int next_tag);

bool
nir_undef_to_zero(nir_shader *shader);
bool nir_fuse_io_16(nir_shader *shader);

void midgard_nir_lod_errata(nir_shader *shader);

unsigned midgard_get_first_tag_from_block(compiler_context *ctx, unsigned block_idx);

/* Optimizations */

bool midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block);
bool midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_code_eliminate(compiler_context *ctx);
bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block);

#endif