/*
 * Copyright © 2016-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "broadcom/common/v3d_device_info.h"
#include "v3d_compiler.h"
#include "util/u_prim.h"
#include "compiler/nir/nir_schedule.h"

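/**
 * Returns the number of sources the instruction reads: none for branches,
 * and for ALU instructions the source count of the add op if it is not a
 * NOP, otherwise that of the mul op.
 */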
int
vir_get_nsrc(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return 0;
        case V3D_QPU_INSTR_TYPE_ALU:
                if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)
                        return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);
                else
                        return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);
        }

        return 0;
}

/**
 * Returns whether the instruction has any side effects that must be
 * preserved.
 */
bool
vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_SETREVF:
                case V3D_QPU_A_SETMSF:
                case V3D_QPU_A_VPMSETUP:
                case V3D_QPU_A_STVPMV:
                case V3D_QPU_A_STVPMD:
                case V3D_QPU_A_STVPMP:
                case V3D_QPU_A_VPMWT:
                case V3D_QPU_A_TMUWT:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_MULTOP:
                        return true;
                default:
                        break;
                }
        }

        if (inst->qpu.sig.ldtmu ||
            inst->qpu.sig.ldvary ||
            inst->qpu.sig.ldtlbu ||
            inst->qpu.sig.ldtlb ||
            inst->qpu.sig.wrtmuc ||
            inst->qpu.sig.thrsw) {
                return true;
        }

        return false;
}

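/**
 * Returns whether the instruction is a MOV/FMOV in the mul ALU with no
 * pack/unpack modifiers and no condition applied, i.e. one that passes its
 * source through unmodified.
 */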
bool
vir_is_raw_mov(struct qinst *inst)
{
        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&
             inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {
                return false;
        }

        if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||
            inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {
                return false;
        }

        if (inst->qpu.alu.add.a_unpack != V3D_QPU_UNPACK_NONE ||
            inst->qpu.alu.add.b_unpack != V3D_QPU_UNPACK_NONE ||
            inst->qpu.alu.mul.a_unpack != V3D_QPU_UNPACK_NONE ||
            inst->qpu.alu.mul.b_unpack != V3D_QPU_UNPACK_NONE) {
                return false;
        }

        if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
            inst->qpu.flags.mc != V3D_QPU_COND_NONE)
                return false;

        return true;
}

bool
vir_is_add(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.add.op != V3D_QPU_A_NOP);
}

bool
vir_is_mul(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.mul.op != V3D_QPU_M_NOP);
}

bool
vir_is_tex(struct qinst *inst)
{
        if (inst->dst.file == QFILE_MAGIC)
                return v3d_qpu_magic_waddr_is_tmu(inst->dst.index);

        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
                return true;
        }

        return false;
}

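/* Returns whether the instruction implicitly writes the r3 accumulator:
 * VPM reads do, and on V3D 4.0 and earlier so do the ldvary/ldtlb/ldtlbu/
 * ldvpm signals.  vir_writes_r4() below is the equivalent check for r4.
 */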
bool
vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                switch (inst->src[i].file) {
                case QFILE_VPM:
                        return true;
                default:
                        break;
                }
        }

        if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||
                                  inst->qpu.sig.ldtlb ||
                                  inst->qpu.sig.ldtlbu ||
                                  inst->qpu.sig.ldvpm)) {
                return true;
        }

        return false;
}

bool
vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        switch (inst->dst.file) {
        case QFILE_MAGIC:
                switch (inst->dst.index) {
                case V3D_QPU_WADDR_RECIP:
                case V3D_QPU_WADDR_RSQRT:
                case V3D_QPU_WADDR_EXP:
                case V3D_QPU_WADDR_LOG:
                case V3D_QPU_WADDR_SIN:
                        return true;
                }
                break;
        default:
                break;
        }

        if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)
                return true;

        return false;
}

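/* Sets the input unpack mode for source @src on whichever of the add or mul
 * ALU the instruction occupies.
 */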
void
vir_set_unpack(struct qinst *inst, int src,
               enum v3d_qpu_input_unpack unpack)
{
        assert(src == 0 || src == 1);

        if (vir_is_add(inst)) {
                if (src == 0)
                        inst->qpu.alu.add.a_unpack = unpack;
                else
                        inst->qpu.alu.add.b_unpack = unpack;
        } else {
                assert(vir_is_mul(inst));
                if (src == 0)
                        inst->qpu.alu.mul.a_unpack = unpack;
                else
                        inst->qpu.alu.mul.b_unpack = unpack;
        }
}

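/* vir_set_cond() and the vir_set_pf()/vir_set_uf() helpers below route the
 * condition or flags field to the add or mul slot, depending on which ALU
 * the instruction occupies.
 */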
void
vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.ac = cond;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mc = cond;
        }
}

void
vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.apf = pf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mpf = pf;
        }
}

void
vir_set_uf(struct qinst *inst, enum v3d_qpu_uf uf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.auf = uf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.muf = uf;
        }
}

#if 0
uint8_t
vir_channels_written(struct qinst *inst)
{
        if (vir_is_mul(inst)) {
                switch (inst->dst.pack) {
                case QPU_PACK_MUL_NOP:
                case QPU_PACK_MUL_8888:
                        return 0xf;
                case QPU_PACK_MUL_8A:
                        return 0x1;
                case QPU_PACK_MUL_8B:
                        return 0x2;
                case QPU_PACK_MUL_8C:
                        return 0x4;
                case QPU_PACK_MUL_8D:
                        return 0x8;
                }
        } else {
                switch (inst->dst.pack) {
                case QPU_PACK_A_NOP:
                case QPU_PACK_A_8888:
                case QPU_PACK_A_8888_SAT:
                case QPU_PACK_A_32_SAT:
                        return 0xf;
                case QPU_PACK_A_8A:
                case QPU_PACK_A_8A_SAT:
                        return 0x1;
                case QPU_PACK_A_8B:
                case QPU_PACK_A_8B_SAT:
                        return 0x2;
                case QPU_PACK_A_8C:
                case QPU_PACK_A_8C_SAT:
                        return 0x4;
                case QPU_PACK_A_8D:
                case QPU_PACK_A_8D_SAT:
                        return 0x8;
                case QPU_PACK_A_16A:
                case QPU_PACK_A_16A_SAT:
                        return 0x3;
                case QPU_PACK_A_16B:
                case QPU_PACK_A_16B_SAT:
                        return 0xc;
                }
        }
        unreachable("Bad pack field");
}
#endif

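/**
 * Allocates a fresh temporary register, doubling the defs and spillable
 * arrays when they fill up.  New temps start out marked as spillable.
 */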
struct qreg
vir_get_temp(struct v3d_compile *c)
{
        struct qreg reg;

        reg.file = QFILE_TEMP;
        reg.index = c->num_temps++;

        if (c->num_temps > c->defs_array_size) {
                uint32_t old_size = c->defs_array_size;
                c->defs_array_size = MAX2(old_size * 2, 16);

                c->defs = reralloc(c, c->defs, struct qinst *,
                                   c->defs_array_size);
                memset(&c->defs[old_size], 0,
                       sizeof(c->defs[0]) * (c->defs_array_size - old_size));

                c->spillable = reralloc(c, c->spillable,
                                        BITSET_WORD,
                                        BITSET_WORDS(c->defs_array_size));
                for (int i = old_size; i < c->defs_array_size; i++)
                        BITSET_SET(c->spillable, i);
        }

        return reg;
}

struct qinst *
vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.add.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

struct qinst *
vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.mul.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;
        inst->uniform = ~0;

        return inst;
}

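/* Creates a branch instruction.  Both branch destinations use relative
 * addressing, and since "ub" is set the branch reads a uniform, so a zero
 * constant is reserved for it.
 */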
struct qinst *
vir_branch_inst(struct v3d_compile *c, enum v3d_qpu_branch_cond cond)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;
        inst->qpu.branch.cond = cond;
        inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;
        inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;
        inst->qpu.branch.ub = true;
        inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;

        inst->dst = vir_nop_reg();
        inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT, 0);

        return inst;
}

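/* Inserts the instruction at the current cursor position and advances the
 * cursor past it, so consecutive emits land in program order.
 */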
static void
vir_emit(struct v3d_compile *c, struct qinst *inst)
{
        switch (c->cursor.mode) {
        case vir_cursor_add:
                list_add(&inst->link, c->cursor.link);
                break;
        case vir_cursor_addtail:
                list_addtail(&inst->link, c->cursor.link);
                break;
        }

        c->cursor = vir_after_inst(inst);
        c->live_intervals_valid = false;
}

/* Updates inst to write to a new temporary, emits it, and notes the def. */
struct qreg
vir_emit_def(struct v3d_compile *c, struct qinst *inst)
{
        assert(inst->dst.file == QFILE_NULL);

        /* If we're emitting an instruction that's a def, it had better be
         * writing a register.
         */
        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP ||
                       v3d_qpu_add_op_has_dst(inst->qpu.alu.add.op));
                assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP ||
                       v3d_qpu_mul_op_has_dst(inst->qpu.alu.mul.op));
        }

        inst->dst = vir_get_temp(c);

        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = inst;

        vir_emit(c, inst);

        return inst->dst;
}

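/* Emits the instruction with its destination left as-is.  If it writes a
 * temp, any previously recorded def of that temp is cleared, since it no
 * longer has a single def.
 */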
struct qinst *
vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)
{
        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = NULL;

        vir_emit(c, inst);

        return inst;
}

struct qblock *
vir_new_block(struct v3d_compile *c)
{
        struct qblock *block = rzalloc(c, struct qblock);

        list_inithead(&block->instructions);

        block->predecessors = _mesa_set_create(block,
                                               _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

        block->index = c->next_block_index++;

        return block;
}

void
vir_set_emit_block(struct v3d_compile *c, struct qblock *block)
{
        c->cur_block = block;
        c->cursor = vir_after_block(block);
        list_addtail(&block->link, &c->blocks);
}

struct qblock *
vir_entry_block(struct v3d_compile *c)
{
        return list_first_entry(&c->blocks, struct qblock, link);
}

struct qblock *
vir_exit_block(struct v3d_compile *c)
{
        return list_last_entry(&c->blocks, struct qblock, link);
}

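/* Records a control-flow edge from predecessor to successor; a block can
 * have at most two successors.
 */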
void
vir_link_blocks(struct qblock *predecessor, struct qblock *successor)
{
        _mesa_set_add(successor->predecessors, predecessor);
        if (predecessor->successors[0]) {
                assert(!predecessor->successors[1]);
                predecessor->successors[1] = successor;
        } else {
                predecessor->successors[0] = successor;
        }
}

const struct v3d_compiler *
v3d_compiler_init(const struct v3d_device_info *devinfo)
{
        struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);
        if (!compiler)
                return NULL;

        compiler->devinfo = devinfo;

        if (!vir_init_reg_sets(compiler)) {
                ralloc_free(compiler);
                return NULL;
        }

        return compiler;
}

void
v3d_compiler_free(const struct v3d_compiler *compiler)
{
        ralloc_free((void *)compiler);
}

static struct v3d_compile *
vir_compile_init(const struct v3d_compiler *compiler,
                 struct v3d_key *key,
                 nir_shader *s,
                 void (*debug_output)(const char *msg,
                                      void *debug_output_data),
                 void *debug_output_data,
                 int program_id, int variant_id,
                 bool fallback_scheduler)
{
        struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);

        c->compiler = compiler;
        c->devinfo = compiler->devinfo;
        c->key = key;
        c->program_id = program_id;
        c->variant_id = variant_id;
        c->threads = 4;
        c->debug_output = debug_output;
        c->debug_output_data = debug_output_data;
        c->compilation_result = V3D_COMPILATION_SUCCEEDED;
        c->fallback_scheduler = fallback_scheduler;

        s = nir_shader_clone(c, s);
        c->s = s;

        list_inithead(&c->blocks);
        vir_set_emit_block(c, vir_new_block(c));

        c->output_position_index = -1;
        c->output_sample_mask_index = -1;

        c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

        return c;
}

static int
type_size_vec4(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}

static void
v3d_lower_nir(struct v3d_compile *c)
{
        struct nir_lower_tex_options tex_options = {
                .lower_txd = true,
                .lower_tg4_broadcom_swizzle = true,

                .lower_rect = false, /* XXX: Use this on V3D 3.x */
                .lower_txp = ~0,
                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and (for 32-bit returns)
         * ARB_texture_swizzle-style swizzle.
         */
        for (int i = 0; i < ARRAY_SIZE(c->key->tex); i++) {
                for (int j = 0; j < 4; j++)
                        tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];

                if (c->key->tex[i].clamp_s)
                        tex_options.saturate_s |= 1 << i;
                if (c->key->tex[i].clamp_t)
                        tex_options.saturate_t |= 1 << i;
                if (c->key->tex[i].clamp_r)
                        tex_options.saturate_r |= 1 << i;
                if (c->key->tex[i].return_size == 16) {
                        tex_options.lower_tex_packing[i] =
                                nir_lower_tex_packing_16;
                }
        }

        /* CS textures may not have return_size reflecting the shadow state. */
        nir_foreach_uniform_variable(var, c->s) {
                const struct glsl_type *type = glsl_without_array(var->type);
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);

                if (!glsl_type_is_sampler(type) ||
                    !glsl_sampler_type_is_shadow(type))
                        continue;

                for (int i = 0; i < array_len; i++) {
                        tex_options.lower_tex_packing[var->data.binding + i] =
                                nir_lower_tex_packing_16;
                }
        }

        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
        NIR_PASS_V(c->s, nir_lower_system_values);

        NIR_PASS_V(c->s, nir_lower_vars_to_scratch,
                   nir_var_function_temp,
                   0,
                   glsl_get_natural_size_align_bytes);
        NIR_PASS_V(c->s, v3d_nir_lower_scratch);
}

static void
v3d_set_prog_data_uniforms(struct v3d_compile *c,
                           struct v3d_prog_data *prog_data)
{
        int count = c->num_uniforms;
        struct v3d_uniform_list *ulist = &prog_data->uniforms;

        ulist->count = count;
        ulist->data = ralloc_array(prog_data, uint32_t, count);
        memcpy(ulist->data, c->uniform_data,
               count * sizeof(*ulist->data));
        ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);
        memcpy(ulist->contents, c->uniform_contents,
               count * sizeof(*ulist->contents));
}

static void
v3d_vs_set_prog_data(struct v3d_compile *c,
                     struct v3d_vs_prog_data *prog_data)
{
        /* The vertex data gets format converted by the VPM so that
         * each attribute channel takes up a VPM column.  Precompute
         * the sizes for the shader record.
         */
        for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
                prog_data->vattr_sizes[i] = c->vattr_sizes[i];
                prog_data->vpm_input_size += c->vattr_sizes[i];
        }

        prog_data->uses_vid = (c->s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_VERTEX_ID));
        prog_data->uses_iid = (c->s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_INSTANCE_ID));

        if (prog_data->uses_vid)
                prog_data->vpm_input_size++;
        if (prog_data->uses_iid)
                prog_data->vpm_input_size++;

        /* Input/output segment size are in sectors (8 rows of 32 bits per
         * channel).
         */
        prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
        prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;

        /* Set us up for shared input/output segments.  This is apparently
         * necessary for our VCM setup to avoid varying corruption.
         */
        prog_data->separate_segments = false;
        prog_data->vpm_output_size = MAX2(prog_data->vpm_output_size,
                                          prog_data->vpm_input_size);
        prog_data->vpm_input_size = 0;

        /* Compute VCM cache size.  We set up our program to take up less than
         * half of the VPM, so that any set of bin and render programs won't
         * run out of space.  We need space for at least one input segment,
         * and then allocate the rest to output segments (one for the current
         * program, the rest to VCM).  The valid range of the VCM cache size
         * field is 1-4 16-vertex batches, but GFXH-1744 limits us to 2-4
         * batches.
         */
        assert(c->devinfo->vpm_size);
        int sector_size = V3D_CHANNELS * sizeof(uint32_t) * 8;
        int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;
        int half_vpm = vpm_size_in_sectors / 2;
        int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;
        int vpm_output_batches = vpm_output_sectors / prog_data->vpm_output_size;
        assert(vpm_output_batches >= 2);
        prog_data->vcm_cache_size = CLAMP(vpm_output_batches - 1, 2, 4);
}

static void
v3d_gs_set_prog_data(struct v3d_compile *c,
                     struct v3d_gs_prog_data *prog_data)
{
        prog_data->num_inputs = c->num_inputs;
        memcpy(prog_data->input_slots, c->input_slots,
               c->num_inputs * sizeof(*c->input_slots));

        /* gl_PrimitiveIdIn is written by the GBG into the first word of the
         * VPM output header automatically and the shader will overwrite
         * it after reading it if necessary, so it doesn't add to the VPM
         * size requirements.
         */
        prog_data->uses_pid = (c->s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_PRIMITIVE_ID));

        /* Output segment size is in sectors (8 rows of 32 bits per channel) */
        prog_data->vpm_output_size = align(c->vpm_output_size, 8) / 8;

        /* Compute SIMD dispatch width and update VPM output size accordingly
         * to ensure we can fit our program in memory. Available widths are
         * 16, 8, 4, 1.
         *
         * Notice that at draw time we will have to consider VPM memory
         * requirements from other stages and choose a smaller dispatch
         * width if needed to fit the program in VPM memory.
         */
        prog_data->simd_width = 16;
        while ((prog_data->simd_width > 1 && prog_data->vpm_output_size > 16) ||
               prog_data->simd_width == 2) {
                prog_data->simd_width >>= 1;
                prog_data->vpm_output_size =
                        align(prog_data->vpm_output_size, 2) / 2;
        }
        assert(prog_data->vpm_output_size <= 16);
        assert(prog_data->simd_width != 2);

        prog_data->out_prim_type = c->s->info.gs.output_primitive;
        prog_data->num_invocations = c->s->info.gs.invocations;
}

static void
v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
                            struct v3d_fs_prog_data *prog_data)
{
        prog_data->num_inputs = c->num_inputs;
        memcpy(prog_data->input_slots, c->input_slots,
               c->num_inputs * sizeof(*c->input_slots));

        STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >
                      (V3D_MAX_FS_INPUTS - 1) / 24);
        for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {
                if (BITSET_TEST(c->flat_shade_flags, i))
                        prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->noperspective_flags, i))
                        prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->centroid_flags, i))
                        prog_data->centroid_flags[i / 24] |= 1 << (i % 24);
        }
}

static void
v3d_fs_set_prog_data(struct v3d_compile *c,
                     struct v3d_fs_prog_data *prog_data)
{
        v3d_set_fs_prog_data_inputs(c, prog_data);
        prog_data->writes_z = c->writes_z;
        prog_data->disable_ez = !c->s->info.fs.early_fragment_tests;
        prog_data->uses_center_w = c->uses_center_w;
        prog_data->uses_implicit_point_line_varyings =
                c->uses_implicit_point_line_varyings;
        prog_data->lock_scoreboard_on_first_thrsw =
                c->lock_scoreboard_on_first_thrsw;
}

static void
v3d_cs_set_prog_data(struct v3d_compile *c,
                     struct v3d_compute_prog_data *prog_data)
{
        prog_data->shared_size = c->s->info.cs.shared_size;
}

static void
v3d_set_prog_data(struct v3d_compile *c,
                  struct v3d_prog_data *prog_data)
{
        prog_data->threads = c->threads;
        prog_data->single_seg = !c->last_thrsw;
        prog_data->spill_size = c->spill_size;
        prog_data->tmu_dirty_rcl = c->tmu_dirty_rcl;

        v3d_set_prog_data_uniforms(c, prog_data);

        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                v3d_vs_set_prog_data(c, (struct v3d_vs_prog_data *)prog_data);
                break;
        case MESA_SHADER_GEOMETRY:
                v3d_gs_set_prog_data(c, (struct v3d_gs_prog_data *)prog_data);
                break;
        case MESA_SHADER_FRAGMENT:
                v3d_fs_set_prog_data(c, (struct v3d_fs_prog_data *)prog_data);
                break;
        case MESA_SHADER_COMPUTE:
                v3d_cs_set_prog_data(c, (struct v3d_compute_prog_data *)prog_data);
                break;
        default:
                unreachable("unsupported shader stage");
        }
}

static uint64_t *
v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
{
        *final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);

        uint64_t *qpu_insts = malloc(*final_assembly_size);
        if (!qpu_insts)
                return NULL;

        memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);

        vir_compile_destroy(c);

        return qpu_insts;
}

static void
v3d_nir_lower_vs_early(struct v3d_compile *c)
{
        /* Split our I/O vars and dead code eliminate the unused
         * components.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
                   nir_var_shader_in | nir_var_shader_out);
        uint64_t used_outputs[4] = {0};
        for (int i = 0; i < c->vs_key->num_used_outputs; i++) {
                int slot = v3d_slot_get_slot(c->vs_key->used_outputs[i]);
                int comp = v3d_slot_get_component(c->vs_key->used_outputs[i]);
                used_outputs[comp] |= 1ull << slot;
        }
        NIR_PASS_V(c->s, nir_remove_unused_io_vars,
                   nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
        NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);

        /* This must go before nir_lower_io */
        if (c->vs_key->per_vertex_point_size)
                NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);

        NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
                   type_size_vec4,
                   (nir_lower_io_options)0);
        /* clean up nir_lower_io's deref_var remains */
        NIR_PASS_V(c->s, nir_opt_dce);
}

static void
v3d_nir_lower_gs_early(struct v3d_compile *c)
{
        /* Split our I/O vars and dead code eliminate the unused
         * components.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
                   nir_var_shader_in | nir_var_shader_out);
        uint64_t used_outputs[4] = {0};
        for (int i = 0; i < c->gs_key->num_used_outputs; i++) {
                int slot = v3d_slot_get_slot(c->gs_key->used_outputs[i]);
                int comp = v3d_slot_get_component(c->gs_key->used_outputs[i]);
                used_outputs[comp] |= 1ull << slot;
        }
        NIR_PASS_V(c->s, nir_remove_unused_io_vars,
                   nir_var_shader_out, used_outputs, NULL); /* demotes to globals */
        NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in, NULL);

        /* This must go before nir_lower_io */
        if (c->gs_key->per_vertex_point_size)
                NIR_PASS_V(c->s, nir_lower_point_size, 1.0f, 0.0f);

        NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
                   type_size_vec4,
                   (nir_lower_io_options)0);
        /* clean up nir_lower_io's deref_var remains */
        NIR_PASS_V(c->s, nir_opt_dce);
}

static void
v3d_fixup_fs_output_types(struct v3d_compile *c)
{
        nir_foreach_shader_out_variable(var, c->s) {
                uint32_t mask = 0;

                switch (var->data.location) {
                case FRAG_RESULT_COLOR:
                        mask = ~0;
                        break;
                case FRAG_RESULT_DATA0:
                case FRAG_RESULT_DATA1:
                case FRAG_RESULT_DATA2:
                case FRAG_RESULT_DATA3:
                        mask = 1 << (var->data.location - FRAG_RESULT_DATA0);
                        break;
                }

                if (c->fs_key->int_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_INT,
                                                 glsl_get_components(var->type));
                } else if (c->fs_key->uint_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_UINT,
                                                 glsl_get_components(var->type));
                }
        }
}

static void
v3d_nir_lower_fs_early(struct v3d_compile *c)
{
        if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)
                v3d_fixup_fs_output_types(c);

        NIR_PASS_V(c->s, v3d_nir_lower_logic_ops, c);

        if (c->fs_key->line_smoothing) {
                v3d_nir_lower_line_smooth(c->s);
                NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
                /* The lowering pass can introduce new sysval reads */
                nir_shader_gather_info(c->s, nir_shader_get_entrypoint(c->s));
        }

        /* If the shader has no non-TLB side effects, we can promote it to
         * enabling early_fragment_tests even if the user didn't.
         */
        if (!(c->s->info.num_images ||
              c->s->info.num_ssbos)) {
                c->s->info.fs.early_fragment_tests = true;
        }
}

static void
v3d_nir_lower_gs_late(struct v3d_compile *c)
{
        if (c->key->ucp_enables) {
                NIR_PASS_V(c->s, nir_lower_clip_gs, c->key->ucp_enables,
                           false, NULL);
        }

        /* Note: GS output scalarizing must happen after nir_lower_clip_gs. */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
}

static void
v3d_nir_lower_vs_late(struct v3d_compile *c)
{
        if (c->vs_key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (c->key->ucp_enables) {
                NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables,
                           false, false, NULL);
                NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                           nir_var_shader_out);
        }

        /* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
}

static void
v3d_nir_lower_fs_late(struct v3d_compile *c)
{
        if (c->fs_key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color, true);

        if (c->fs_key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (c->fs_key->alpha_test) {
                NIR_PASS_V(c->s, nir_lower_alpha_test,
                           c->fs_key->alpha_test_func,
                           false, NULL);
        }

        /* In OpenGL the fragment shader can't read gl_ClipDistance[], but
         * Vulkan allows it, in which case the SPIR-V compiler will declare
         * VARYING_SLOT_CLIP_DIST0 as a compact array variable.  Pass true as
         * the last parameter to always operate with a compact array in both
         * OpenGL and Vulkan so we don't have to care about which API we are
         * using.
         */
        if (c->key->ucp_enables)
                NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables, true);

        /* Note: FS input scalarizing must happen after
         * nir_lower_two_sided_color, which only handles a vec4 at a time.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
}

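/* Returns the peak number of temporaries simultaneously live at any
 * instruction, for the shader-db stats printed by v3d_compile().  Assumes
 * c->temp_start/c->temp_end hold valid live intervals.
 */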
static uint32_t
vir_get_max_temps(struct v3d_compile *c)
{
        int max_ip = 0;
        vir_for_each_inst_inorder(inst, c)
                max_ip++;

        uint32_t *pressure = rzalloc_array(NULL, uint32_t, max_ip);

        for (int t = 0; t < c->num_temps; t++) {
                for (int i = c->temp_start[t]; (i < c->temp_end[t] &&
                                                i < max_ip); i++) {
                        if (i > max_ip)
                                break;
                        pressure[i]++;
                }
        }

        uint32_t max_temps = 0;
        for (int i = 0; i < max_ip; i++)
                max_temps = MAX2(max_temps, pressure[i]);

        ralloc_free(pressure);

        return max_temps;
}

enum v3d_dependency_class {
        V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0
};

static bool
v3d_intrinsic_dependency_cb(nir_intrinsic_instr *intr,
                            nir_schedule_dependency *dep,
                            void *user_data)
{
        struct v3d_compile *c = user_data;

        switch (intr->intrinsic) {
        case nir_intrinsic_store_output:
                /* Writing to location 0 overwrites the value passed in for
                 * gl_PrimitiveID on geometry shaders
                 */
                if (c->s->info.stage != MESA_SHADER_GEOMETRY ||
                    nir_intrinsic_base(intr) != 0)
                        break;

                nir_const_value *const_value =
                        nir_src_as_const_value(intr->src[1]);

                if (const_value == NULL)
                        break;

                uint64_t offset =
                        nir_const_value_as_uint(*const_value,
                                                nir_src_bit_size(intr->src[1]));
                if (offset != 0)
                        break;

                dep->klass = V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0;
                dep->type = NIR_SCHEDULE_WRITE_DEPENDENCY;
                return true;

        case nir_intrinsic_load_primitive_id:
                if (c->s->info.stage != MESA_SHADER_GEOMETRY)
                        break;

                dep->klass = V3D_DEPENDENCY_CLASS_GS_VPM_OUTPUT_0;
                dep->type = NIR_SCHEDULE_READ_DEPENDENCY;
                return true;

        default:
                break;
        }

        return false;
}

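/* Runs the stage-specific and common NIR lowering and optimization passes,
 * schedules the NIR for register pressure, and generates VIR from it.  A
 * register allocation failure is recorded in c->compilation_result, which
 * the caller checks to decide whether to retry with the fallback scheduler.
 */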
static void
v3d_attempt_compile(struct v3d_compile *c)
{
        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                c->vs_key = (struct v3d_vs_key *) c->key;
                break;
        case MESA_SHADER_GEOMETRY:
                c->gs_key = (struct v3d_gs_key *) c->key;
                break;
        case MESA_SHADER_FRAGMENT:
                c->fs_key = (struct v3d_fs_key *) c->key;
                break;
        case MESA_SHADER_COMPUTE:
                break;
        default:
                unreachable("unsupported shader stage");
        }

        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                v3d_nir_lower_vs_early(c);
                break;
        case MESA_SHADER_GEOMETRY:
                v3d_nir_lower_gs_early(c);
                break;
        case MESA_SHADER_FRAGMENT:
                v3d_nir_lower_fs_early(c);
                break;
        default:
                break;
        }

        v3d_lower_nir(c);

        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                v3d_nir_lower_vs_late(c);
                break;
        case MESA_SHADER_GEOMETRY:
                v3d_nir_lower_gs_late(c);
                break;
        case MESA_SHADER_FRAGMENT:
                v3d_nir_lower_fs_late(c);
                break;
        default:
                break;
        }

        NIR_PASS_V(c->s, v3d_nir_lower_io, c);
        NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);
        NIR_PASS_V(c->s, nir_lower_idiv, nir_lower_idiv_fast);

        v3d_optimize_nir(c->s);

        /* Do late algebraic optimization to turn add(a, neg(b)) back into
         * subs, then the mandatory cleanup after algebraic.  Note that it may
         * produce fnegs, and if so then we need to keep running to squash
         * fneg(fneg(a)).
         */
        bool more_late_algebraic = true;
        while (more_late_algebraic) {
                more_late_algebraic = false;
                NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
                NIR_PASS_V(c->s, nir_opt_constant_folding);
                NIR_PASS_V(c->s, nir_copy_prop);
                NIR_PASS_V(c->s, nir_opt_dce);
                NIR_PASS_V(c->s, nir_opt_cse);
        }

        NIR_PASS_V(c->s, nir_lower_bool_to_int32);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        struct nir_schedule_options schedule_options = {
                /* Schedule for about half our register space, to enable more
                 * shaders to hit 4 threads.
                 */
                .threshold = 24,

                /* Vertex shaders share the same memory for inputs and outputs,
                 * fragment and geometry shaders do not.
                 */
                .stages_with_shared_io_memory =
                (((1 << MESA_ALL_SHADER_STAGES) - 1) &
                 ~((1 << MESA_SHADER_FRAGMENT) |
                   (1 << MESA_SHADER_GEOMETRY))),

                .fallback = c->fallback_scheduler,

                .intrinsic_cb = v3d_intrinsic_dependency_cb,
                .intrinsic_cb_data = c,
        };
        NIR_PASS_V(c->s, nir_schedule, &schedule_options);

        v3d_nir_to_vir(c);
}

uint64_t *v3d_compile(const struct v3d_compiler *compiler,
                      struct v3d_key *key,
                      struct v3d_prog_data **out_prog_data,
                      nir_shader *s,
                      void (*debug_output)(const char *msg,
                                           void *debug_output_data),
                      void *debug_output_data,
                      int program_id, int variant_id,
                      uint32_t *final_assembly_size)
{
        struct v3d_compile *c;

        for (int i = 0; true; i++) {
                c = vir_compile_init(compiler, key, s,
                                     debug_output, debug_output_data,
                                     program_id, variant_id,
                                     i > 0 /* fallback_scheduler */);

                v3d_attempt_compile(c);

                if (i > 0 ||
                    c->compilation_result !=
                    V3D_COMPILATION_FAILED_REGISTER_ALLOCATION)
                        break;

                char *debug_msg;
                int ret = asprintf(&debug_msg,
                                   "Using fallback scheduler for %s",
                                   vir_get_stage_name(c));

                if (ret >= 0) {
                        if (unlikely(V3D_DEBUG & V3D_DEBUG_PERF))
                                fprintf(stderr, "%s\n", debug_msg);

                        c->debug_output(debug_msg, c->debug_output_data);
                        free(debug_msg);
                }

                vir_compile_destroy(c);
        }

        struct v3d_prog_data *prog_data;

        static const int prog_data_size[] = {
                [MESA_SHADER_VERTEX] = sizeof(struct v3d_vs_prog_data),
                [MESA_SHADER_GEOMETRY] = sizeof(struct v3d_gs_prog_data),
                [MESA_SHADER_FRAGMENT] = sizeof(struct v3d_fs_prog_data),
                [MESA_SHADER_COMPUTE] = sizeof(struct v3d_compute_prog_data),
        };

        assert(c->s->info.stage >= 0 &&
               c->s->info.stage < ARRAY_SIZE(prog_data_size) &&
               prog_data_size[c->s->info.stage]);

        prog_data = rzalloc_size(NULL, prog_data_size[c->s->info.stage]);

        v3d_set_prog_data(c, prog_data);

        *out_prog_data = prog_data;

        char *shaderdb;
        int ret = asprintf(&shaderdb,
                           "%s shader: %d inst, %d threads, %d loops, "
                           "%d uniforms, %d max-temps, %d:%d spills:fills, "
                           "%d sfu-stalls, %d inst-and-stalls",
                           vir_get_stage_name(c),
                           c->qpu_inst_count,
                           c->threads,
                           c->loops,
                           c->num_uniforms,
                           vir_get_max_temps(c),
                           c->spills,
                           c->fills,
                           c->qpu_inst_stalled_count,
                           c->qpu_inst_count + c->qpu_inst_stalled_count);
        if (ret >= 0) {
                if (V3D_DEBUG & V3D_DEBUG_SHADERDB)
                        fprintf(stderr, "SHADER-DB: %s\n", shaderdb);

                c->debug_output(shaderdb, c->debug_output_data);
                free(shaderdb);
        }

        return v3d_return_qpu_insts(c, final_assembly_size);
}

void
vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)
{
        if (qinst->dst.file == QFILE_TEMP)
                c->defs[qinst->dst.index] = NULL;

        assert(&qinst->link != c->cursor.link);

        list_del(&qinst->link);
        free(qinst);

        c->live_intervals_valid = false;
}

struct qreg
vir_follow_movs(struct v3d_compile *c, struct qreg reg)
{
        /* XXX
        int pack = reg.pack;

        while (reg.file == QFILE_TEMP &&
               c->defs[reg.index] &&
               (c->defs[reg.index]->op == QOP_MOV ||
                c->defs[reg.index]->op == QOP_FMOV) &&
               !c->defs[reg.index]->dst.pack &&
               !c->defs[reg.index]->src[0].pack) {
                reg = c->defs[reg.index]->src[0];
        }

        reg.pack = pack;
        */
        return reg;
}

void
vir_compile_destroy(struct v3d_compile *c)
{
        /* Defuse the assert that we aren't removing the cursor's instruction.
         */
        c->cursor.link = NULL;

        vir_for_each_block(block, c) {
                while (!list_is_empty(&block->instructions)) {
                        struct qinst *qinst =
                                list_first_entry(&block->instructions,
                                                 struct qinst, link);
                        vir_remove_instruction(c, qinst);
                }
        }

        ralloc_free(c);
}

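/* Returns the index in the uniform stream for the given (contents, data)
 * pair, reusing an existing entry if one matches and growing the arrays
 * otherwise.
 */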
uint32_t
vir_get_uniform_index(struct v3d_compile *c,
                      enum quniform_contents contents,
                      uint32_t data)
{
        for (int i = 0; i < c->num_uniforms; i++) {
                if (c->uniform_contents[i] == contents &&
                    c->uniform_data[i] == data) {
                        return i;
                }
        }

        uint32_t uniform = c->num_uniforms++;

        if (uniform >= c->uniform_array_size) {
                c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
                                             c->uniform_array_size * 2);

                c->uniform_data = reralloc(c, c->uniform_data,
                                           uint32_t,
                                           c->uniform_array_size);
                c->uniform_contents = reralloc(c, c->uniform_contents,
                                               enum quniform_contents,
                                               c->uniform_array_size);
        }

        c->uniform_contents[uniform] = contents;
        c->uniform_data[uniform] = data;

        return uniform;
}

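/* Emits a NOP with the ldunif signal set, loading the given uniform into a
 * fresh temporary.
 */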
struct qreg
vir_uniform(struct v3d_compile *c,
            enum quniform_contents contents,
            uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.ldunif = true;
        inst->uniform = vir_get_uniform_index(c, contents, data);
        inst->dst = vir_get_temp(c);
        c->defs[inst->dst.index] = inst;
        return inst->dst;
}

#define OPTPASS(func)                                                   \
        do {                                                            \
                bool stage_progress = func(c);                          \
                if (stage_progress) {                                   \
                        progress = true;                                \
                        if (print_opt_debug) {                          \
                                fprintf(stderr,                         \
                                        "VIR opt pass %2d: %s progress\n", \
                                        pass, #func);                   \
                        }                                               \
                        /*XXX vir_validate(c);*/                        \
                }                                                       \
        } while (0)

void
vir_optimize(struct v3d_compile *c)
{
        bool print_opt_debug = false;
        int pass = 1;

        while (true) {
                bool progress = false;

                OPTPASS(vir_opt_copy_propagate);
                OPTPASS(vir_opt_redundant_flags);
                OPTPASS(vir_opt_dead_code);
                OPTPASS(vir_opt_small_immediates);

                if (!progress)
                        break;

                pass++;
        }
}

const char *
vir_get_stage_name(struct v3d_compile *c)
{
        if (c->vs_key && c->vs_key->is_coord)
                return "MESA_SHADER_VERTEX_BIN";
        else if (c->gs_key && c->gs_key->is_coord)
                return "MESA_SHADER_GEOMETRY_BIN";
        else
                return gl_shader_stage_name(c->s->info.stage);
}