1 /*
2 * Copyright (c) 2014 Scott Mansell
3 * Copyright © 2014 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <inttypes.h>
26 #include "util/format/u_format.h"
27 #include "util/crc32.h"
28 #include "util/u_helpers.h"
29 #include "util/u_math.h"
30 #include "util/u_memory.h"
31 #include "util/ralloc.h"
32 #include "util/hash_table.h"
33 #include "tgsi/tgsi_dump.h"
34 #include "tgsi/tgsi_parse.h"
35 #include "compiler/nir/nir.h"
36 #include "compiler/nir/nir_builder.h"
37 #include "compiler/nir_types.h"
38 #include "nir/tgsi_to_nir.h"
39 #include "vc4_context.h"
40 #include "vc4_qpu.h"
41 #include "vc4_qir.h"
42
43 static struct qreg
44 ntq_get_src(struct vc4_compile *c, nir_src src, int i);
45 static void
46 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
47
48 static int
49 type_size(const struct glsl_type *type, bool bindless)
50 {
51 return glsl_count_attribute_slots(type, false);
52 }
53
54 static void
55 resize_qreg_array(struct vc4_compile *c,
56 struct qreg **regs,
57 uint32_t *size,
58 uint32_t decl_size)
59 {
60 if (*size >= decl_size)
61 return;
62
63 uint32_t old_size = *size;
64 *size = MAX2(*size * 2, decl_size);
65 *regs = reralloc(c, *regs, struct qreg, *size);
66 if (!*regs) {
67 fprintf(stderr, "Malloc failure\n");
68 abort();
69 }
70
71 for (uint32_t i = old_size; i < *size; i++)
72 (*regs)[i] = c->undef;
73 }
74
75 static void
76 ntq_emit_thrsw(struct vc4_compile *c)
77 {
78 if (!c->fs_threaded)
79 return;
80
81 /* Always thread switch after each texture operation for now.
82 *
83 * We could do better by batching a bunch of texture fetches up and
84 * then doing one thread switch and collecting all their results
85 * afterward.
86 */
87 qir_emit_nondef(c, qir_inst(QOP_THRSW, c->undef,
88 c->undef, c->undef));
89 c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
90 }
91
92 static struct qreg
93 indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
94 {
95 struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
96
97 /* Clamp to [0, array size). Note that MIN/MAX are signed. */
98 uint32_t range = nir_intrinsic_range(intr);
99 indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
100 indirect_offset = qir_MIN_NOIMM(c, indirect_offset,
101 qir_uniform_ui(c, range - 4));
102
103 qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
104 indirect_offset,
105 qir_uniform(c, QUNIFORM_UBO0_ADDR,
106 nir_intrinsic_base(intr)));
107
108 c->num_texture_samples++;
109
110 ntq_emit_thrsw(c);
111
112 return qir_TEX_RESULT(c);
113 }
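/* Illustrative note (added, not from the original source): this is the same
 * style of clamping that ntq_emit_txf() below describes as "required by
 * kernel validation" for direct TMU reads.  For example, with
 * nir_intrinsic_range(intr) == 64 the offset is clamped to [0, 60], so the
 * 4-byte load can never address past the declared uniform range.
 */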
114
115 static struct qreg
116 vc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
117 {
118 ASSERTED int buffer_index = nir_src_as_uint(intr->src[0]);
119 assert(buffer_index == 1);
120 assert(c->stage == QSTAGE_FRAG);
121
122 struct qreg offset = ntq_get_src(c, intr->src[1], 0);
123
124 /* Clamp to [0, array size). Note that MIN/MAX are signed. */
125 offset = qir_MAX(c, offset, qir_uniform_ui(c, 0));
126 offset = qir_MIN_NOIMM(c, offset,
127 qir_uniform_ui(c, c->fs_key->ubo_1_size - 4));
128
129 qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
130 offset,
131 qir_uniform(c, QUNIFORM_UBO1_ADDR, 0));
132
133 c->num_texture_samples++;
134
135 ntq_emit_thrsw(c);
136
137 return qir_TEX_RESULT(c);
138 }
139
140 nir_ssa_def *
141 vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
142 {
143 switch (swiz) {
144 default:
145 case PIPE_SWIZZLE_NONE:
146 fprintf(stderr, "warning: unknown swizzle\n");
147 FALLTHROUGH;
148 case PIPE_SWIZZLE_0:
149 return nir_imm_float(b, 0.0);
150 case PIPE_SWIZZLE_1:
151 return nir_imm_float(b, 1.0);
152 case PIPE_SWIZZLE_X:
153 case PIPE_SWIZZLE_Y:
154 case PIPE_SWIZZLE_Z:
155 case PIPE_SWIZZLE_W:
156 return srcs[swiz];
157 }
158 }
159
160 static struct qreg *
161 ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
162 {
163 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
164 def->num_components);
165 _mesa_hash_table_insert(c->def_ht, def, qregs);
166 return qregs;
167 }
168
169 /**
170 * This function is responsible for getting QIR results into the associated
171 * storage for a NIR instruction.
172 *
173 * If it's a NIR SSA def, then we just set the associated hash table entry to
174 * the new result.
175 *
176 * If it's a NIR reg, then we need to update the existing qreg assigned to the
177 * NIR destination with the incoming value. To do that without introducing
178 * new MOVs, we require that the incoming qreg either be a uniform, or be
179 * SSA-defined by the previous QIR instruction in the block and rewritable by
180 * this function. That lets us sneak ahead and insert the SF flag beforehand
181 * (knowing that the previous instruction doesn't depend on flags) and rewrite
182 * its destination to be the NIR reg's destination.
183 */
184 static void
185 ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
186 struct qreg result)
187 {
188 struct qinst *last_inst = NULL;
189 if (!list_is_empty(&c->cur_block->instructions))
190 last_inst = (struct qinst *)c->cur_block->instructions.prev;
191
192 assert(result.file == QFILE_UNIF ||
193 (result.file == QFILE_TEMP &&
194 last_inst && last_inst == c->defs[result.index]));
195
196 if (dest->is_ssa) {
197 assert(chan < dest->ssa.num_components);
198
199 struct qreg *qregs;
200 struct hash_entry *entry =
201 _mesa_hash_table_search(c->def_ht, &dest->ssa);
202
203 if (entry)
204 qregs = entry->data;
205 else
206 qregs = ntq_init_ssa_def(c, &dest->ssa);
207
208 qregs[chan] = result;
209 } else {
210 nir_register *reg = dest->reg.reg;
211 assert(dest->reg.base_offset == 0);
212 assert(reg->num_array_elems == 0);
213 struct hash_entry *entry =
214 _mesa_hash_table_search(c->def_ht, reg);
215 struct qreg *qregs = entry->data;
216
217 /* Insert a MOV if the source wasn't an SSA def in the
218 * previous instruction.
219 */
220 if (result.file == QFILE_UNIF) {
221 result = qir_MOV(c, result);
222 last_inst = c->defs[result.index];
223 }
224
225 /* We know they're both temps, so just rewrite index. */
226 c->defs[last_inst->dst.index] = NULL;
227 last_inst->dst.index = qregs[chan].index;
228
229 /* If we're in control flow, then make this update of the reg
230 * conditional on the execution mask.
231 */
232 if (c->execute.file != QFILE_NULL) {
233 last_inst->dst.index = qregs[chan].index;
234
235 /* Set the flags to the current exec mask. To insert
236 * the SF, we temporarily remove our SSA instruction.
237 */
238 list_del(&last_inst->link);
239 qir_SF(c, c->execute);
240 list_addtail(&last_inst->link,
241 &c->cur_block->instructions);
242
243 last_inst->cond = QPU_COND_ZS;
244 last_inst->cond_is_exec_mask = true;
245 }
246 }
247 }
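/* Illustrative example (added): storing to a NIR reg inside control flow
 * turns, in pseudo-QIR, something like
 *
 *         t3 = FADD t1, t2        (SSA def from the previous instruction)
 *
 * into
 *
 *         SF   exec               (set flags from the exec mask)
 *         FADD.zs  r0, t1, t2     (write the reg only in active channels)
 *
 * by temporarily unlinking the defining instruction, emitting the SF, and
 * then rewriting its destination and condition as done above.
 */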
248
249 static struct qreg *
250 ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
251 {
252 if (dest->is_ssa) {
253 struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
254 for (int i = 0; i < dest->ssa.num_components; i++)
255 qregs[i] = c->undef;
256 return qregs;
257 } else {
258 nir_register *reg = dest->reg.reg;
259 assert(dest->reg.base_offset == 0);
260 assert(reg->num_array_elems == 0);
261 struct hash_entry *entry =
262 _mesa_hash_table_search(c->def_ht, reg);
263 return entry->data;
264 }
265 }
266
267 static struct qreg
268 ntq_get_src(struct vc4_compile *c, nir_src src, int i)
269 {
270 struct hash_entry *entry;
271 if (src.is_ssa) {
272 entry = _mesa_hash_table_search(c->def_ht, src.ssa);
273 assert(i < src.ssa->num_components);
274 } else {
275 nir_register *reg = src.reg.reg;
276 entry = _mesa_hash_table_search(c->def_ht, reg);
277 assert(reg->num_array_elems == 0);
278 assert(src.reg.base_offset == 0);
279 assert(i < reg->num_components);
280 }
281
282 struct qreg *qregs = entry->data;
283 return qregs[i];
284 }
285
286 static struct qreg
287 ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
288 unsigned src)
289 {
290 assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
291 unsigned chan = ffs(instr->dest.write_mask) - 1;
292 struct qreg r = ntq_get_src(c, instr->src[src].src,
293 instr->src[src].swizzle[chan]);
294
295 assert(!instr->src[src].abs);
296 assert(!instr->src[src].negate);
297
298 return r;
299 }
300
301 static inline struct qreg
302 qir_SAT(struct vc4_compile *c, struct qreg val)
303 {
304 return qir_FMAX(c,
305 qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
306 qir_uniform_f(c, 0.0));
307 }
308
309 static struct qreg
310 ntq_rcp(struct vc4_compile *c, struct qreg x)
311 {
312 struct qreg r = qir_RCP(c, x);
313
314 /* Apply a Newton-Raphson step to improve the accuracy. */
315 r = qir_FMUL(c, r, qir_FSUB(c,
316 qir_uniform_f(c, 2.0),
317 qir_FMUL(c, x, r)));
318
319 return r;
320 }
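/* Reference (added note): the QPU's RCP gives a low-precision estimate
 * r0 ~= 1/x.  The Newton-Raphson step above computes
 *
 *         r1 = r0 * (2 - x * r0)
 *
 * which roughly doubles the number of correct mantissa bits.
 */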
321
322 static struct qreg
323 ntq_rsq(struct vc4_compile *c, struct qreg x)
324 {
325 struct qreg r = qir_RSQ(c, x);
326
327 /* Apply a Newton-Raphson step to improve the accuracy. */
328 r = qir_FMUL(c, r, qir_FSUB(c,
329 qir_uniform_f(c, 1.5),
330 qir_FMUL(c,
331 qir_uniform_f(c, 0.5),
332 qir_FMUL(c, x,
333 qir_FMUL(c, r, r)))));
334
335 return r;
336 }
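/* Reference (added note): for 1/sqrt(x) the corresponding Newton-Raphson
 * step, as emitted above, is
 *
 *         r1 = r0 * (1.5 - 0.5 * x * r0 * r0)
 *
 * again starting from the hardware's low-precision RSQ estimate r0.
 */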
337
338 static struct qreg
339 ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
340 {
341 struct qreg src0_hi = qir_SHR(c, src0,
342 qir_uniform_ui(c, 24));
343 struct qreg src1_hi = qir_SHR(c, src1,
344 qir_uniform_ui(c, 24));
345
346 struct qreg hilo = qir_MUL24(c, src0_hi, src1);
347 struct qreg lohi = qir_MUL24(c, src0, src1_hi);
348 struct qreg lolo = qir_MUL24(c, src0, src1);
349
350 return qir_ADD(c, lolo, qir_SHL(c,
351 qir_ADD(c, hilo, lohi),
352 qir_uniform_ui(c, 24)));
353 }
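/* Reference (added note): MUL24 only multiplies the low 24 bits of each
 * operand, so a full 32x32 -> low-32-bit multiply is built from pieces.
 * With a = (a_hi << 24) + a_lo and b = (b_hi << 24) + b_lo:
 *
 *         a * b mod 2^32 = a_lo * b_lo
 *                        + ((a_hi * b_lo + a_lo * b_hi) << 24)
 *
 * The a_hi * b_hi term is shifted left by 48 bits and so drops out mod
 * 2^32, which is why three MUL24s are sufficient.
 */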
354
355 static struct qreg
356 ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
357 {
358 struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
359 qir_uniform_ui(c, 8)));
360 return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
361 }
362
363 /**
364 * Emits a lowered TXF_MS from an MSAA texture.
365 *
366 * The addressing math has been lowered in NIR, and now we just need to read
367 * it like a UBO.
368 */
369 static void
370 ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
371 {
372 uint32_t tile_width = 32;
373 uint32_t tile_height = 32;
374 uint32_t tile_size = (tile_height * tile_width *
375 VC4_MAX_SAMPLES * sizeof(uint32_t));
376
377 unsigned unit = instr->texture_index;
378 uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
379 uint32_t w_tiles = w / tile_width;
380 uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
381 uint32_t h_tiles = h / tile_height;
382 uint32_t size = w_tiles * h_tiles * tile_size;
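/* Worked example (added): for a hypothetical 70x30 MSAA buffer,
 * w = align(70, 32) = 96 (3 tiles) and h = align(30, 32) = 32 (1 tile);
 * with 32x32 pixels per tile, VC4_MAX_SAMPLES = 4 and 4 bytes per sample,
 * size = 3 * 1 * 32 * 32 * 4 * 4 = 49152 bytes of addressable data.
 */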
383
384 struct qreg addr;
385 assert(instr->num_srcs == 1);
386 assert(instr->src[0].src_type == nir_tex_src_coord);
387 addr = ntq_get_src(c, instr->src[0].src, 0);
388
389 /* Perform the clamping required by kernel validation. */
390 addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
391 addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4));
392
393 qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
394 addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
395
396 ntq_emit_thrsw(c);
397
398 struct qreg tex = qir_TEX_RESULT(c);
399 c->num_texture_samples++;
400
401 enum pipe_format format = c->key->tex[unit].format;
402 if (util_format_is_depth_or_stencil(format)) {
403 struct qreg scaled = ntq_scale_depth_texture(c, tex);
404 for (int i = 0; i < 4; i++)
405 ntq_store_dest(c, &instr->dest, i, qir_MOV(c, scaled));
406 } else {
407 for (int i = 0; i < 4; i++)
408 ntq_store_dest(c, &instr->dest, i,
409 qir_UNPACK_8_F(c, tex, i));
410 }
411 }
412
413 static void
414 ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
415 {
416 struct qreg s, t, r, lod, compare;
417 bool is_txb = false, is_txl = false;
418 unsigned unit = instr->texture_index;
419
420 if (instr->op == nir_texop_txf) {
421 ntq_emit_txf(c, instr);
422 return;
423 }
424
425 for (unsigned i = 0; i < instr->num_srcs; i++) {
426 switch (instr->src[i].src_type) {
427 case nir_tex_src_coord:
428 s = ntq_get_src(c, instr->src[i].src, 0);
429 if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
430 t = qir_uniform_f(c, 0.5);
431 else
432 t = ntq_get_src(c, instr->src[i].src, 1);
433 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
434 r = ntq_get_src(c, instr->src[i].src, 2);
435 break;
436 case nir_tex_src_bias:
437 lod = ntq_get_src(c, instr->src[i].src, 0);
438 is_txb = true;
439 break;
440 case nir_tex_src_lod:
441 lod = ntq_get_src(c, instr->src[i].src, 0);
442 is_txl = true;
443 break;
444 case nir_tex_src_comparator:
445 compare = ntq_get_src(c, instr->src[i].src, 0);
446 break;
447 default:
448 unreachable("unknown texture source");
449 }
450 }
451
452 if (c->stage != QSTAGE_FRAG && !is_txl) {
453 /* From the GLSL 1.20 spec:
454 *
455 * "If it is mip-mapped and running on the vertex shader,
456 * then the base texture is used."
457 */
458 is_txl = true;
459 lod = qir_uniform_ui(c, 0);
460 }
461
462 if (c->key->tex[unit].force_first_level) {
463 lod = qir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL, unit);
464 is_txl = true;
465 is_txb = false;
466 }
467
468 struct qreg texture_u[] = {
469 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
470 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
471 qir_uniform(c, QUNIFORM_CONSTANT, 0),
472 qir_uniform(c, QUNIFORM_CONSTANT, 0),
473 };
474 uint32_t next_texture_u = 0;
475
476 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
477 texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
478 unit | (is_txl << 16));
479 }
480
481 struct qinst *tmu;
482 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
483 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r);
484 tmu->src[qir_get_tex_uniform_src(tmu)] =
485 texture_u[next_texture_u++];
486 } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
487 c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
488 c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
489 c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
490 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0),
491 qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR,
492 unit));
493 tmu->src[qir_get_tex_uniform_src(tmu)] =
494 texture_u[next_texture_u++];
495 }
496
497 if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
498 s = qir_SAT(c, s);
499 }
500
501 if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
502 t = qir_SAT(c, t);
503 }
504
505 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t);
506 tmu->src[qir_get_tex_uniform_src(tmu)] =
507 texture_u[next_texture_u++];
508
509 if (is_txl || is_txb) {
510 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod);
511 tmu->src[qir_get_tex_uniform_src(tmu)] =
512 texture_u[next_texture_u++];
513 }
514
515 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s);
516 tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++];
517
518 c->num_texture_samples++;
519
520 ntq_emit_thrsw(c);
521
522 struct qreg tex = qir_TEX_RESULT(c);
523
524 enum pipe_format format = c->key->tex[unit].format;
525
526 struct qreg *dest = ntq_get_dest(c, &instr->dest);
527 if (util_format_is_depth_or_stencil(format)) {
528 struct qreg normalized = ntq_scale_depth_texture(c, tex);
529 struct qreg depth_output;
530
531 struct qreg u0 = qir_uniform_f(c, 0.0f);
532 struct qreg u1 = qir_uniform_f(c, 1.0f);
533 if (c->key->tex[unit].compare_mode) {
534 /* From the GL_ARB_shadow spec:
535 *
536 * "Let Dt (D subscript t) be the depth texture
537 * value, in the range [0, 1]. Let R be the
538 * interpolated texture coordinate clamped to the
539 * range [0, 1]."
540 */
541 compare = qir_SAT(c, compare);
542
543 switch (c->key->tex[unit].compare_func) {
544 case PIPE_FUNC_NEVER:
545 depth_output = qir_uniform_f(c, 0.0f);
546 break;
547 case PIPE_FUNC_ALWAYS:
548 depth_output = u1;
549 break;
550 case PIPE_FUNC_EQUAL:
551 qir_SF(c, qir_FSUB(c, compare, normalized));
552 depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
553 break;
554 case PIPE_FUNC_NOTEQUAL:
555 qir_SF(c, qir_FSUB(c, compare, normalized));
556 depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
557 break;
558 case PIPE_FUNC_GREATER:
559 qir_SF(c, qir_FSUB(c, compare, normalized));
560 depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
561 break;
562 case PIPE_FUNC_GEQUAL:
563 qir_SF(c, qir_FSUB(c, normalized, compare));
564 depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
565 break;
566 case PIPE_FUNC_LESS:
567 qir_SF(c, qir_FSUB(c, compare, normalized));
568 depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
569 break;
570 case PIPE_FUNC_LEQUAL:
571 qir_SF(c, qir_FSUB(c, normalized, compare));
572 depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
573 break;
574 }
575 } else {
576 depth_output = normalized;
577 }
578
579 for (int i = 0; i < 4; i++)
580 dest[i] = depth_output;
581 } else {
582 for (int i = 0; i < 4; i++)
583 dest[i] = qir_UNPACK_8_F(c, tex, i);
584 }
585 }
586
587 /**
588 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
589 * to zero).
590 */
591 static struct qreg
592 ntq_ffract(struct vc4_compile *c, struct qreg src)
593 {
594 struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
595 struct qreg diff = qir_FSUB(c, src, trunc);
596 qir_SF(c, diff);
597
598 qir_FADD_dest(c, diff,
599 diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
600
601 return qir_MOV(c, diff);
602 }
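/* Worked example (added): fract(-1.25) with a truncating FTOI:
 * trunc = -1.0, diff = -1.25 - (-1.0) = -0.25, which sets N, so the
 * conditional FADD adds 1.0 and yields 0.75 = -1.25 - floor(-1.25).
 * For non-negative inputs the conditional add never fires.
 */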
603
604 /**
605 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
606 * zero).
607 */
608 static struct qreg
609 ntq_ffloor(struct vc4_compile *c, struct qreg src)
610 {
611 struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
612
613 /* This will be < 0 if we truncated and the truncation was of a value
614 * that was < 0 in the first place.
615 */
616 qir_SF(c, qir_FSUB(c, src, result));
617
618 struct qinst *sub = qir_FSUB_dest(c, result,
619 result, qir_uniform_f(c, 1.0));
620 sub->cond = QPU_COND_NS;
621
622 return qir_MOV(c, result);
623 }
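/* Worked example (added): floor(-1.25): result starts as trunc = -1.0,
 * src - result = -0.25 sets N, so the conditional FSUB produces -2.0.
 * floor(1.25): src - result = 0.25 leaves N clear and result stays 1.0.
 */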
624
625 /**
626 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
627 * zero).
628 */
629 static struct qreg
630 ntq_fceil(struct vc4_compile *c, struct qreg src)
631 {
632 struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
633
634 /* This will be < 0 if we truncated and the truncation was of a value
635 * that was > 0 in the first place.
636 */
637 qir_SF(c, qir_FSUB(c, result, src));
638
639 qir_FADD_dest(c, result,
640 result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
641
642 return qir_MOV(c, result);
643 }
644
645 static struct qreg
646 ntq_shrink_sincos_input_range(struct vc4_compile *c, struct qreg x)
647 {
648 /* Since we're using a Taylor approximation, we want to have a small
649 * number of coefficients and take advantage of sin/cos repeating
650 * every 2pi. We keep our x as close to 0 as we can, since the series
651 * will be less accurate as |x| increases.  (Also, be careful about
652 * shifting the input x value and relying on sin/cos identities,
653 * because getting accurate values for x==0 is very important for SDL
654 * rendering.)
655 */
656 struct qreg scaled_x =
657 qir_FMUL(c, x,
658 qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
659 /* Note: FTOI truncates toward 0. */
660 struct qreg x_frac = qir_FSUB(c, scaled_x,
661 qir_ITOF(c, qir_FTOI(c, scaled_x)));
662 /* Map [0.5, 1] to [-0.5, 0] */
663 qir_SF(c, qir_FSUB(c, x_frac, qir_uniform_f(c, 0.5)));
664 qir_FSUB_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NC;
665 /* Map [-1, -0.5] to [0, 0.5] */
666 qir_SF(c, qir_FADD(c, x_frac, qir_uniform_f(c, 0.5)));
667 qir_FADD_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
668
669 return x_frac;
670 }
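/* Worked example (added): the returned value is the angle in *turns*,
 * wrapped to [-0.5, 0.5].  For x = 5.0 rad: scaled_x = 5.0 / (2*pi) ~= 0.796,
 * x_frac = 0.796, which is >= 0.5, so 1.0 is subtracted giving ~-0.204 turns
 * (the same angle as 5.0 - 2*pi rad).  Inputs near 0 pass through unchanged,
 * keeping sin(0) exact.
 */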
671
672 static struct qreg
673 ntq_fsin(struct vc4_compile *c, struct qreg src)
674 {
675 float coeff[] = {
676 2.0 * M_PI,
677 -pow(2.0 * M_PI, 3) / (3 * 2 * 1),
678 pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
679 -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
680 pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
681 };
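/* Added note: with x expressed in turns,
 * sin(2*pi*x) = sum_k (-1)^k * (2*pi)^(2k+1) * x^(2k+1) / (2k+1)!,
 * so the coefficients above are the odd-order Taylor terms of sin with the
 * 2*pi factor folded in, evaluated over the reduced range |x| <= 0.5.
 */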
682
683 struct qreg x = ntq_shrink_sincos_input_range(c, src);
684 struct qreg x2 = qir_FMUL(c, x, x);
685 struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
686 for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
687 x = qir_FMUL(c, x, x2);
688 sum = qir_FADD(c,
689 sum,
690 qir_FMUL(c,
691 x,
692 qir_uniform_f(c, coeff[i])));
693 }
694 return sum;
695 }
696
697 static struct qreg
698 ntq_fcos(struct vc4_compile *c, struct qreg src)
699 {
700 float coeff[] = {
701 1.0f,
702 -pow(2.0 * M_PI, 2) / (2 * 1),
703 pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
704 -pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
705 pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
706 -pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
707 };
708
709 struct qreg x_frac = ntq_shrink_sincos_input_range(c, src);
710 struct qreg sum = qir_uniform_f(c, coeff[0]);
711 struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
712 struct qreg x = x2; /* Current x^2, x^4, or x^6 */
713 for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
714 if (i != 1)
715 x = qir_FMUL(c, x, x2);
716
717 sum = qir_FADD(c, qir_FMUL(c,
718 x,
719 qir_uniform_f(c, coeff[i])),
720 sum);
721 }
722 return sum;
723 }
724
725 static struct qreg
726 ntq_fsign(struct vc4_compile *c, struct qreg src)
727 {
728 struct qreg t = qir_get_temp(c);
729
730 qir_SF(c, src);
731 qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
732 qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
733 qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
734 return qir_MOV(c, t);
735 }
736
737 static void
738 emit_vertex_input(struct vc4_compile *c, int attr)
739 {
740 enum pipe_format format = c->vs_key->attr_formats[attr];
741 uint32_t attr_size = util_format_get_blocksize(format);
742
743 c->vattr_sizes[attr] = align(attr_size, 4);
744 for (int i = 0; i < align(attr_size, 4) / 4; i++) {
745 c->inputs[attr * 4 + i] =
746 qir_MOV(c, qir_reg(QFILE_VPM, attr * 4 + i));
747 c->num_inputs++;
748 }
749 }
750
751 static void
752 emit_fragcoord_input(struct vc4_compile *c, int attr)
753 {
754 c->inputs[attr * 4 + 0] = qir_ITOF(c, qir_reg(QFILE_FRAG_X, 0));
755 c->inputs[attr * 4 + 1] = qir_ITOF(c, qir_reg(QFILE_FRAG_Y, 0));
756 c->inputs[attr * 4 + 2] =
757 qir_FMUL(c,
758 qir_ITOF(c, qir_FRAG_Z(c)),
759 qir_uniform_f(c, 1.0 / 0xffffff));
760 c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
761 }
762
763 static struct qreg
764 emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
765 uint8_t swizzle)
766 {
767 uint32_t i = c->num_input_slots++;
768 struct qreg vary = {
769 QFILE_VARY,
770 i
771 };
772
773 if (c->num_input_slots >= c->input_slots_array_size) {
774 c->input_slots_array_size =
775 MAX2(4, c->input_slots_array_size * 2);
776
777 c->input_slots = reralloc(c, c->input_slots,
778 struct vc4_varying_slot,
779 c->input_slots_array_size);
780 }
781
782 c->input_slots[i].slot = slot;
783 c->input_slots[i].swizzle = swizzle;
784
785 return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
786 }
787
788 static void
789 emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
790 {
791 for (int i = 0; i < 4; i++) {
792 c->inputs[attr * 4 + i] =
793 emit_fragment_varying(c, slot, i);
794 c->num_inputs++;
795 }
796 }
797
798 static void
799 add_output(struct vc4_compile *c,
800 uint32_t decl_offset,
801 uint8_t slot,
802 uint8_t swizzle)
803 {
804 uint32_t old_array_size = c->outputs_array_size;
805 resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
806 decl_offset + 1);
807
808 if (old_array_size != c->outputs_array_size) {
809 c->output_slots = reralloc(c,
810 c->output_slots,
811 struct vc4_varying_slot,
812 c->outputs_array_size);
813 }
814
815 c->output_slots[decl_offset].slot = slot;
816 c->output_slots[decl_offset].swizzle = swizzle;
817 }
818
819 static bool
820 ntq_src_is_only_ssa_def_user(nir_src *src)
821 {
822 if (!src->is_ssa)
823 return false;
824
825 if (!list_is_empty(&src->ssa->if_uses))
826 return false;
827
828 return (src->ssa->uses.next == &src->use_link &&
829 src->ssa->uses.next->next == &src->ssa->uses);
830 }
831
832 /**
833 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
834 * bit set.
835 *
836 * However, as an optimization, it tries to find the instructions generating
837 * the sources to be packed and just emit the pack flag there, if possible.
838 */
839 static void
840 ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
841 {
842 struct qreg result = qir_get_temp(c);
843 struct nir_alu_instr *vec4 = NULL;
844
845 /* If packing from a vec4 op (as expected), identify it so that we can
846 * peek back at what generated its sources.
847 */
848 if (instr->src[0].src.is_ssa &&
849 instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
850 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
851 nir_op_vec4) {
852 vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
853 }
854
855 /* If the pack is replicating the same channel 4 times, use the 8888
856 * pack flag. This is common for blending using the alpha
857 * channel.
858 */
859 if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
860 instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
861 instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
862 struct qreg rep = ntq_get_src(c,
863 instr->src[0].src,
864 instr->src[0].swizzle[0]);
865 ntq_store_dest(c, &instr->dest.dest, 0, qir_PACK_8888_F(c, rep));
866 return;
867 }
868
869 for (int i = 0; i < 4; i++) {
870 int swiz = instr->src[0].swizzle[i];
871 struct qreg src;
872 if (vec4) {
873 src = ntq_get_src(c, vec4->src[swiz].src,
874 vec4->src[swiz].swizzle[0]);
875 } else {
876 src = ntq_get_src(c, instr->src[0].src, swiz);
877 }
878
879 if (vec4 &&
880 ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
881 src.file == QFILE_TEMP &&
882 c->defs[src.index] &&
883 qir_is_mul(c->defs[src.index]) &&
884 !c->defs[src.index]->dst.pack) {
885 struct qinst *rewrite = c->defs[src.index];
886 c->defs[src.index] = NULL;
887 rewrite->dst = result;
888 rewrite->dst.pack = QPU_PACK_MUL_8A + i;
889 continue;
890 }
891
892 qir_PACK_8_F(c, result, src, i);
893 }
894
895 ntq_store_dest(c, &instr->dest.dest, 0, qir_MOV(c, result));
896 }
897
898 /** Handles sign-extended bitfield extracts for 16 bits. */
899 static struct qreg
900 ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
901 struct qreg bits)
902 {
903 assert(bits.file == QFILE_UNIF &&
904 c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
905 c->uniform_data[bits.index] == 16);
906
907 assert(offset.file == QFILE_UNIF &&
908 c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
909 int offset_bit = c->uniform_data[offset.index];
910 assert(offset_bit % 16 == 0);
911
912 return qir_UNPACK_16_I(c, base, offset_bit / 16);
913 }
914
915 /** Handles unsigned bitfield extracts for 8 bits. */
916 static struct qreg
917 ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
918 struct qreg bits)
919 {
920 assert(bits.file == QFILE_UNIF &&
921 c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
922 c->uniform_data[bits.index] == 8);
923
924 assert(offset.file == QFILE_UNIF &&
925 c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
926 int offset_bit = c->uniform_data[offset.index];
927 assert(offset_bit % 8 == 0);
928
929 return qir_UNPACK_8_I(c, base, offset_bit / 8);
930 }
931
932 /**
933 * If compare_instr is a valid comparison instruction, emits the
934 * compare_instr's comparison and returns the sel_instr's return value based
935 * on the compare_instr's result.
936 */
937 static bool
938 ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
939 nir_alu_instr *compare_instr,
940 nir_alu_instr *sel_instr)
941 {
942 enum qpu_cond cond;
943
944 switch (compare_instr->op) {
945 case nir_op_feq32:
946 case nir_op_ieq32:
947 case nir_op_seq:
948 cond = QPU_COND_ZS;
949 break;
950 case nir_op_fneu32:
951 case nir_op_ine32:
952 case nir_op_sne:
953 cond = QPU_COND_ZC;
954 break;
955 case nir_op_fge32:
956 case nir_op_ige32:
957 case nir_op_uge32:
958 case nir_op_sge:
959 cond = QPU_COND_NC;
960 break;
961 case nir_op_flt32:
962 case nir_op_ilt32:
963 case nir_op_slt:
964 cond = QPU_COND_NS;
965 break;
966 default:
967 return false;
968 }
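/* Added note: each comparison is implemented below as a subtract that is
 * only used to set the condition flags.  E.g. for fge32/ige32, "a >= b"
 * becomes SF(a - b) followed by a select on N clear (QPU_COND_NC), while
 * the equality tests select on the Z flag instead.
 */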
969
970 struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
971 struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);
972
973 unsigned unsized_type =
974 nir_alu_type_get_base_type(nir_op_infos[compare_instr->op].input_types[0]);
975 if (unsized_type == nir_type_float)
976 qir_SF(c, qir_FSUB(c, src0, src1));
977 else
978 qir_SF(c, qir_SUB(c, src0, src1));
979
980 switch (sel_instr->op) {
981 case nir_op_seq:
982 case nir_op_sne:
983 case nir_op_sge:
984 case nir_op_slt:
985 *dest = qir_SEL(c, cond,
986 qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
987 break;
988
989 case nir_op_b32csel:
990 *dest = qir_SEL(c, cond,
991 ntq_get_alu_src(c, sel_instr, 1),
992 ntq_get_alu_src(c, sel_instr, 2));
993 break;
994
995 default:
996 *dest = qir_SEL(c, cond,
997 qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
998 break;
999 }
1000
1001 /* Make the temporary for nir_store_dest(). */
1002 *dest = qir_MOV(c, *dest);
1003
1004 return true;
1005 }
1006
1007 /**
1008 * Attempts to fold a comparison generating a boolean result into the
1009 * condition code for selecting between two values, instead of comparing the
1010 * boolean result against 0 to generate the condition code.
1011 */
1012 static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
1013 struct qreg *src)
1014 {
1015 if (!instr->src[0].src.is_ssa)
1016 goto out;
1017 if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
1018 goto out;
1019 nir_alu_instr *compare =
1020 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
1021 if (!compare)
1022 goto out;
1023
1024 struct qreg dest;
1025 if (ntq_emit_comparison(c, &dest, compare, instr))
1026 return dest;
1027
1028 out:
1029 qir_SF(c, src[0]);
1030 return qir_MOV(c, qir_SEL(c, QPU_COND_NS, src[1], src[2]));
1031 }
1032
1033 static struct qreg
1034 ntq_fddx(struct vc4_compile *c, struct qreg src)
1035 {
1036 /* Make sure that we have a bare temp to use for MUL rotation, so it
1037 * can be allocated to an accumulator.
1038 */
1039 if (src.pack || src.file != QFILE_TEMP)
1040 src = qir_MOV(c, src);
1041
1042 struct qreg from_left = qir_ROT_MUL(c, src, 1);
1043 struct qreg from_right = qir_ROT_MUL(c, src, 15);
1044
1045 /* Distinguish left/right pixels of the quad. */
1046 qir_SF(c, qir_AND(c, qir_reg(QFILE_QPU_ELEMENT, 0),
1047 qir_uniform_ui(c, 1)));
1048
1049 return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
1050 qir_FSUB(c, from_right, src),
1051 qir_FSUB(c, src, from_left)));
1052 }
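/* Added note (applies to ntq_fddx/ntq_fddy): fragments are shaded in 2x2
 * quads occupying adjacent SIMD elements, so rotating the value through the
 * MUL pipeline by 1 (or 15) elements reads the horizontally adjacent pixel,
 * and by 2 (or 14) the vertically adjacent one.  The AND with the element
 * index then picks the subtraction direction so both pixels of a row (or
 * column) agree on the derivative estimate.
 */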
1053
1054 static struct qreg
1055 ntq_fddy(struct vc4_compile *c, struct qreg src)
1056 {
1057 if (src.pack || src.file != QFILE_TEMP)
1058 src = qir_MOV(c, src);
1059
1060 struct qreg from_bottom = qir_ROT_MUL(c, src, 2);
1061 struct qreg from_top = qir_ROT_MUL(c, src, 14);
1062
1063 /* Distinguish top/bottom pixels of the quad. */
1064 qir_SF(c, qir_AND(c,
1065 qir_reg(QFILE_QPU_ELEMENT, 0),
1066 qir_uniform_ui(c, 2)));
1067
1068 return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
1069 qir_FSUB(c, from_top, src),
1070 qir_FSUB(c, src, from_bottom)));
1071 }
1072
1073 static void
1074 ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
1075 {
1076 /* This should always be lowered to ALU operations for VC4. */
1077 assert(!instr->dest.saturate);
1078
1079 /* Vectors are special in that they have non-scalarized writemasks,
1080 * and just take the first swizzle channel for each argument in order
1081 * into each writemask channel.
1082 */
1083 if (instr->op == nir_op_vec2 ||
1084 instr->op == nir_op_vec3 ||
1085 instr->op == nir_op_vec4) {
1086 struct qreg srcs[4];
1087 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1088 srcs[i] = ntq_get_src(c, instr->src[i].src,
1089 instr->src[i].swizzle[0]);
1090 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1091 ntq_store_dest(c, &instr->dest.dest, i,
1092 qir_MOV(c, srcs[i]));
1093 return;
1094 }
1095
1096 if (instr->op == nir_op_pack_unorm_4x8) {
1097 ntq_emit_pack_unorm_4x8(c, instr);
1098 return;
1099 }
1100
1101 if (instr->op == nir_op_unpack_unorm_4x8) {
1102 struct qreg src = ntq_get_src(c, instr->src[0].src,
1103 instr->src[0].swizzle[0]);
1104 for (int i = 0; i < 4; i++) {
1105 if (instr->dest.write_mask & (1 << i))
1106 ntq_store_dest(c, &instr->dest.dest, i,
1107 qir_UNPACK_8_F(c, src, i));
1108 }
1109 return;
1110 }
1111
1112 /* General case: We can just grab the one used channel per src. */
1113 struct qreg src[nir_op_infos[instr->op].num_inputs];
1114 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
1115 src[i] = ntq_get_alu_src(c, instr, i);
1116 }
1117
1118 struct qreg result;
1119
1120 switch (instr->op) {
1121 case nir_op_mov:
1122 result = qir_MOV(c, src[0]);
1123 break;
1124 case nir_op_fmul:
1125 result = qir_FMUL(c, src[0], src[1]);
1126 break;
1127 case nir_op_fadd:
1128 result = qir_FADD(c, src[0], src[1]);
1129 break;
1130 case nir_op_fsub:
1131 result = qir_FSUB(c, src[0], src[1]);
1132 break;
1133 case nir_op_fmin:
1134 result = qir_FMIN(c, src[0], src[1]);
1135 break;
1136 case nir_op_fmax:
1137 result = qir_FMAX(c, src[0], src[1]);
1138 break;
1139
1140 case nir_op_f2i32:
1141 case nir_op_f2u32:
1142 result = qir_FTOI(c, src[0]);
1143 break;
1144 case nir_op_i2f32:
1145 case nir_op_u2f32:
1146 result = qir_ITOF(c, src[0]);
1147 break;
1148 case nir_op_b2f32:
1149 result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
1150 break;
1151 case nir_op_b2i32:
1152 result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
1153 break;
1154 case nir_op_i2b32:
1155 case nir_op_f2b32:
1156 qir_SF(c, src[0]);
1157 result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
1158 qir_uniform_ui(c, ~0),
1159 qir_uniform_ui(c, 0)));
1160 break;
1161
1162 case nir_op_iadd:
1163 result = qir_ADD(c, src[0], src[1]);
1164 break;
1165 case nir_op_ushr:
1166 result = qir_SHR(c, src[0], src[1]);
1167 break;
1168 case nir_op_isub:
1169 result = qir_SUB(c, src[0], src[1]);
1170 break;
1171 case nir_op_ishr:
1172 result = qir_ASR(c, src[0], src[1]);
1173 break;
1174 case nir_op_ishl:
1175 result = qir_SHL(c, src[0], src[1]);
1176 break;
1177 case nir_op_imin:
1178 result = qir_MIN(c, src[0], src[1]);
1179 break;
1180 case nir_op_imax:
1181 result = qir_MAX(c, src[0], src[1]);
1182 break;
1183 case nir_op_iand:
1184 result = qir_AND(c, src[0], src[1]);
1185 break;
1186 case nir_op_ior:
1187 result = qir_OR(c, src[0], src[1]);
1188 break;
1189 case nir_op_ixor:
1190 result = qir_XOR(c, src[0], src[1]);
1191 break;
1192 case nir_op_inot:
1193 result = qir_NOT(c, src[0]);
1194 break;
1195
1196 case nir_op_imul:
1197 result = ntq_umul(c, src[0], src[1]);
1198 break;
1199
1200 case nir_op_seq:
1201 case nir_op_sne:
1202 case nir_op_sge:
1203 case nir_op_slt:
1204 case nir_op_feq32:
1205 case nir_op_fneu32:
1206 case nir_op_fge32:
1207 case nir_op_flt32:
1208 case nir_op_ieq32:
1209 case nir_op_ine32:
1210 case nir_op_ige32:
1211 case nir_op_uge32:
1212 case nir_op_ilt32:
1213 if (!ntq_emit_comparison(c, &result, instr, instr)) {
1214 fprintf(stderr, "Bad comparison instruction\n");
1215 }
1216 break;
1217
1218 case nir_op_b32csel:
1219 result = ntq_emit_bcsel(c, instr, src);
1220 break;
1221 case nir_op_fcsel:
1222 qir_SF(c, src[0]);
1223 result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, src[1], src[2]));
1224 break;
1225
1226 case nir_op_frcp:
1227 result = ntq_rcp(c, src[0]);
1228 break;
1229 case nir_op_frsq:
1230 result = ntq_rsq(c, src[0]);
1231 break;
1232 case nir_op_fexp2:
1233 result = qir_EXP2(c, src[0]);
1234 break;
1235 case nir_op_flog2:
1236 result = qir_LOG2(c, src[0]);
1237 break;
1238
1239 case nir_op_ftrunc:
1240 result = qir_ITOF(c, qir_FTOI(c, src[0]));
1241 break;
1242 case nir_op_fceil:
1243 result = ntq_fceil(c, src[0]);
1244 break;
1245 case nir_op_ffract:
1246 result = ntq_ffract(c, src[0]);
1247 break;
1248 case nir_op_ffloor:
1249 result = ntq_ffloor(c, src[0]);
1250 break;
1251
1252 case nir_op_fsin:
1253 result = ntq_fsin(c, src[0]);
1254 break;
1255 case nir_op_fcos:
1256 result = ntq_fcos(c, src[0]);
1257 break;
1258
1259 case nir_op_fsign:
1260 result = ntq_fsign(c, src[0]);
1261 break;
1262
1263 case nir_op_fabs:
1264 result = qir_FMAXABS(c, src[0], src[0]);
1265 break;
1266 case nir_op_iabs:
1267 result = qir_MAX(c, src[0],
1268 qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
1269 break;
1270
1271 case nir_op_ibitfield_extract:
1272 result = ntq_emit_ibfe(c, src[0], src[1], src[2]);
1273 break;
1274
1275 case nir_op_ubitfield_extract:
1276 result = ntq_emit_ubfe(c, src[0], src[1], src[2]);
1277 break;
1278
1279 case nir_op_usadd_4x8_vc4:
1280 result = qir_V8ADDS(c, src[0], src[1]);
1281 break;
1282
1283 case nir_op_ussub_4x8_vc4:
1284 result = qir_V8SUBS(c, src[0], src[1]);
1285 break;
1286
1287 case nir_op_umin_4x8_vc4:
1288 result = qir_V8MIN(c, src[0], src[1]);
1289 break;
1290
1291 case nir_op_umax_4x8_vc4:
1292 result = qir_V8MAX(c, src[0], src[1]);
1293 break;
1294
1295 case nir_op_umul_unorm_4x8_vc4:
1296 result = qir_V8MULD(c, src[0], src[1]);
1297 break;
1298
1299 case nir_op_fddx:
1300 case nir_op_fddx_coarse:
1301 case nir_op_fddx_fine:
1302 result = ntq_fddx(c, src[0]);
1303 break;
1304
1305 case nir_op_fddy:
1306 case nir_op_fddy_coarse:
1307 case nir_op_fddy_fine:
1308 result = ntq_fddy(c, src[0]);
1309 break;
1310
1311 default:
1312 fprintf(stderr, "unknown NIR ALU inst: ");
1313 nir_print_instr(&instr->instr, stderr);
1314 fprintf(stderr, "\n");
1315 abort();
1316 }
1317
1318 /* We have a scalar result, so the instruction should only have a
1319 * single channel written to.
1320 */
1321 assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
1322 ntq_store_dest(c, &instr->dest.dest,
1323 ffs(instr->dest.write_mask) - 1, result);
1324 }
1325
1326 static void
1327 emit_frag_end(struct vc4_compile *c)
1328 {
1329 struct qreg color;
1330 if (c->output_color_index != -1) {
1331 color = c->outputs[c->output_color_index];
1332 } else {
1333 color = qir_uniform_ui(c, 0);
1334 }
1335
1336 uint32_t discard_cond = QPU_COND_ALWAYS;
1337 if (c->s->info.fs.uses_discard) {
1338 qir_SF(c, c->discard);
1339 discard_cond = QPU_COND_ZS;
1340 }
1341
1342 if (c->fs_key->stencil_enabled) {
1343 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1344 qir_uniform(c, QUNIFORM_STENCIL, 0));
1345 if (c->fs_key->stencil_twoside) {
1346 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1347 qir_uniform(c, QUNIFORM_STENCIL, 1));
1348 }
1349 if (c->fs_key->stencil_full_writemasks) {
1350 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1351 qir_uniform(c, QUNIFORM_STENCIL, 2));
1352 }
1353 }
1354
1355 if (c->output_sample_mask_index != -1) {
1356 qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
1357 }
1358
1359 if (c->fs_key->depth_enabled) {
1360 if (c->output_position_index != -1) {
1361 qir_FTOI_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
1362 qir_FMUL(c,
1363 c->outputs[c->output_position_index],
1364 qir_uniform_f(c, 0xffffff)))->cond = discard_cond;
1365 } else {
1366 qir_MOV_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
1367 qir_FRAG_Z(c))->cond = discard_cond;
1368 }
1369 }
1370
1371 if (!c->msaa_per_sample_output) {
1372 qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE, 0),
1373 color)->cond = discard_cond;
1374 } else {
1375 for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
1376 qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE_MS, 0),
1377 c->sample_colors[i])->cond = discard_cond;
1378 }
1379 }
1380 }
1381
1382 static void
1383 emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
1384 {
1385 struct qreg packed = qir_get_temp(c);
1386
1387 for (int i = 0; i < 2; i++) {
1388 struct qreg scale =
1389 qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);
1390
1391 struct qreg packed_chan = packed;
1392 packed_chan.pack = QPU_PACK_A_16A + i;
1393
1394 qir_FTOI_dest(c, packed_chan,
1395 qir_FMUL(c,
1396 qir_FMUL(c,
1397 c->outputs[c->output_position_index + i],
1398 scale),
1399 rcp_w));
1400 }
1401
1402 qir_VPM_WRITE(c, packed);
1403 }
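/* Added note: the two FTOI results are packed into the low and high 16 bits
 * of a single VPM word via the QPU_PACK_A_16A/16B register-file pack modes.
 * The viewport X/Y scale uniforms are expected to already include the
 * hardware's subpixel scale factor, so the integer conversion directly
 * yields the fixed-point screen coordinate the rasterizer consumes.
 */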
1404
1405 static void
1406 emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
1407 {
1408 struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
1409 struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);
1410
1411 qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
1412 c->outputs[c->output_position_index + 2],
1413 zscale),
1414 rcp_w),
1415 zoffset));
1416 }
1417
1418 static void
1419 emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
1420 {
1421 qir_VPM_WRITE(c, rcp_w);
1422 }
1423
1424 static void
1425 emit_point_size_write(struct vc4_compile *c)
1426 {
1427 struct qreg point_size;
1428
1429 if (c->output_point_size_index != -1)
1430 point_size = c->outputs[c->output_point_size_index];
1431 else
1432 point_size = qir_uniform_f(c, 1.0);
1433
1434 qir_VPM_WRITE(c, point_size);
1435 }
1436
1437 /**
1438 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
1439 *
1440 * The simulator insists that there be at least one vertex attribute, so
1441 * vc4_draw.c will emit one if it wouldn't have otherwise. The simulator also
1442 * insists that all vertex attributes loaded get read by the VS/CS, so we have
1443 * to consume it here.
1444 */
1445 static void
1446 emit_stub_vpm_read(struct vc4_compile *c)
1447 {
1448 if (c->num_inputs)
1449 return;
1450
1451 c->vattr_sizes[0] = 4;
1452 (void)qir_MOV(c, qir_reg(QFILE_VPM, 0));
1453 c->num_inputs++;
1454 }
1455
1456 static void
1457 emit_vert_end(struct vc4_compile *c,
1458 struct vc4_varying_slot *fs_inputs,
1459 uint32_t num_fs_inputs)
1460 {
1461 struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
1462
1463 emit_stub_vpm_read(c);
1464
1465 emit_scaled_viewport_write(c, rcp_w);
1466 emit_zs_write(c, rcp_w);
1467 emit_rcp_wc_write(c, rcp_w);
1468 if (c->vs_key->per_vertex_point_size)
1469 emit_point_size_write(c);
1470
1471 for (int i = 0; i < num_fs_inputs; i++) {
1472 struct vc4_varying_slot *input = &fs_inputs[i];
1473 int j;
1474
1475 for (j = 0; j < c->num_outputs; j++) {
1476 struct vc4_varying_slot *output =
1477 &c->output_slots[j];
1478
1479 if (input->slot == output->slot &&
1480 input->swizzle == output->swizzle) {
1481 qir_VPM_WRITE(c, c->outputs[j]);
1482 break;
1483 }
1484 }
1485 /* Emit padding if we didn't find a declared VS output for
1486 * this FS input.
1487 */
1488 if (j == c->num_outputs)
1489 qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
1490 }
1491 }
1492
1493 static void
1494 emit_coord_end(struct vc4_compile *c)
1495 {
1496 struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
1497
1498 emit_stub_vpm_read(c);
1499
1500 for (int i = 0; i < 4; i++)
1501 qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);
1502
1503 emit_scaled_viewport_write(c, rcp_w);
1504 emit_zs_write(c, rcp_w);
1505 emit_rcp_wc_write(c, rcp_w);
1506 if (c->vs_key->per_vertex_point_size)
1507 emit_point_size_write(c);
1508 }
1509
1510 static void
1511 vc4_optimize_nir(struct nir_shader *s)
1512 {
1513 bool progress;
1514 unsigned lower_flrp =
1515 (s->options->lower_flrp16 ? 16 : 0) |
1516 (s->options->lower_flrp32 ? 32 : 0) |
1517 (s->options->lower_flrp64 ? 64 : 0);
1518
1519 do {
1520 progress = false;
1521
1522 NIR_PASS_V(s, nir_lower_vars_to_ssa);
1523 NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
1524 NIR_PASS(progress, s, nir_lower_phis_to_scalar, false);
1525 NIR_PASS(progress, s, nir_copy_prop);
1526 NIR_PASS(progress, s, nir_opt_remove_phis);
1527 NIR_PASS(progress, s, nir_opt_dce);
1528 NIR_PASS(progress, s, nir_opt_dead_cf);
1529 NIR_PASS(progress, s, nir_opt_cse);
1530 NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
1531 NIR_PASS(progress, s, nir_opt_algebraic);
1532 NIR_PASS(progress, s, nir_opt_constant_folding);
1533 if (lower_flrp != 0) {
1534 bool lower_flrp_progress = false;
1535
1536 NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
1537 lower_flrp,
1538 false /* always_precise */);
1539 if (lower_flrp_progress) {
1540 NIR_PASS(progress, s, nir_opt_constant_folding);
1541 progress = true;
1542 }
1543
1544 /* Nothing should rematerialize any flrps, so we only
1545 * need to do this lowering once.
1546 */
1547 lower_flrp = 0;
1548 }
1549
1550 NIR_PASS(progress, s, nir_opt_undef);
1551 NIR_PASS(progress, s, nir_opt_loop_unroll);
1552 } while (progress);
1553 }
1554
1555 static int
1556 driver_location_compare(const void *in_a, const void *in_b)
1557 {
1558 const nir_variable *const *a = in_a;
1559 const nir_variable *const *b = in_b;
1560
1561 return (*a)->data.driver_location - (*b)->data.driver_location;
1562 }
1563
1564 static void
1565 ntq_setup_inputs(struct vc4_compile *c)
1566 {
1567 unsigned num_entries = 0;
1568 nir_foreach_shader_in_variable(var, c->s)
1569 num_entries++;
1570
1571 nir_variable *vars[num_entries];
1572
1573 unsigned i = 0;
1574 nir_foreach_shader_in_variable(var, c->s)
1575 vars[i++] = var;
1576
1577 /* Sort the variables so that we emit the input setup in
1578 * driver_location order. This is required for VPM reads, whose data
1579 * is fetched into the VPM in driver_location (TGSI register index)
1580 * order.
1581 */
1582 qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
1583
1584 for (unsigned i = 0; i < num_entries; i++) {
1585 nir_variable *var = vars[i];
1586 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1587 unsigned loc = var->data.driver_location;
1588
1589 assert(array_len == 1);
1590 (void)array_len;
1591 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1592 (loc + 1) * 4);
1593
1594 if (c->stage == QSTAGE_FRAG) {
1595 if (var->data.location == VARYING_SLOT_POS) {
1596 emit_fragcoord_input(c, loc);
1597 } else if (util_varying_is_point_coord(var->data.location,
1598 c->fs_key->point_sprite_mask)) {
1599 c->inputs[loc * 4 + 0] = c->point_x;
1600 c->inputs[loc * 4 + 1] = c->point_y;
1601 } else {
1602 emit_fragment_input(c, loc, var->data.location);
1603 }
1604 } else {
1605 emit_vertex_input(c, loc);
1606 }
1607 }
1608 }
1609
1610 static void
1611 ntq_setup_outputs(struct vc4_compile *c)
1612 {
1613 nir_foreach_shader_out_variable(var, c->s) {
1614 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1615 unsigned loc = var->data.driver_location * 4;
1616
1617 assert(array_len == 1);
1618 (void)array_len;
1619
1620 for (int i = 0; i < 4; i++)
1621 add_output(c, loc + i, var->data.location, i);
1622
1623 if (c->stage == QSTAGE_FRAG) {
1624 switch (var->data.location) {
1625 case FRAG_RESULT_COLOR:
1626 case FRAG_RESULT_DATA0:
1627 c->output_color_index = loc;
1628 break;
1629 case FRAG_RESULT_DEPTH:
1630 c->output_position_index = loc;
1631 break;
1632 case FRAG_RESULT_SAMPLE_MASK:
1633 c->output_sample_mask_index = loc;
1634 break;
1635 }
1636 } else {
1637 switch (var->data.location) {
1638 case VARYING_SLOT_POS:
1639 c->output_position_index = loc;
1640 break;
1641 case VARYING_SLOT_PSIZ:
1642 c->output_point_size_index = loc;
1643 break;
1644 }
1645 }
1646 }
1647 }
1648
1649 /**
1650 * Sets up the mapping from nir_register to struct qreg *.
1651 *
1652 * Each nir_register gets a struct qreg per 32-bit component being stored.
1653 */
1654 static void
1655 ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
1656 {
1657 foreach_list_typed(nir_register, nir_reg, node, list) {
1658 unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1659 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1660 array_len *
1661 nir_reg->num_components);
1662
1663 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1664
1665 for (int i = 0; i < array_len * nir_reg->num_components; i++)
1666 qregs[i] = qir_get_temp(c);
1667 }
1668 }
1669
1670 static void
1671 ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
1672 {
1673 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1674 for (int i = 0; i < instr->def.num_components; i++)
1675 qregs[i] = qir_uniform_ui(c, instr->value[i].u32);
1676
1677 _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
1678 }
1679
1680 static void
1681 ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
1682 {
1683 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1684
1685 /* QIR needs there to be *some* value, so pick 0 (same as for
1686 * ntq_setup_registers()).
1687 */
1688 for (int i = 0; i < instr->def.num_components; i++)
1689 qregs[i] = qir_uniform_ui(c, 0);
1690 }
1691
1692 static void
1693 ntq_emit_color_read(struct vc4_compile *c, nir_intrinsic_instr *instr)
1694 {
1695 assert(nir_src_as_uint(instr->src[0]) == 0);
1696
1697 /* Reads of the per-sample color need to be done in
1698 * order.
1699 */
1700 int sample_index = (nir_intrinsic_base(instr) -
1701 VC4_NIR_TLB_COLOR_READ_INPUT);
1702 for (int i = 0; i <= sample_index; i++) {
1703 if (c->color_reads[i].file == QFILE_NULL) {
1704 c->color_reads[i] =
1705 qir_TLB_COLOR_READ(c);
1706 }
1707 }
1708 ntq_store_dest(c, &instr->dest, 0,
1709 qir_MOV(c, c->color_reads[sample_index]));
1710 }
1711
1712 static void
1713 ntq_emit_load_input(struct vc4_compile *c, nir_intrinsic_instr *instr)
1714 {
1715 assert(instr->num_components == 1);
1716 assert(nir_src_is_const(instr->src[0]) &&
1717 "vc4 doesn't support indirect inputs");
1718
1719 if (c->stage == QSTAGE_FRAG &&
1720 nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
1721 ntq_emit_color_read(c, instr);
1722 return;
1723 }
1724
1725 uint32_t offset = nir_intrinsic_base(instr) +
1726 nir_src_as_uint(instr->src[0]);
1727 int comp = nir_intrinsic_component(instr);
1728 ntq_store_dest(c, &instr->dest, 0,
1729 qir_MOV(c, c->inputs[offset * 4 + comp]));
1730 }
1731
1732 static void
1733 ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
1734 {
1735 unsigned offset;
1736
1737 switch (instr->intrinsic) {
1738 case nir_intrinsic_load_uniform:
1739 assert(instr->num_components == 1);
1740 if (nir_src_is_const(instr->src[0])) {
1741 offset = nir_intrinsic_base(instr) +
1742 nir_src_as_uint(instr->src[0]);
1743 assert(offset % 4 == 0);
1744 /* We need dwords */
1745 offset = offset / 4;
1746 ntq_store_dest(c, &instr->dest, 0,
1747 qir_uniform(c, QUNIFORM_UNIFORM,
1748 offset));
1749 } else {
1750 ntq_store_dest(c, &instr->dest, 0,
1751 indirect_uniform_load(c, instr));
1752 }
1753 break;
1754
1755 case nir_intrinsic_load_ubo:
1756 assert(instr->num_components == 1);
1757 ntq_store_dest(c, &instr->dest, 0, vc4_ubo_load(c, instr));
1758 break;
1759
1760 case nir_intrinsic_load_user_clip_plane:
1761 for (int i = 0; i < nir_intrinsic_dest_components(instr); i++) {
1762 ntq_store_dest(c, &instr->dest, i,
1763 qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
1764 nir_intrinsic_ucp_id(instr) *
1765 4 + i));
1766 }
1767 break;
1768
1769 case nir_intrinsic_load_blend_const_color_r_float:
1770 case nir_intrinsic_load_blend_const_color_g_float:
1771 case nir_intrinsic_load_blend_const_color_b_float:
1772 case nir_intrinsic_load_blend_const_color_a_float:
1773 ntq_store_dest(c, &instr->dest, 0,
1774 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_X +
1775 (instr->intrinsic -
1776 nir_intrinsic_load_blend_const_color_r_float),
1777 0));
1778 break;
1779
1780 case nir_intrinsic_load_blend_const_color_rgba8888_unorm:
1781 ntq_store_dest(c, &instr->dest, 0,
1782 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_RGBA,
1783 0));
1784 break;
1785
1786 case nir_intrinsic_load_blend_const_color_aaaa8888_unorm:
1787 ntq_store_dest(c, &instr->dest, 0,
1788 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_AAAA,
1789 0));
1790 break;
1791
1792 case nir_intrinsic_load_sample_mask_in:
1793 ntq_store_dest(c, &instr->dest, 0,
1794 qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
1795 break;
1796
1797 case nir_intrinsic_load_front_face:
1798 /* The register contains 0 (front) or 1 (back), and we need to
1799 * turn it into a NIR bool where true means front.
1800 */
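/* (Added) i.e. 0 + 0xffffffff = ~0 (true, front-facing) and
 * 1 + 0xffffffff = 0 (false, back-facing).
 */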
1801 ntq_store_dest(c, &instr->dest, 0,
1802 qir_ADD(c,
1803 qir_uniform_ui(c, -1),
1804 qir_reg(QFILE_FRAG_REV_FLAG, 0)));
1805 break;
1806
1807 case nir_intrinsic_load_input:
1808 ntq_emit_load_input(c, instr);
1809 break;
1810
1811 case nir_intrinsic_store_output:
1812 assert(nir_src_is_const(instr->src[1]) &&
1813 "vc4 doesn't support indirect outputs");
1814 offset = nir_intrinsic_base(instr) +
1815 nir_src_as_uint(instr->src[1]);
1816
1817 /* MSAA color outputs are the only case where we have an
1818 * output that's not lowered to being a store of a single 32
1819 * bit value.
1820 */
1821 if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
1822 assert(offset == c->output_color_index);
1823 for (int i = 0; i < 4; i++) {
1824 c->sample_colors[i] =
1825 qir_MOV(c, ntq_get_src(c, instr->src[0],
1826 i));
1827 }
1828 } else {
1829 offset = offset * 4 + nir_intrinsic_component(instr);
1830 assert(instr->num_components == 1);
1831 c->outputs[offset] =
1832 qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
1833 c->num_outputs = MAX2(c->num_outputs, offset + 1);
1834 }
1835 break;
1836
1837 case nir_intrinsic_discard:
1838 if (c->execute.file != QFILE_NULL) {
1839 qir_SF(c, c->execute);
1840 qir_MOV_cond(c, QPU_COND_ZS, c->discard,
1841 qir_uniform_ui(c, ~0));
1842 } else {
1843 qir_MOV_dest(c, c->discard, qir_uniform_ui(c, ~0));
1844 }
1845 break;
1846
1847 case nir_intrinsic_discard_if: {
1848 /* true (~0) if we're discarding */
1849 struct qreg cond = ntq_get_src(c, instr->src[0], 0);
1850
1851 if (c->execute.file != QFILE_NULL) {
1852 /* execute == 0 means the channel is active. Invert
1853 * the condition so that we can use zero as "executing
1854 * and discarding."
1855 */
1856 qir_SF(c, qir_AND(c, c->execute, qir_NOT(c, cond)));
1857 qir_MOV_cond(c, QPU_COND_ZS, c->discard, cond);
1858 } else {
1859 qir_OR_dest(c, c->discard, c->discard,
1860 ntq_get_src(c, instr->src[0], 0));
1861 }
1862
1863 break;
1864 }
1865
1866 case nir_intrinsic_load_texture_rect_scaling: {
1867 assert(nir_src_is_const(instr->src[0]));
1868 int sampler = nir_src_as_int(instr->src[0]);
1869
1870 ntq_store_dest(c, &instr->dest, 0,
1871 qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, sampler));
1872 ntq_store_dest(c, &instr->dest, 1,
1873 qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, sampler));
1874 break;
1875 }
1876
1877 default:
1878 fprintf(stderr, "Unknown intrinsic: ");
1879 nir_print_instr(&instr->instr, stderr);
1880 fprintf(stderr, "\n");
1881 break;
1882 }
1883 }
1884
1885 /* Clears (activates) the execute flags for any channels whose jump target
1886 * matches this block.
1887 */
1888 static void
1889 ntq_activate_execute_for_block(struct vc4_compile *c)
1890 {
1891 qir_SF(c, qir_SUB(c,
1892 c->execute,
1893 qir_uniform_ui(c, c->cur_block->index)));
1894 qir_MOV_cond(c, QPU_COND_ZS, c->execute, qir_uniform_ui(c, 0));
1895 }
1896
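/* Emits an if statement using the execute-mask model: each channel's
 * c->execute holds the index of the block it wants to run next (0 means
 * "active here"), and the uniform branch to the ELSE block is only taken
 * when no channel is active for the THEN block.
 */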
1897 static void
1898 ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
1899 {
1900 if (!c->vc4->screen->has_control_flow) {
1901 fprintf(stderr,
1902 "IF statement support requires updated kernel.\n");
1903 return;
1904 }
1905
1906 nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
1907 bool empty_else_block =
1908 (nir_else_block == nir_if_last_else_block(if_stmt) &&
1909 exec_list_is_empty(&nir_else_block->instr_list));
1910
1911 struct qblock *then_block = qir_new_block(c);
1912 struct qblock *after_block = qir_new_block(c);
1913 struct qblock *else_block;
1914 if (empty_else_block)
1915 else_block = after_block;
1916 else
1917 else_block = qir_new_block(c);
1918
1919 bool was_top_level = false;
1920 if (c->execute.file == QFILE_NULL) {
1921 c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
1922 was_top_level = true;
1923 }
1924
1925 /* Set ZS for executing (execute == 0) and jumping (if->condition ==
1926 * 0) channels, and then update execute flags for those to point to
1927 * the ELSE block.
1928 */
1929 qir_SF(c, qir_OR(c,
1930 c->execute,
1931 ntq_get_src(c, if_stmt->condition, 0)));
1932 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1933 qir_uniform_ui(c, else_block->index));
1934
1935 /* Jump to ELSE if nothing is active for THEN, otherwise fall
1936 * through.
1937 */
1938 qir_SF(c, c->execute);
1939 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZC);
1940 qir_link_blocks(c->cur_block, else_block);
1941 qir_link_blocks(c->cur_block, then_block);
1942
1943 /* Process the THEN block. */
1944 qir_set_emit_block(c, then_block);
1945 ntq_emit_cf_list(c, &if_stmt->then_list);
1946
1947 if (!empty_else_block) {
1948 /* Handle the end of the THEN block. First, all currently
1949 * active channels update their execute flags to point to
1950 * ENDIF
1951 */
1952 qir_SF(c, c->execute);
1953 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1954 qir_uniform_ui(c, after_block->index));
1955
1956 /* If everything points at ENDIF, then jump there immediately. */
1957 qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, after_block->index)));
1958 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
1959 qir_link_blocks(c->cur_block, after_block);
1960 qir_link_blocks(c->cur_block, else_block);
1961
1962 qir_set_emit_block(c, else_block);
1963 ntq_activate_execute_for_block(c);
1964 ntq_emit_cf_list(c, &if_stmt->else_list);
1965 }
1966
1967 qir_link_blocks(c->cur_block, after_block);
1968
1969 qir_set_emit_block(c, after_block);
1970 if (was_top_level) {
1971 c->execute = c->undef;
1972 c->last_top_block = c->cur_block;
1973 } else {
1974 ntq_activate_execute_for_block(c);
1975 }
1976 }
1977
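/* Emits a break/continue by pointing the active channels' execute flags at
 * the loop's break or continue block, then branching there directly if
 * every channel has already taken the jump.
 */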
1978 static void
1979 ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump)
1980 {
1981 struct qblock *jump_block;
1982 switch (jump->type) {
1983 case nir_jump_break:
1984 jump_block = c->loop_break_block;
1985 break;
1986 case nir_jump_continue:
1987 jump_block = c->loop_cont_block;
1988 break;
1989 default:
1990 unreachable("Unsupported jump type\n");
1991 }
1992
1993 qir_SF(c, c->execute);
1994 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1995 qir_uniform_ui(c, jump_block->index));
1996
1997 /* Jump to the destination block if everyone has taken the jump. */
1998 qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index)));
1999 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
2000 struct qblock *new_block = qir_new_block(c);
2001 qir_link_blocks(c->cur_block, jump_block);
2002 qir_link_blocks(c->cur_block, new_block);
2003 qir_set_emit_block(c, new_block);
2004 }
2005
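/* Dispatches one NIR instruction to the matching QIR emitter. */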
2006 static void
2007 ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
2008 {
2009 switch (instr->type) {
2010 case nir_instr_type_alu:
2011 ntq_emit_alu(c, nir_instr_as_alu(instr));
2012 break;
2013
2014 case nir_instr_type_intrinsic:
2015 ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
2016 break;
2017
2018 case nir_instr_type_load_const:
2019 ntq_emit_load_const(c, nir_instr_as_load_const(instr));
2020 break;
2021
2022 case nir_instr_type_ssa_undef:
2023 ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
2024 break;
2025
2026 case nir_instr_type_tex:
2027 ntq_emit_tex(c, nir_instr_as_tex(instr));
2028 break;
2029
2030 case nir_instr_type_jump:
2031 ntq_emit_jump(c, nir_instr_as_jump(instr));
2032 break;
2033
2034 default:
2035 fprintf(stderr, "Unknown NIR instr type: ");
2036 nir_print_instr(instr, stderr);
2037 fprintf(stderr, "\n");
2038 abort();
2039 }
2040 }
2041
2042 static void
2043 ntq_emit_block(struct vc4_compile *c, nir_block *block)
2044 {
2045 nir_foreach_instr(instr, block) {
2046 ntq_emit_instr(c, instr);
2047 }
2048 }
2049
2050 static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
2051
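/* Emits a NIR loop with the execute-mask model: the body is emitted into
 * the continue block, and at the bottom we branch back if any channel has
 * continued or is still executing the loop; otherwise we fall through to
 * the break block.
 */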
2052 static void
2053 ntq_emit_loop(struct vc4_compile *c, nir_loop *loop)
2054 {
2055 if (!c->vc4->screen->has_control_flow) {
2056 fprintf(stderr,
2057 "loop support requires updated kernel.\n");
2058 ntq_emit_cf_list(c, &loop->body);
2059 return;
2060 }
2061
2062 bool was_top_level = false;
2063 if (c->execute.file == QFILE_NULL) {
2064 c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
2065 was_top_level = true;
2066 }
2067
2068 struct qblock *save_loop_cont_block = c->loop_cont_block;
2069 struct qblock *save_loop_break_block = c->loop_break_block;
2070
2071 c->loop_cont_block = qir_new_block(c);
2072 c->loop_break_block = qir_new_block(c);
2073
2074 qir_link_blocks(c->cur_block, c->loop_cont_block);
2075 qir_set_emit_block(c, c->loop_cont_block);
2076 ntq_activate_execute_for_block(c);
2077
2078 ntq_emit_cf_list(c, &loop->body);
2079
2080 /* If anything had explicitly continued, or is here at the end of the
2081 * loop, then we need to loop again. SF updates are masked by the
2082 * instruction's condition, so we can do the OR of the two conditions
2083 * within SF.
2084 */
2085 qir_SF(c, c->execute);
2086 struct qinst *cont_check =
2087 qir_SUB_dest(c,
2088 c->undef,
2089 c->execute,
2090 qir_uniform_ui(c, c->loop_cont_block->index));
2091 cont_check->cond = QPU_COND_ZC;
2092 cont_check->sf = true;
2093
2094 qir_BRANCH(c, QPU_COND_BRANCH_ANY_ZS);
2095 qir_link_blocks(c->cur_block, c->loop_cont_block);
2096 qir_link_blocks(c->cur_block, c->loop_break_block);
2097
2098 qir_set_emit_block(c, c->loop_break_block);
2099 if (was_top_level) {
2100 c->execute = c->undef;
2101 c->last_top_block = c->cur_block;
2102 } else {
2103 ntq_activate_execute_for_block(c);
2104 }
2105
2106 c->loop_break_block = save_loop_break_block;
2107 c->loop_cont_block = save_loop_cont_block;
2108 }
2109
2110 static void
2111 ntq_emit_function(struct vc4_compile *c, nir_function_impl *func)
2112 {
2113 fprintf(stderr, "FUNCTIONS not handled.\n");
2114 abort();
2115 }
2116
2117 static void
2118 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
2119 {
2120 foreach_list_typed(nir_cf_node, node, node, list) {
2121 switch (node->type) {
2122 case nir_cf_node_block:
2123 ntq_emit_block(c, nir_cf_node_as_block(node));
2124 break;
2125
2126 case nir_cf_node_if:
2127 ntq_emit_if(c, nir_cf_node_as_if(node));
2128 break;
2129
2130 case nir_cf_node_loop:
2131 ntq_emit_loop(c, nir_cf_node_as_loop(node));
2132 break;
2133
2134 case nir_cf_node_function:
2135 ntq_emit_function(c, nir_cf_node_as_function(node));
2136 break;
2137
2138 default:
2139 fprintf(stderr, "Unknown NIR node type\n");
2140 abort();
2141 }
2142 }
2143 }
2144
2145 static void
2146 ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
2147 {
2148 ntq_setup_registers(c, &impl->registers);
2149 ntq_emit_cf_list(c, &impl->body);
2150 }
2151
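/* Top-level NIR-to-QIR translation: sets up the discard temporary and the
 * shader's inputs and outputs, then emits the body of main().
 */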
2152 static void
2153 nir_to_qir(struct vc4_compile *c)
2154 {
2155 if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard)
2156 c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
2157
2158 ntq_setup_inputs(c);
2159 ntq_setup_outputs(c);
2160
2161 /* Find the main function and emit the body. */
2162 nir_foreach_function(function, c->s) {
2163 assert(strcmp(function->name, "main") == 0);
2164 assert(function->impl);
2165 ntq_emit_impl(c, function->impl);
2166 }
2167 }
2168
2169 static const nir_shader_compiler_options nir_options = {
2170 .lower_all_io_to_temps = true,
2171 .lower_extract_byte = true,
2172 .lower_extract_word = true,
2173 .lower_insert_byte = true,
2174 .lower_insert_word = true,
2175 .lower_fdiv = true,
2176 .lower_ffma16 = true,
2177 .lower_ffma32 = true,
2178 .lower_ffma64 = true,
2179 .lower_flrp32 = true,
2180 .lower_fmod = true,
2181 .lower_fpow = true,
2182 .lower_fsat = true,
2183 .lower_fsqrt = true,
2184 .lower_ldexp = true,
2185 .lower_fneg = true,
2186 .lower_ineg = true,
2187 .lower_rotate = true,
2188 .lower_to_scalar = true,
2189 .lower_umax = true,
2190 .lower_umin = true,
2191 .lower_isign = true,
2192 .has_fsub = true,
2193 .has_isub = true,
2194 .max_unroll_iterations = 32,
2195 .force_indirect_unrolling = (nir_var_shader_in | nir_var_shader_out | nir_var_function_temp),
2196 };
2197
2198 const void *
2199 vc4_screen_get_compiler_options(struct pipe_screen *pscreen,
2200 enum pipe_shader_ir ir,
2201 enum pipe_shader_type shader)
2202 {
2203 return &nir_options;
2204 }
2205
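/* Counts the NIR instructions in a shader, for shader-db reporting. */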
2206 static int
2207 count_nir_instrs(nir_shader *nir)
2208 {
2209 int count = 0;
2210 nir_foreach_function(function, nir) {
2211 if (!function->impl)
2212 continue;
2213 nir_foreach_block(block, function->impl) {
2214 nir_foreach_instr(instr, block)
2215 count++;
2216 }
2217 }
2218 return count;
2219 }
2220
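/* Compiles one variant of a shader: clones the uncompiled NIR, applies the
 * key-dependent lowering passes (blending, texture swizzle/sRGB, user clip
 * planes, scalarization), translates to QIR, optimizes, and generates QPU
 * code.
 */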
2221 static struct vc4_compile *
2222 vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
2223 struct vc4_key *key, bool fs_threaded)
2224 {
2225 struct vc4_compile *c = qir_compile_init();
2226
2227 c->vc4 = vc4;
2228 c->stage = stage;
2229 c->shader_state = &key->shader_state->base;
2230 c->program_id = key->shader_state->program_id;
2231 c->variant_id =
2232 p_atomic_inc_return(&key->shader_state->compiled_variant_count);
2233 c->fs_threaded = fs_threaded;
2234
2235 c->key = key;
2236 switch (stage) {
2237 case QSTAGE_FRAG:
2238 c->fs_key = (struct vc4_fs_key *)key;
2239 if (c->fs_key->is_points) {
2240 c->point_x = emit_fragment_varying(c, ~0, 0);
2241 c->point_y = emit_fragment_varying(c, ~0, 0);
2242 } else if (c->fs_key->is_lines) {
2243 c->line_x = emit_fragment_varying(c, ~0, 0);
2244 }
2245 break;
2246 case QSTAGE_VERT:
2247 c->vs_key = (struct vc4_vs_key *)key;
2248 break;
2249 case QSTAGE_COORD:
2250 c->vs_key = (struct vc4_vs_key *)key;
2251 break;
2252 }
2253
2254 c->s = nir_shader_clone(c, key->shader_state->base.ir.nir);
2255
2256 if (stage == QSTAGE_FRAG) {
2257 NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
2258 }
2259
2260 struct nir_lower_tex_options tex_options = {
2261 .lower_txp = ~0,
2262
2263 /* Apply swizzles to all samplers. */
2264 .swizzle_result = ~0,
2265 };
2266
2267 /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
2268 * The format swizzling applies before sRGB decode, and
2269 * ARB_texture_swizzle is the last thing before returning the sample.
2270 */
2271 for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
2272 enum pipe_format format = c->key->tex[i].format;
2273
2274 if (!format)
2275 continue;
2276
2277 const uint8_t *format_swizzle = vc4_get_format_swizzle(format);
2278
2279 for (int j = 0; j < 4; j++) {
2280 uint8_t arb_swiz = c->key->tex[i].swizzle[j];
2281
2282 if (arb_swiz <= 3) {
2283 tex_options.swizzles[i][j] =
2284 format_swizzle[arb_swiz];
2285 } else {
2286 tex_options.swizzles[i][j] = arb_swiz;
2287 }
2288 }
2289
2290 if (util_format_is_srgb(format))
2291 tex_options.lower_srgb |= (1 << i);
2292 }
2293
2294 NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
2295
2296 if (c->key->ucp_enables) {
2297 if (stage == QSTAGE_FRAG) {
2298 NIR_PASS_V(c->s, nir_lower_clip_fs,
2299 c->key->ucp_enables, false);
2300 } else {
2301 NIR_PASS_V(c->s, nir_lower_clip_vs,
2302 c->key->ucp_enables, false, false, NULL);
2303 NIR_PASS_V(c->s, nir_lower_io_to_scalar,
2304 nir_var_shader_out);
2305 }
2306 }
2307
2308 /* FS input scalarizing must happen after nir_lower_two_sided_color,
2309 * which only handles a vec4 at a time. Similarly, VS output
2310 * scalarizing must happen after nir_lower_clip_vs.
2311 */
2312 if (c->stage == QSTAGE_FRAG)
2313 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
2314 else
2315 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
2316
2317 NIR_PASS_V(c->s, vc4_nir_lower_io, c);
2318 NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
2319 nir_lower_idiv_options idiv_options = {
2320 .imprecise_32bit_lowering = true,
2321 .allow_fp16 = true,
2322 };
2323 NIR_PASS_V(c->s, nir_lower_idiv, &idiv_options);
2324
2325 vc4_optimize_nir(c->s);
2326
2327 /* Do late algebraic optimization to turn add(a, neg(b)) back into
2328 * subs, then the mandatory cleanup after algebraic. Note that it may
2329 * produce fnegs, and if so then we need to keep running to squash
2330 * fneg(fneg(a)).
2331 */
2332 bool more_late_algebraic = true;
2333 while (more_late_algebraic) {
2334 more_late_algebraic = false;
2335 NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
2336 NIR_PASS_V(c->s, nir_opt_constant_folding);
2337 NIR_PASS_V(c->s, nir_copy_prop);
2338 NIR_PASS_V(c->s, nir_opt_dce);
2339 NIR_PASS_V(c->s, nir_opt_cse);
2340 }
2341
2342 NIR_PASS_V(c->s, nir_lower_bool_to_int32);
2343
2344 NIR_PASS_V(c->s, nir_convert_from_ssa, true);
2345
2346 if (vc4_debug & VC4_DEBUG_SHADERDB) {
2347 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
2348 qir_get_stage_name(c->stage),
2349 c->program_id, c->variant_id,
2350 count_nir_instrs(c->s));
2351 }
2352
2353 if (vc4_debug & VC4_DEBUG_NIR) {
2354 fprintf(stderr, "%s prog %d/%d NIR:\n",
2355 qir_get_stage_name(c->stage),
2356 c->program_id, c->variant_id);
2357 nir_print_shader(c->s, stderr);
2358 }
2359
2360 nir_to_qir(c);
2361
2362 switch (stage) {
2363 case QSTAGE_FRAG:
2364 /* FS threading requires that the thread execute
2365 * QPU_SIG_LAST_THREAD_SWITCH exactly once before terminating
2366 * (with no other THRSW afterwards, obviously). If we didn't
2367 * fetch a texture at a top level block, this wouldn't be
2368 * true.
2369 */
2370 if (c->fs_threaded && !c->last_thrsw_at_top_level) {
2371 c->failed = true;
2372 return c;
2373 }
2374
2375 emit_frag_end(c);
2376 break;
2377 case QSTAGE_VERT:
2378 emit_vert_end(c,
2379 c->vs_key->fs_inputs->input_slots,
2380 c->vs_key->fs_inputs->num_inputs);
2381 break;
2382 case QSTAGE_COORD:
2383 emit_coord_end(c);
2384 break;
2385 }
2386
2387 if (vc4_debug & VC4_DEBUG_QIR) {
2388 fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
2389 qir_get_stage_name(c->stage),
2390 c->program_id, c->variant_id);
2391 qir_dump(c);
2392 fprintf(stderr, "\n");
2393 }
2394
2395 qir_optimize(c);
2396 qir_lower_uniforms(c);
2397
2398 qir_schedule_instructions(c);
2399 qir_emit_uniform_stream_resets(c);
2400
2401 if (vc4_debug & VC4_DEBUG_QIR) {
2402 fprintf(stderr, "%s prog %d/%d QIR:\n",
2403 qir_get_stage_name(c->stage),
2404 c->program_id, c->variant_id);
2405 qir_dump(c);
2406 fprintf(stderr, "\n");
2407 }
2408
2409 qir_reorder_uniforms(c);
2410 vc4_generate_code(vc4, c);
2411
2412 if (vc4_debug & VC4_DEBUG_SHADERDB) {
2413 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
2414 qir_get_stage_name(c->stage),
2415 c->program_id, c->variant_id,
2416 c->qpu_inst_count);
2417 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
2418 qir_get_stage_name(c->stage),
2419 c->program_id, c->variant_id,
2420 c->num_uniforms);
2421 }
2422
2423 ralloc_free(c->s);
2424
2425 return c;
2426 }
2427
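/* CSO create hook shared by VS and FS: takes ownership of the NIR shader
 * (or translates TGSI to NIR), then runs the key-independent lowering and
 * optimization passes once at state-creation time.
 */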
2428 static void *
2429 vc4_shader_state_create(struct pipe_context *pctx,
2430 const struct pipe_shader_state *cso)
2431 {
2432 struct vc4_context *vc4 = vc4_context(pctx);
2433 struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
2434 if (!so)
2435 return NULL;
2436
2437 so->program_id = vc4->next_uncompiled_program_id++;
2438
2439 nir_shader *s;
2440
2441 if (cso->type == PIPE_SHADER_IR_NIR) {
2442 /* The backend takes ownership of the NIR shader on state
2443 * creation.
2444 */
2445 s = cso->ir.nir;
2446 } else {
2447 assert(cso->type == PIPE_SHADER_IR_TGSI);
2448
2449 if (vc4_debug & VC4_DEBUG_TGSI) {
2450 fprintf(stderr, "prog %d TGSI:\n",
2451 so->program_id);
2452 tgsi_dump(cso->tokens, 0);
2453 fprintf(stderr, "\n");
2454 }
2455 s = tgsi_to_nir(cso->tokens, pctx->screen, false);
2456 }
2457
2458 if (s->info.stage == MESA_SHADER_VERTEX)
2459 NIR_PASS_V(s, nir_lower_point_size, 1.0f, 0.0f);
2460
2461 NIR_PASS_V(s, nir_lower_io,
2462 nir_var_shader_in | nir_var_shader_out | nir_var_uniform,
2463 type_size, (nir_lower_io_options)0);
2464
2465 NIR_PASS_V(s, nir_lower_regs_to_ssa);
2466 NIR_PASS_V(s, nir_normalize_cubemap_coords);
2467
2468 NIR_PASS_V(s, nir_lower_load_const_to_scalar);
2469
2470 vc4_optimize_nir(s);
2471
2472 NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);
2473
2474 /* Garbage collect dead instructions */
2475 nir_sweep(s);
2476
2477 so->base.type = PIPE_SHADER_IR_NIR;
2478 so->base.ir.nir = s;
2479
2480 if (vc4_debug & VC4_DEBUG_NIR) {
2481 fprintf(stderr, "%s prog %d NIR:\n",
2482 gl_shader_stage_name(s->info.stage),
2483 so->program_id);
2484 nir_print_shader(s, stderr);
2485 fprintf(stderr, "\n");
2486 }
2487
2488 return so;
2489 }
2490
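/* Copies the uniform stream (contents, data, and texture sample count)
 * from the compile context into the compiled shader and derives its
 * uniform dirty flags.
 */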
2491 static void
2492 copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
2493 struct vc4_compile *c)
2494 {
2495 int count = c->num_uniforms;
2496 struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
2497
2498 uinfo->count = count;
2499 uinfo->data = ralloc_array(shader, uint32_t, count);
2500 memcpy(uinfo->data, c->uniform_data,
2501 count * sizeof(*uinfo->data));
2502 uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
2503 memcpy(uinfo->contents, c->uniform_contents,
2504 count * sizeof(*uinfo->contents));
2505 uinfo->num_texture_samples = c->num_texture_samples;
2506
2507 vc4_set_shader_uniform_dirty_flags(shader);
2508 }
2509
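/* Builds the compacted list of FS input slots actually read by the
 * compiled program and interns it in the context's fs_inputs_set, so that
 * a VS can key on a single fs_inputs pointer.
 */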
2510 static void
2511 vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c,
2512 struct vc4_compiled_shader *shader)
2513 {
2514 struct vc4_fs_inputs inputs;
2515
2516 memset(&inputs, 0, sizeof(inputs));
2517 inputs.input_slots = ralloc_array(shader,
2518 struct vc4_varying_slot,
2519 c->num_input_slots);
2520
2521 bool input_live[c->num_input_slots];
2522
2523 memset(input_live, 0, sizeof(input_live));
2524 qir_for_each_inst_inorder(inst, c) {
2525 for (int i = 0; i < qir_get_nsrc(inst); i++) {
2526 if (inst->src[i].file == QFILE_VARY)
2527 input_live[inst->src[i].index] = true;
2528 }
2529 }
2530
2531 for (int i = 0; i < c->num_input_slots; i++) {
2532 struct vc4_varying_slot *slot = &c->input_slots[i];
2533
2534 if (!input_live[i])
2535 continue;
2536
2537 /* Skip non-VS-output inputs. */
2538 if (slot->slot == (uint8_t)~0)
2539 continue;
2540
2541 if (slot->slot == VARYING_SLOT_COL0 ||
2542 slot->slot == VARYING_SLOT_COL1 ||
2543 slot->slot == VARYING_SLOT_BFC0 ||
2544 slot->slot == VARYING_SLOT_BFC1) {
2545 shader->color_inputs |= (1 << inputs.num_inputs);
2546 }
2547
2548 inputs.input_slots[inputs.num_inputs] = *slot;
2549 inputs.num_inputs++;
2550 }
2551 shader->num_inputs = inputs.num_inputs;
2552
2553 /* Add our set of inputs to the set of all inputs seen. This way, we
2554 * can have a single pointer that identifies an FS inputs set,
2555 * allowing VS to avoid recompiling when the FS is recompiled (or a
2556 * new one is bound using separate shader objects) but the inputs
2557 * don't change.
2558 */
2559 struct set_entry *entry = _mesa_set_search(vc4->fs_inputs_set, &inputs);
2560 if (entry) {
2561 shader->fs_inputs = entry->key;
2562 ralloc_free(inputs.input_slots);
2563 } else {
2564 struct vc4_fs_inputs *alloc_inputs;
2565
2566 alloc_inputs = rzalloc(vc4->fs_inputs_set, struct vc4_fs_inputs);
2567 memcpy(alloc_inputs, &inputs, sizeof(inputs));
2568 ralloc_steal(alloc_inputs, inputs.input_slots);
2569 _mesa_set_add(vc4->fs_inputs_set, alloc_inputs);
2570
2571 shader->fs_inputs = alloc_inputs;
2572 }
2573 }
2574
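/* Returns the compiled variant for this key, reusing the per-stage cache
 * when possible; on a miss it compiles with vc4_shader_ntq(), falling back
 * from threaded FS to single-threaded if the threaded compile fails.
 */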
2575 static struct vc4_compiled_shader *
2576 vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
2577 struct vc4_key *key)
2578 {
2579 struct hash_table *ht;
2580 uint32_t key_size;
2581 bool try_threading;
2582
2583 if (stage == QSTAGE_FRAG) {
2584 ht = vc4->fs_cache;
2585 key_size = sizeof(struct vc4_fs_key);
2586 try_threading = vc4->screen->has_threaded_fs;
2587 } else {
2588 ht = vc4->vs_cache;
2589 key_size = sizeof(struct vc4_vs_key);
2590 try_threading = false;
2591 }
2592
2593 struct vc4_compiled_shader *shader;
2594 struct hash_entry *entry = _mesa_hash_table_search(ht, key);
2595 if (entry)
2596 return entry->data;
2597
2598 struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading);
2599 /* If the FS failed to compile threaded, fall back to single threaded. */
2600 if (try_threading && c->failed) {
2601 qir_compile_destroy(c);
2602 c = vc4_shader_ntq(vc4, stage, key, false);
2603 }
2604
2605 shader = rzalloc(NULL, struct vc4_compiled_shader);
2606
2607 shader->program_id = vc4->next_compiled_program_id++;
2608 if (stage == QSTAGE_FRAG) {
2609 vc4_setup_compiled_fs_inputs(vc4, c, shader);
2610
2611 /* Note: the temporary clone in c->s has been freed. */
2612 nir_shader *orig_shader = key->shader_state->base.ir.nir;
2613 if (orig_shader->info.outputs_written & (1 << FRAG_RESULT_DEPTH))
2614 shader->disable_early_z = true;
2615 } else {
2616 shader->num_inputs = c->num_inputs;
2617
2618 shader->vattr_offsets[0] = 0;
2619 for (int i = 0; i < 8; i++) {
2620 shader->vattr_offsets[i + 1] =
2621 shader->vattr_offsets[i] + c->vattr_sizes[i];
2622
2623 if (c->vattr_sizes[i])
2624 shader->vattrs_live |= (1 << i);
2625 }
2626 }
2627
2628 shader->failed = c->failed;
2629 if (c->failed) {
2630 shader->failed = true;
2631 } else {
2632 copy_uniform_state_to_shader(shader, c);
2633 shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
2634 c->qpu_inst_count *
2635 sizeof(uint64_t));
2636 }
2637
2638 shader->fs_threaded = c->fs_threaded;
2639
2640 if ((vc4_debug & VC4_DEBUG_SHADERDB) && stage == QSTAGE_FRAG) {
2641 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d FS threads\n",
2642 qir_get_stage_name(c->stage),
2643 c->program_id, c->variant_id,
2644 1 + shader->fs_threaded);
2645 }
2646
2647 qir_compile_destroy(c);
2648
2649 struct vc4_key *dup_key;
2650 dup_key = rzalloc_size(shader, key_size); /* TODO: don't use rzalloc */
2651 memcpy(dup_key, key, key_size);
2652 _mesa_hash_table_insert(ht, dup_key, shader);
2653
2654 return shader;
2655 }
2656
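/* Fills in the parts of the shader key shared by VS and FS: per-sampler
 * format, swizzles, MSAA size or wrap/compare state, and the enabled user
 * clip planes.
 */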
2657 static void
2658 vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
2659 struct vc4_texture_stateobj *texstate)
2660 {
2661 for (int i = 0; i < texstate->num_textures; i++) {
2662 struct pipe_sampler_view *sampler = texstate->textures[i];
2663 struct vc4_sampler_view *vc4_sampler = vc4_sampler_view(sampler);
2664 struct pipe_sampler_state *sampler_state =
2665 texstate->samplers[i];
2666
2667 if (!sampler)
2668 continue;
2669
2670 key->tex[i].format = sampler->format;
2671 key->tex[i].swizzle[0] = sampler->swizzle_r;
2672 key->tex[i].swizzle[1] = sampler->swizzle_g;
2673 key->tex[i].swizzle[2] = sampler->swizzle_b;
2674 key->tex[i].swizzle[3] = sampler->swizzle_a;
2675
2676 if (sampler->texture->nr_samples > 1) {
2677 key->tex[i].msaa_width = sampler->texture->width0;
2678 key->tex[i].msaa_height = sampler->texture->height0;
2679 		} else if (sampler) {
2680 key->tex[i].compare_mode = sampler_state->compare_mode;
2681 key->tex[i].compare_func = sampler_state->compare_func;
2682 key->tex[i].wrap_s = sampler_state->wrap_s;
2683 key->tex[i].wrap_t = sampler_state->wrap_t;
2684 key->tex[i].force_first_level =
2685 vc4_sampler->force_first_level;
2686 }
2687 }
2688
2689 key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
2690 }
2691
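/* Rebuilds the FS key from current state and swaps in the matching
 * compiled shader, flagging flat-shade and FS-input changes for dependent
 * state.
 */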
2692 static void
2693 vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
2694 {
2695 struct vc4_job *job = vc4->job;
2696 struct vc4_fs_key local_key;
2697 struct vc4_fs_key *key = &local_key;
2698
2699 if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
2700 VC4_DIRTY_BLEND |
2701 VC4_DIRTY_FRAMEBUFFER |
2702 VC4_DIRTY_ZSA |
2703 VC4_DIRTY_RASTERIZER |
2704 VC4_DIRTY_SAMPLE_MASK |
2705 VC4_DIRTY_FRAGTEX |
2706 VC4_DIRTY_UNCOMPILED_FS |
2707 VC4_DIRTY_UBO_1_SIZE))) {
2708 return;
2709 }
2710
2711 memset(key, 0, sizeof(*key));
2712 vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
2713 key->base.shader_state = vc4->prog.bind_fs;
2714 key->is_points = (prim_mode == PIPE_PRIM_POINTS);
2715 key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
2716 prim_mode <= PIPE_PRIM_LINE_STRIP);
2717 key->blend = vc4->blend->rt[0];
2718 if (vc4->blend->logicop_enable) {
2719 key->logicop_func = vc4->blend->logicop_func;
2720 } else {
2721 key->logicop_func = PIPE_LOGICOP_COPY;
2722 }
2723 if (job->msaa) {
2724 key->msaa = vc4->rasterizer->base.multisample;
2725 key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
2726 key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
2727 key->sample_alpha_to_one = vc4->blend->alpha_to_one;
2728 }
2729
2730 if (vc4->framebuffer.cbufs[0])
2731 key->color_format = vc4->framebuffer.cbufs[0]->format;
2732
2733 key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
2734 key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
2735 key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
2736 key->depth_enabled = (vc4->zsa->base.depth_enabled ||
2737 key->stencil_enabled);
2738
2739 if (key->is_points) {
2740 key->point_sprite_mask =
2741 vc4->rasterizer->base.sprite_coord_enable;
2742 key->point_coord_upper_left =
2743 (vc4->rasterizer->base.sprite_coord_mode ==
2744 PIPE_SPRITE_COORD_UPPER_LEFT);
2745 }
2746
2747 key->ubo_1_size = vc4->constbuf[PIPE_SHADER_FRAGMENT].cb[1].buffer_size;
2748
2749 struct vc4_compiled_shader *old_fs = vc4->prog.fs;
2750 vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
2751 if (vc4->prog.fs == old_fs)
2752 return;
2753
2754 vc4->dirty |= VC4_DIRTY_COMPILED_FS;
2755
2756 if (vc4->rasterizer->base.flatshade &&
2757 (!old_fs || vc4->prog.fs->color_inputs != old_fs->color_inputs)) {
2758 vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
2759 }
2760
2761 if (!old_fs || vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
2762 vc4->dirty |= VC4_DIRTY_FS_INPUTS;
2763 }
2764
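/* Rebuilds the VS key from current state and looks up (or compiles) both
 * the vertex shader and the coordinate shader variants.
 */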
2765 static void
2766 vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
2767 {
2768 struct vc4_vs_key local_key;
2769 struct vc4_vs_key *key = &local_key;
2770
2771 if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
2772 VC4_DIRTY_RASTERIZER |
2773 VC4_DIRTY_VERTTEX |
2774 VC4_DIRTY_VTXSTATE |
2775 VC4_DIRTY_UNCOMPILED_VS |
2776 VC4_DIRTY_FS_INPUTS))) {
2777 return;
2778 }
2779
2780 memset(key, 0, sizeof(*key));
2781 vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
2782 key->base.shader_state = vc4->prog.bind_vs;
2783 key->fs_inputs = vc4->prog.fs->fs_inputs;
2784
2785 for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
2786 key->attr_formats[i] = vc4->vtx->pipe[i].src_format;
2787
2788 key->per_vertex_point_size =
2789 (prim_mode == PIPE_PRIM_POINTS &&
2790 vc4->rasterizer->base.point_size_per_vertex);
2791
2792 struct vc4_compiled_shader *vs =
2793 vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
2794 if (vs != vc4->prog.vs) {
2795 vc4->prog.vs = vs;
2796 vc4->dirty |= VC4_DIRTY_COMPILED_VS;
2797 }
2798
2799 key->is_coord = true;
2800 /* Coord shaders don't care what the FS inputs are. */
2801 key->fs_inputs = NULL;
2802 struct vc4_compiled_shader *cs =
2803 vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
2804 if (cs != vc4->prog.cs) {
2805 vc4->prog.cs = cs;
2806 vc4->dirty |= VC4_DIRTY_COMPILED_CS;
2807 }
2808 }
2809
2810 bool
2811 vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
2812 {
2813 vc4_update_compiled_fs(vc4, prim_mode);
2814 vc4_update_compiled_vs(vc4, prim_mode);
2815
2816 return !(vc4->prog.cs->failed ||
2817 vc4->prog.vs->failed ||
2818 vc4->prog.fs->failed);
2819 }
2820
2821 static uint32_t
2822 fs_cache_hash(const void *key)
2823 {
2824 return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
2825 }
2826
2827 static uint32_t
2828 vs_cache_hash(const void *key)
2829 {
2830 return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
2831 }
2832
2833 static bool
2834 fs_cache_compare(const void *key1, const void *key2)
2835 {
2836 return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
2837 }
2838
2839 static bool
2840 vs_cache_compare(const void *key1, const void *key2)
2841 {
2842 return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
2843 }
2844
2845 static uint32_t
2846 fs_inputs_hash(const void *key)
2847 {
2848 const struct vc4_fs_inputs *inputs = key;
2849
2850 return _mesa_hash_data(inputs->input_slots,
2851 sizeof(*inputs->input_slots) *
2852 inputs->num_inputs);
2853 }
2854
2855 static bool
2856 fs_inputs_compare(const void *key1, const void *key2)
2857 {
2858 const struct vc4_fs_inputs *inputs1 = key1;
2859 const struct vc4_fs_inputs *inputs2 = key2;
2860
2861 return (inputs1->num_inputs == inputs2->num_inputs &&
2862 memcmp(inputs1->input_slots,
2863 inputs2->input_slots,
2864 sizeof(*inputs1->input_slots) *
2865 inputs1->num_inputs) == 0);
2866 }
2867
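/* Frees a cached compiled variant if it was built from the uncompiled
 * shader being deleted, clearing the context's last-bound pointer so we
 * don't keep a dangling reference.
 */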
2868 static void
2869 delete_from_cache_if_matches(struct hash_table *ht,
2870 struct vc4_compiled_shader **last_compile,
2871 struct hash_entry *entry,
2872 struct vc4_uncompiled_shader *so)
2873 {
2874 const struct vc4_key *key = entry->key;
2875
2876 if (key->shader_state == so) {
2877 struct vc4_compiled_shader *shader = entry->data;
2878 _mesa_hash_table_remove(ht, entry);
2879 vc4_bo_unreference(&shader->bo);
2880
2881 if (shader == *last_compile)
2882 *last_compile = NULL;
2883
2884 ralloc_free(shader);
2885 }
2886 }
2887
2888 static void
2889 vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
2890 {
2891 struct vc4_context *vc4 = vc4_context(pctx);
2892 struct vc4_uncompiled_shader *so = hwcso;
2893
2894 hash_table_foreach(vc4->fs_cache, entry) {
2895 delete_from_cache_if_matches(vc4->fs_cache, &vc4->prog.fs,
2896 entry, so);
2897 }
2898 hash_table_foreach(vc4->vs_cache, entry) {
2899 delete_from_cache_if_matches(vc4->vs_cache, &vc4->prog.vs,
2900 entry, so);
2901 }
2902
2903 ralloc_free(so->base.ir.nir);
2904 free(so);
2905 }
2906
2907 static void
2908 vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
2909 {
2910 struct vc4_context *vc4 = vc4_context(pctx);
2911 vc4->prog.bind_fs = hwcso;
2912 vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
2913 }
2914
2915 static void
2916 vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
2917 {
2918 struct vc4_context *vc4 = vc4_context(pctx);
2919 vc4->prog.bind_vs = hwcso;
2920 vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
2921 }
2922
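/* Registers the shader CSO hooks and creates the per-context caches of
 * compiled variants and FS input sets.
 */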
2923 void
2924 vc4_program_init(struct pipe_context *pctx)
2925 {
2926 struct vc4_context *vc4 = vc4_context(pctx);
2927
2928 pctx->create_vs_state = vc4_shader_state_create;
2929 pctx->delete_vs_state = vc4_shader_state_delete;
2930
2931 pctx->create_fs_state = vc4_shader_state_create;
2932 pctx->delete_fs_state = vc4_shader_state_delete;
2933
2934 pctx->bind_fs_state = vc4_fp_state_bind;
2935 pctx->bind_vs_state = vc4_vp_state_bind;
2936
2937 vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
2938 fs_cache_compare);
2939 vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
2940 vs_cache_compare);
2941 vc4->fs_inputs_set = _mesa_set_create(pctx, fs_inputs_hash,
2942 fs_inputs_compare);
2943 }
2944
2945 void
2946 vc4_program_fini(struct pipe_context *pctx)
2947 {
2948 struct vc4_context *vc4 = vc4_context(pctx);
2949
2950 hash_table_foreach(vc4->fs_cache, entry) {
2951 struct vc4_compiled_shader *shader = entry->data;
2952 vc4_bo_unreference(&shader->bo);
2953 ralloc_free(shader);
2954 _mesa_hash_table_remove(vc4->fs_cache, entry);
2955 }
2956
2957 hash_table_foreach(vc4->vs_cache, entry) {
2958 struct vc4_compiled_shader *shader = entry->data;
2959 vc4_bo_unreference(&shader->bo);
2960 ralloc_free(shader);
2961 _mesa_hash_table_remove(vc4->vs_cache, entry);
2962 }
2963 }
2964