/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
#include "brw_fs.h"
#include "brw_cfg.h"
#include "brw_nir.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_vs.h"
#include "brw_dead_control_flow.h"
#include "dev/intel_debug.h"
#include "program/prog_parameter.h"
#include "util/u_math.h"

#define MAX_INSTRUCTION (1 << 30)

using namespace brw;

namespace brw {

void
src_reg::init()
{
   memset((void*)this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->type = BRW_REGISTER_TYPE_UD;
}

src_reg::src_reg(enum brw_reg_file file, int nr, const glsl_type *type)
{
   init();

   this->file = file;
   this->nr = nr;
   if (type && (type->is_scalar() || type->is_vector() || type->is_matrix()))
      this->swizzle = brw_swizzle_for_size(type->vector_elements);
   else
      this->swizzle = BRW_SWIZZLE_XYZW;
   if (type)
      this->type = brw_type_for_base_type(type);
}

/** Generic unset register constructor. */
src_reg::src_reg()
{
   init();
}

src_reg::src_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->reladdr = NULL;
}

src_reg::src_reg(const dst_reg &reg) :
   backend_reg(reg)
{
   this->reladdr = reg.reladdr;
   this->swizzle = brw_swizzle_for_mask(reg.writemask);
}

void
dst_reg::init()
{
   memset((void*)this, 0, sizeof(*this));
   this->file = BAD_FILE;
   this->type = BRW_REGISTER_TYPE_UD;
   this->writemask = WRITEMASK_XYZW;
}

dst_reg::dst_reg()
{
   init();
}

dst_reg::dst_reg(enum brw_reg_file file, int nr)
{
   init();

   this->file = file;
   this->nr = nr;
}

dst_reg::dst_reg(enum brw_reg_file file, int nr, const glsl_type *type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = brw_type_for_base_type(type);
   this->writemask = writemask;
}

dst_reg::dst_reg(enum brw_reg_file file, int nr, brw_reg_type type,
                 unsigned writemask)
{
   init();

   this->file = file;
   this->nr = nr;
   this->type = type;
   this->writemask = writemask;
}

dst_reg::dst_reg(struct ::brw_reg reg) :
   backend_reg(reg)
{
   this->offset = 0;
   this->reladdr = NULL;
}

dst_reg::dst_reg(const src_reg &reg) :
   backend_reg(reg)
{
   this->writemask = brw_mask_for_swizzle(reg.swizzle);
   this->reladdr = reg.reladdr;
}

bool
dst_reg::equals(const dst_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           (reladdr == r.reladdr ||
            (reladdr && r.reladdr && reladdr->equals(*r.reladdr))));
}

bool
vec4_instruction::is_send_from_grf() const
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GFX7:
   case VEC4_OPCODE_UNTYPED_ATOMIC:
   case VEC4_OPCODE_UNTYPED_SURFACE_READ:
   case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
   case VEC4_OPCODE_URB_READ:
   case TCS_OPCODE_URB_WRITE:
   case TCS_OPCODE_RELEASE_INPUT:
   case SHADER_OPCODE_BARRIER:
      return true;
   default:
      return false;
   }
}

/**
 * Returns true if this instruction's sources and destinations cannot
 * safely be the same register.
 *
 * In most cases, a register can be written over safely by the same
 * instruction that is its last use.  For a single instruction, the
 * sources are dereferenced before writing of the destination starts
 * (naturally).
 *
 * However, there are a few cases where this can be problematic:
 *
 * - Virtual opcodes that translate to multiple instructions in the
 *   code generator: if src == dst and one instruction writes the
 *   destination before a later instruction reads the source, then
 *   src will have been clobbered.
 *
 * The register allocator uses this information to set up conflicts between
 * GRF sources and the destination.
 */
bool
vec4_instruction::has_source_and_destination_hazard() const
{
   switch (opcode) {
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
      return true;
   default:
      /* 8-wide compressed DF operations are executed as two 4-wide operations,
       * so we have a src/dst hazard if the first half of the instruction
       * overwrites the source of the second half.  Prevent this by marking
       * compressed instructions as having src/dst hazards, so the register
       * allocator assigns safe register regions for dst and srcs.
       */
      return size_written > REG_SIZE;
   }
}

unsigned
vec4_instruction::size_read(unsigned arg) const
{
   switch (opcode) {
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case VEC4_OPCODE_UNTYPED_ATOMIC:
   case VEC4_OPCODE_UNTYPED_SURFACE_READ:
   case VEC4_OPCODE_UNTYPED_SURFACE_WRITE:
   case TCS_OPCODE_URB_WRITE:
      if (arg == 0)
         return mlen * REG_SIZE;
      break;
   case VS_OPCODE_PULL_CONSTANT_LOAD_GFX7:
      if (arg == 1)
         return mlen * REG_SIZE;
      break;
   default:
      break;
   }

   switch (src[arg].file) {
   case BAD_FILE:
      return 0;
   case IMM:
   case UNIFORM:
      return 4 * type_sz(src[arg].type);
   default:
      /* XXX - Represent actual vertical stride. */
      return exec_size * type_sz(src[arg].type);
   }
}

bool
vec4_instruction::can_do_source_mods(const struct intel_device_info *devinfo)
{
   if (devinfo->ver == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

   return true;
}

bool
vec4_instruction::can_do_cmod()
{
   if (!backend_instruction::can_do_cmod())
      return false;

   /* The accumulator result appears to get used for the conditional modifier
    * generation.  When negating a UD value, there is a 33rd bit generated for
    * the sign in the accumulator value, so now you can't check, for example,
    * equality with a 32-bit value.  See piglit fs-op-neg-uvec4.
    */
   for (unsigned i = 0; i < 3; i++) {
      if (src[i].file != BAD_FILE &&
          brw_reg_type_is_unsigned_integer(src[i].type) && src[i].negate)
         return false;
   }

   return true;
}

bool
vec4_instruction::can_do_writemask(const struct intel_device_info *devinfo)
{
   switch (opcode) {
   case SHADER_OPCODE_GFX4_SCRATCH_READ:
   case VEC4_OPCODE_DOUBLE_TO_F32:
   case VEC4_OPCODE_DOUBLE_TO_D32:
   case VEC4_OPCODE_DOUBLE_TO_U32:
   case VEC4_OPCODE_TO_DOUBLE:
   case VEC4_OPCODE_PICK_LOW_32BIT:
   case VEC4_OPCODE_PICK_HIGH_32BIT:
   case VEC4_OPCODE_SET_LOW_32BIT:
   case VEC4_OPCODE_SET_HIGH_32BIT:
   case VS_OPCODE_PULL_CONSTANT_LOAD:
   case VS_OPCODE_PULL_CONSTANT_LOAD_GFX7:
   case TCS_OPCODE_SET_INPUT_URB_OFFSETS:
   case TCS_OPCODE_SET_OUTPUT_URB_OFFSETS:
   case TES_OPCODE_CREATE_INPUT_READ_HEADER:
   case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
   case VEC4_OPCODE_URB_READ:
   case SHADER_OPCODE_MOV_INDIRECT:
      return false;
   default:
      /* The MATH instruction on Gfx6 only executes in align1 mode, which does
       * not support writemasking.
       */
      if (devinfo->ver == 6 && is_math())
         return false;

      if (is_tex())
         return false;

      return true;
   }
}

bool
vec4_instruction::can_change_types() const
{
   return dst.type == src[0].type &&
          !src[0].abs && !src[0].negate && !saturate &&
          (opcode == BRW_OPCODE_MOV ||
           (opcode == BRW_OPCODE_SEL &&
            dst.type == src[1].type &&
            predicate != BRW_PREDICATE_NONE &&
            !src[1].abs && !src[1].negate));
}

/**
 * Returns how many MRFs an opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the generate_* functions generate additional MOVs
 * for setup.
 */
unsigned
vec4_instruction::implied_mrf_writes() const
{
   if (mlen == 0 || is_send_from_grf())
      return 0;

   switch (opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1;
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
   case SHADER_OPCODE_POW:
   case TCS_OPCODE_THREAD_END:
      return 2;
   case VS_OPCODE_URB_WRITE:
      return 1;
   case VS_OPCODE_PULL_CONSTANT_LOAD:
      return 2;
   case SHADER_OPCODE_GFX4_SCRATCH_READ:
      return 2;
   case SHADER_OPCODE_GFX4_SCRATCH_WRITE:
      return 3;
   case GS_OPCODE_URB_WRITE:
   case GS_OPCODE_URB_WRITE_ALLOCATE:
   case GS_OPCODE_THREAD_END:
      return 0;
   case GS_OPCODE_FF_SYNC:
      return 1;
   case TCS_OPCODE_URB_WRITE:
      return 0;
   case SHADER_OPCODE_SHADER_TIME_ADD:
      return 0;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_CMS_W:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_SAMPLEINFO:
   case SHADER_OPCODE_GET_BUFFER_SIZE:
      return header_size;
   default:
      unreachable("not reached");
   }
}

bool
src_reg::equals(const src_reg &r) const
{
   return (this->backend_reg::equals(r) &&
           !reladdr && !r.reladdr);
}

bool
src_reg::negative_equals(const src_reg &r) const
{
   return this->backend_reg::negative_equals(r) &&
          !reladdr && !r.reladdr;
}

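/**
 * Combines sequences of partial-writemask MOVs of immediates into a single
 * vector MOV using a VF (packed restricted-float) immediate.  An
 * illustrative example:
 *
 *    mov vgrf4.x:F, 0.5F
 *    mov vgrf4.y:F, 1.0F
 *
 * could become
 *
 *    mov vgrf4.xy:F, [0.5F, 1.0F, 0F, 0F]VF
 */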
bool
vec4_visitor::opt_vector_float()
{
   bool progress = false;

   foreach_block(block, cfg) {
      unsigned last_reg = ~0u, last_offset = ~0u;
      enum brw_reg_file last_reg_file = BAD_FILE;

      uint8_t imm[4] = { 0 };
      int inst_count = 0;
      vec4_instruction *imm_inst[4];
      unsigned writemask = 0;
      enum brw_reg_type dest_type = BRW_REGISTER_TYPE_F;

      foreach_inst_in_block_safe(vec4_instruction, inst, block) {
         int vf = -1;
         enum brw_reg_type need_type = BRW_REGISTER_TYPE_LAST;

         /* Look for unconditional MOVs from an immediate with a partial
          * writemask.  Skip type-conversion MOVs other than integer 0,
          * where the type doesn't matter.  See if the immediate can be
          * represented as a VF.
          */
         if (inst->opcode == BRW_OPCODE_MOV &&
             inst->src[0].file == IMM &&
             inst->predicate == BRW_PREDICATE_NONE &&
             inst->dst.writemask != WRITEMASK_XYZW &&
             type_sz(inst->src[0].type) < 8 &&
             (inst->src[0].type == inst->dst.type || inst->src[0].d == 0)) {

            vf = brw_float_to_vf(inst->src[0].d);
            need_type = BRW_REGISTER_TYPE_D;

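            /* brw_float_to_vf() returns -1 when the value has no VF
             * representation, so fall back to interpreting the immediate
             * bits as a float.
             */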
            if (vf == -1) {
               vf = brw_float_to_vf(inst->src[0].f);
               need_type = BRW_REGISTER_TYPE_F;
            }
         } else {
            last_reg = ~0u;
         }

         /* If this wasn't a MOV, or the destination register doesn't match,
          * or we have to switch destination types, then this breaks our
          * sequence.  Combine anything we've accumulated so far.
          */
         if (last_reg != inst->dst.nr ||
             last_offset != inst->dst.offset ||
             last_reg_file != inst->dst.file ||
             (vf > 0 && dest_type != need_type)) {

            if (inst_count > 1) {
               unsigned vf;
               memcpy(&vf, imm, sizeof(vf));
               vec4_instruction *mov = MOV(imm_inst[0]->dst, brw_imm_vf(vf));
               mov->dst.type = dest_type;
               mov->dst.writemask = writemask;
               inst->insert_before(block, mov);

               for (int i = 0; i < inst_count; i++) {
                  imm_inst[i]->remove(block);
               }

               progress = true;
            }

            inst_count = 0;
            last_reg = ~0u;
            writemask = 0;
            dest_type = BRW_REGISTER_TYPE_F;

            for (int i = 0; i < 4; i++) {
               imm[i] = 0;
            }
         }

         /* Record this instruction's value (if it was representable). */
         if (vf != -1) {
            if ((inst->dst.writemask & WRITEMASK_X) != 0)
               imm[0] = vf;
            if ((inst->dst.writemask & WRITEMASK_Y) != 0)
               imm[1] = vf;
            if ((inst->dst.writemask & WRITEMASK_Z) != 0)
               imm[2] = vf;
            if ((inst->dst.writemask & WRITEMASK_W) != 0)
               imm[3] = vf;

            writemask |= inst->dst.writemask;
            imm_inst[inst_count++] = inst;

            last_reg = inst->dst.nr;
            last_offset = inst->dst.offset;
            last_reg_file = inst->dst.file;
            if (vf > 0)
               dest_type = need_type;
         }
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS);

   return progress;
}

/* Replaces unused channels of a swizzle with channels that are used.
 *
 * For instance, this pass transforms
 *
 *    mov vgrf4.yz, vgrf5.wxzy
 *
 * into
 *
 *    mov vgrf4.yz, vgrf5.xxzx
 *
 * This eliminates false uses of some channels, letting dead code elimination
 * remove the instructions that wrote them.
 */
bool
vec4_visitor::opt_reduce_swizzle()
{
   bool progress = false;

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == BAD_FILE ||
          inst->dst.file == ARF ||
          inst->dst.file == FIXED_GRF ||
          inst->is_send_from_grf())
         continue;

      unsigned swizzle;

      /* Determine which channels of the sources are read. */
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH: /* FINISHME: DPH reads only three channels of src0,
                            * but all four of src1.
                            */
         swizzle = brw_swizzle_for_size(4);
         break;
      case BRW_OPCODE_DP3:
         swizzle = brw_swizzle_for_size(3);
         break;
      case BRW_OPCODE_DP2:
         swizzle = brw_swizzle_for_size(2);
         break;

      case VEC4_OPCODE_TO_DOUBLE:
      case VEC4_OPCODE_DOUBLE_TO_F32:
      case VEC4_OPCODE_DOUBLE_TO_D32:
      case VEC4_OPCODE_DOUBLE_TO_U32:
      case VEC4_OPCODE_PICK_LOW_32BIT:
      case VEC4_OPCODE_PICK_HIGH_32BIT:
      case VEC4_OPCODE_SET_LOW_32BIT:
      case VEC4_OPCODE_SET_HIGH_32BIT:
         swizzle = brw_swizzle_for_size(4);
         break;

      default:
         swizzle = brw_swizzle_for_mask(inst->dst.writemask);
         break;
      }

      /* Update sources' swizzles. */
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != VGRF &&
             inst->src[i].file != ATTR &&
             inst->src[i].file != UNIFORM)
            continue;

         const unsigned new_swizzle =
            brw_compose_swizzle(swizzle, inst->src[i].swizzle);
         if (inst->src[i].swizzle != new_swizzle) {
            inst->src[i].swizzle = new_swizzle;
            progress = true;
         }
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}

void
vec4_visitor::split_uniform_registers()
{
   /* Prior to this, uniforms have been in an array sized according to
    * the number of vector uniforms present, sparsely filled (so an
    * aggregate results in reg indices being skipped over).  Now we're
    * going to cut those aggregates up so each .nr index is one
    * vector.  The goal is to make elimination of unused uniform
    * components easier later.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM || inst->src[i].nr >= UBO_START)
            continue;

         assert(!inst->src[i].reladdr);

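         /* Each uniform vec4 occupies 16 bytes, so folding the byte offset
          * into the register index leaves every .nr referring to exactly
          * one vec4.
          */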
         inst->src[i].nr += inst->src[i].offset / 16;
         inst->src[i].offset %= 16;
      }
   }
}

/* This function returns the register number where we placed the uniform */
static int
set_push_constant_loc(const int nr_uniforms, int *new_uniform_count,
                      const int src, const int size, const int channel_size,
                      int *new_loc, int *new_chan,
                      int *new_chans_used)
{
   int dst;
   /* Find the lowest place we can slot this uniform in. */
   for (dst = 0; dst < nr_uniforms; dst++) {
      if (ALIGN(new_chans_used[dst], channel_size) + size <= 4)
         break;
   }

   assert(dst < nr_uniforms);

   new_loc[src] = dst;
   new_chan[src] = ALIGN(new_chans_used[dst], channel_size);
   new_chans_used[dst] = ALIGN(new_chans_used[dst], channel_size) + size;

   *new_uniform_count = MAX2(*new_uniform_count, dst + 1);
   return dst;
}

void
vec4_visitor::pack_uniform_registers()
{
   if (!compiler->compact_params)
      return;

   uint8_t chans_used[this->uniforms];
   int new_loc[this->uniforms];
   int new_chan[this->uniforms];
   bool is_aligned_to_dvec4[this->uniforms];
   int new_chans_used[this->uniforms];
   int channel_sizes[this->uniforms];

   memset(chans_used, 0, sizeof(chans_used));
   memset(new_loc, 0, sizeof(new_loc));
   memset(new_chan, 0, sizeof(new_chan));
   memset(new_chans_used, 0, sizeof(new_chans_used));
   memset(is_aligned_to_dvec4, 0, sizeof(is_aligned_to_dvec4));
   memset(channel_sizes, 0, sizeof(channel_sizes));

   /* Find which uniform vectors are actually used by the program.  We
    * expect unused vector elements when we've moved array access out
    * to pull constants, and from some GLSL code generators like wine.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      unsigned readmask;
      switch (inst->opcode) {
      case VEC4_OPCODE_PACK_BYTES:
      case BRW_OPCODE_DP4:
      case BRW_OPCODE_DPH:
         readmask = 0xf;
         break;
      case BRW_OPCODE_DP3:
         readmask = 0x7;
         break;
      case BRW_OPCODE_DP2:
         readmask = 0x3;
         break;
      default:
         readmask = inst->dst.writemask;
         break;
      }

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM || inst->src[i].nr >= UBO_START)
            continue;

         assert(type_sz(inst->src[i].type) % 4 == 0);
         int channel_size = type_sz(inst->src[i].type) / 4;

         int reg = inst->src[i].nr;
         for (int c = 0; c < 4; c++) {
            if (!(readmask & (1 << c)))
               continue;

            unsigned channel = BRW_GET_SWZ(inst->src[i].swizzle, c) + 1;
            unsigned used = MAX2(chans_used[reg], channel * channel_size);
            if (used <= 4) {
               chans_used[reg] = used;
               channel_sizes[reg] = MAX2(channel_sizes[reg], channel_size);
            } else {
               is_aligned_to_dvec4[reg] = true;
               is_aligned_to_dvec4[reg + 1] = true;
               chans_used[reg + 1] = used - 4;
               channel_sizes[reg + 1] = MAX2(channel_sizes[reg + 1], channel_size);
            }
         }
      }

      if (inst->opcode == SHADER_OPCODE_MOV_INDIRECT &&
          inst->src[0].file == UNIFORM) {
         assert(inst->src[2].file == BRW_IMMEDIATE_VALUE);
         assert(inst->src[0].subnr == 0);

         unsigned bytes_read = inst->src[2].ud;
         assert(bytes_read % 4 == 0);
         unsigned vec4s_read = DIV_ROUND_UP(bytes_read, 16);

         /* We just mark every register touched by a MOV_INDIRECT as being
          * fully used.  This ensures that it doesn't get broken up piecewise
          * by the next part of our packing algorithm.
          */
         int reg = inst->src[0].nr;
         int channel_size = type_sz(inst->src[0].type) / 4;
         for (unsigned i = 0; i < vec4s_read; i++) {
            chans_used[reg + i] = 4;
            channel_sizes[reg + i] = MAX2(channel_sizes[reg + i], channel_size);
         }
      }
   }

   int new_uniform_count = 0;

   /* As the uniforms are going to be reordered, take the data from a temporary
    * copy of the original param[].
    */
   uint32_t *param = ralloc_array(NULL, uint32_t, stage_prog_data->nr_params);
   memcpy(param, stage_prog_data->param,
          sizeof(uint32_t) * stage_prog_data->nr_params);

   /* Now, figure out a packing of the live uniform vectors into our
    * push constants.  Start with dvec{3,4} because they are aligned to
    * dvec4 size (2 vec4).
    */
   for (int src = 0; src < uniforms; src++) {
      int size = chans_used[src];

      if (size == 0 || !is_aligned_to_dvec4[src])
         continue;

      /* dvec3 are aligned to dvec4 size, so align the size to 4 to avoid
       * moving the last component of a dvec3 into the available location
       * at the end of a previous dvec3.  Those available locations could
       * be filled by smaller variables in the next loop.
       */
      size = ALIGN(size, 4);
      int dst = set_push_constant_loc(uniforms, &new_uniform_count,
                                      src, size, channel_sizes[src],
                                      new_loc, new_chan,
                                      new_chans_used);
      /* Move the references to the data */
      for (int j = 0; j < size; j++) {
         stage_prog_data->param[dst * 4 + new_chan[src] + j] =
            param[src * 4 + j];
      }
   }

   /* Continue with the rest of data, which is aligned to vec4. */
   for (int src = 0; src < uniforms; src++) {
      int size = chans_used[src];

      if (size == 0 || is_aligned_to_dvec4[src])
         continue;

      int dst = set_push_constant_loc(uniforms, &new_uniform_count,
                                      src, size, channel_sizes[src],
                                      new_loc, new_chan,
                                      new_chans_used);
      /* Move the references to the data */
      for (int j = 0; j < size; j++) {
         stage_prog_data->param[dst * 4 + new_chan[src] + j] =
            param[src * 4 + j];
      }
   }

   ralloc_free(param);
   this->uniforms = new_uniform_count;
   stage_prog_data->nr_params = new_uniform_count * 4;

   /* Now, update the instructions for our repacked uniforms. */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         int src = inst->src[i].nr;

         if (inst->src[i].file != UNIFORM || inst->src[i].nr >= UBO_START)
            continue;

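         /* new_chan[] counts 32-bit channels, so divide by the channel size
          * of this type to get the shift in whole components.  Adding the
          * same offset to each 2-bit field of the swizzle shifts every
          * component by the distance the uniform moved within its vec4.
          */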
         int chan = new_chan[src] / channel_sizes[src];
         inst->src[i].nr = new_loc[src];
         inst->src[i].swizzle += BRW_SWIZZLE4(chan, chan, chan, chan);
      }
   }
}

/**
 * Does algebraic optimizations (0 * a = 0, 1 * a = a, a + 0 = a).
 *
 * While GLSL IR also performs this optimization, we end up with it in
 * our instruction stream for a couple of reasons.  One is that we
 * sometimes generate silly instructions, for example in array access
 * where we'll generate "ADD offset, index, base" even if base is 0.
 * The other is that GLSL IR's constant propagation doesn't track the
 * components of aggregates, so some VS patterns (initialize matrix to
 * 0, accumulate in vertex blending factors) end up breaking down to
 * instructions involving 0.
 */
bool
vec4_visitor::opt_algebraic()
{
   bool progress = false;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         if (inst->src[0].file != IMM)
            break;

         if (inst->saturate) {
            /* Full mixed-type saturates don't happen.  However, we can end up
             * with things like:
             *
             *    mov.sat(8) g21<1>DF -1F
             *
             * Other mixed-size-but-same-base-type cases may also be possible.
             */
            if (inst->dst.type != inst->src[0].type &&
                inst->dst.type != BRW_REGISTER_TYPE_DF &&
                inst->src[0].type != BRW_REGISTER_TYPE_F)
               assert(!"unimplemented: saturate mixed types");

            if (brw_saturate_immediate(inst->src[0].type,
                                       &inst->src[0].as_brw_reg())) {
               inst->saturate = false;
               progress = true;
            }
         }
         break;

      case BRW_OPCODE_OR:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case VEC4_OPCODE_UNPACK_UNIFORM:
         if (inst->src[0].file != UNIFORM) {
            inst->opcode = BRW_OPCODE_MOV;
            progress = true;
         }
         break;

      case BRW_OPCODE_ADD:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;

      case BRW_OPCODE_MUL:
         if (inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            switch (inst->src[0].type) {
            case BRW_REGISTER_TYPE_F:
               inst->src[0] = brw_imm_f(0.0f);
               break;
            case BRW_REGISTER_TYPE_D:
               inst->src[0] = brw_imm_d(0);
               break;
            case BRW_REGISTER_TYPE_UD:
               inst->src[0] = brw_imm_ud(0u);
               break;
            default:
               unreachable("not reached");
            }
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            progress = true;
         } else if (inst->src[1].is_negative_one()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0].negate = !inst->src[0].negate;
            inst->src[1] = src_reg();
            progress = true;
         }
         break;
      case SHADER_OPCODE_BROADCAST:
         if (is_uniform(inst->src[0]) ||
             inst->src[1].is_zero()) {
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[1] = src_reg();
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DATA_FLOW |
                          DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}

/**
 * Only a limited number of hardware registers may be used for push
 * constants, so this turns access to the overflowed constants into
 * pull constants.
 */
void
vec4_visitor::move_push_constants_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

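   /* push_length counts 32-byte registers, each of which holds eight 32-bit
    * components.
    */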
   const int max_uniform_components = push_length * 8;

   if (this->uniforms * 4 <= max_uniform_components)
      return;

   assert(compiler->supports_pull_constants);
   assert(compiler->compact_params);

   /* If we got here, we also can't have any push ranges */
   for (unsigned i = 0; i < 4; i++)
      assert(prog_data->base.ubo_ranges[i].length == 0);

   /* Make some sort of choice as to which uniforms get sent to pull
    * constants.  We could potentially do something clever here like
    * look for the most infrequently used uniform vec4s, but leave
    * that for later.
    */
   for (int i = 0; i < this->uniforms * 4; i += 4) {
      pull_constant_loc[i / 4] = -1;

      if (i >= max_uniform_components) {
         uint32_t *values = &stage_prog_data->param[i];

         /* Try to find an existing copy of this uniform in the pull
          * constants if it was part of an array access already.
          */
         for (unsigned int j = 0; j < stage_prog_data->nr_pull_params; j += 4) {
            int matches;

            for (matches = 0; matches < 4; matches++) {
               if (stage_prog_data->pull_param[j + matches] != values[matches])
                  break;
            }

            if (matches == 4) {
               pull_constant_loc[i / 4] = j / 4;
               break;
            }
         }

         if (pull_constant_loc[i / 4] == -1) {
            assert(stage_prog_data->nr_pull_params % 4 == 0);
            pull_constant_loc[i / 4] = stage_prog_data->nr_pull_params / 4;

            for (int j = 0; j < 4; j++) {
               stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
                  values[j];
            }
         }
      }
   }

   /* Now actually rewrite usage of the things we've moved to pull
    * constants.
    */
   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM || inst->src[i].nr >= UBO_START ||
             pull_constant_loc[inst->src[i].nr] == -1)
            continue;

         int uniform = inst->src[i].nr;

         const glsl_type *temp_type = type_sz(inst->src[i].type) == 8 ?
            glsl_type::dvec4_type : glsl_type::vec4_type;
         dst_reg temp = dst_reg(this, temp_type);

         emit_pull_constant_load(block, inst, temp, inst->src[i],
                                 pull_constant_loc[uniform], src_reg());

         inst->src[i].file = temp.file;
         inst->src[i].nr = temp.nr;
         inst->src[i].offset %= 16;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Repack push constants to remove the now-unused ones. */
   pack_uniform_registers();
}

/* Conditions for which we want to avoid setting the dependency control bits */
bool
vec4_visitor::is_dep_ctrl_unsafe(const vec4_instruction *inst)
{
#define IS_DWORD(reg) \
   (reg.type == BRW_REGISTER_TYPE_UD || \
    reg.type == BRW_REGISTER_TYPE_D)

#define IS_64BIT(reg) (reg.file != BAD_FILE && type_sz(reg.type) == 8)

   if (devinfo->ver >= 7) {
      if (IS_64BIT(inst->dst) || IS_64BIT(inst->src[0]) ||
          IS_64BIT(inst->src[1]) || IS_64BIT(inst->src[2]))
         return true;
   }

#undef IS_64BIT
#undef IS_DWORD

   /*
    * mlen:
    * In the presence of send messages, totally interrupt dependency
    * control.  They're long enough that the chance of dependency
    * control around them just doesn't matter.
    *
    * predicate:
    * From the Ivy Bridge PRM, volume 4 part 3.7, page 80:
    * When a sequence of NoDDChk and NoDDClr are used, the last instruction
    * that completes the scoreboard clear must have a non-zero execution
    * mask.  This means, if any kind of predication can change the execution
    * mask or channel enable of the last instruction, the optimization must
    * be avoided.  This is to avoid instructions being shot down the pipeline
    * when no writes are required.
    *
    * math:
    * Dependency control does not work well over math instructions.
    * NB: Discovered empirically
    */
   return (inst->mlen || inst->predicate || inst->is_math());
}

/**
 * Sets the dependency control fields on instructions after register
 * allocation and before the generator is run.
 *
 * When you have a sequence of instructions like:
 *
 *   DP4 temp.x vertex uniform[0]
 *   DP4 temp.y vertex uniform[0]
 *   DP4 temp.z vertex uniform[0]
 *   DP4 temp.w vertex uniform[0]
 *
 * The hardware doesn't know that it can actually run the later instructions
 * while the previous ones are in flight, producing stalls.  However, we have
 * manual fields we can set in the instructions that let it do so.
 */
void
vec4_visitor::opt_set_dependency_control()
{
   vec4_instruction *last_grf_write[BRW_MAX_GRF];
   uint8_t grf_channels_written[BRW_MAX_GRF];
   vec4_instruction *last_mrf_write[BRW_MAX_GRF];
   uint8_t mrf_channels_written[BRW_MAX_GRF];

   assert(prog_data->total_grf ||
          !"Must be called after register allocation");

   foreach_block (block, cfg) {
      memset(last_grf_write, 0, sizeof(last_grf_write));
      memset(last_mrf_write, 0, sizeof(last_mrf_write));

      foreach_inst_in_block (vec4_instruction, inst, block) {
         /* If we read from a register that we were doing dependency control
          * on, don't do dependency control across the read.
          */
         for (int i = 0; i < 3; i++) {
            int reg = inst->src[i].nr + inst->src[i].offset / REG_SIZE;
            if (inst->src[i].file == VGRF) {
               last_grf_write[reg] = NULL;
            } else if (inst->src[i].file == FIXED_GRF) {
               memset(last_grf_write, 0, sizeof(last_grf_write));
               break;
            }
            assert(inst->src[i].file != MRF);
         }

         if (is_dep_ctrl_unsafe(inst)) {
            memset(last_grf_write, 0, sizeof(last_grf_write));
            memset(last_mrf_write, 0, sizeof(last_mrf_write));
            continue;
         }

         /* Now, see if we can do dependency control for this instruction
          * against a previous one writing to its destination.
          */
         int reg = inst->dst.nr + inst->dst.offset / REG_SIZE;
         if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
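            /* Setting NoDDClr on the earlier write and NoDDChk on this one
             * tells the EU that the two instructions write disjoint channels
             * of the register, so the second need not wait for the first.
             */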
            if (last_grf_write[reg] &&
                last_grf_write[reg]->dst.offset == inst->dst.offset &&
                !(inst->dst.writemask & grf_channels_written[reg])) {
               last_grf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               grf_channels_written[reg] = 0;
            }

            last_grf_write[reg] = inst;
            grf_channels_written[reg] |= inst->dst.writemask;
         } else if (inst->dst.file == MRF) {
            if (last_mrf_write[reg] &&
                last_mrf_write[reg]->dst.offset == inst->dst.offset &&
                !(inst->dst.writemask & mrf_channels_written[reg])) {
               last_mrf_write[reg]->no_dd_clear = true;
               inst->no_dd_check = true;
            } else {
               mrf_channels_written[reg] = 0;
            }

            last_mrf_write[reg] = inst;
            mrf_channels_written[reg] |= inst->dst.writemask;
         }
      }
   }
}

bool
vec4_instruction::can_reswizzle(const struct intel_device_info *devinfo,
                                int dst_writemask,
                                int swizzle,
                                int swizzle_mask)
{
   /* Gfx6 MATH instructions can not execute in align16 mode, so swizzles
    * are not allowed.
    */
   if (devinfo->ver == 6 && is_math() && swizzle != BRW_SWIZZLE_XYZW)
      return false;

   /* If we write to the flag register changing the swizzle would change
    * what channels are written to the flag register.
    */
   if (writes_flag(devinfo))
      return false;

   /* We can't swizzle implicit accumulator access.  We'd have to
    * reswizzle the producer of the accumulator value in addition
    * to the consumer (i.e. both MUL and MACH).  Just skip this.
    */
   if (reads_accumulator_implicitly())
      return false;

   if (!can_do_writemask(devinfo) && dst_writemask != WRITEMASK_XYZW)
      return false;

   /* If this instruction sets anything not referenced by swizzle, then we'd
    * totally break it when we reswizzle.
    */
   if (dst.writemask & ~swizzle_mask)
      return false;

   if (mlen > 0)
      return false;

   for (int i = 0; i < 3; i++) {
      if (src[i].is_accumulator())
         return false;
   }

   return true;
}

/**
 * For any channels in the swizzle's source that were populated by this
 * instruction, rewrite the instruction to put the appropriate result directly
 * in those channels.
 *
 * e.g. for swizzle='yywx', MUL a.xy b c -> MUL a.yy_x b.yy_x c.yy_x
 */
void
vec4_instruction::reswizzle(int dst_writemask, int swizzle)
{
   /* Destination write mask doesn't correspond to source swizzle for the dot
    * product and pack_bytes instructions.
    */
   if (opcode != BRW_OPCODE_DP4 && opcode != BRW_OPCODE_DPH &&
       opcode != BRW_OPCODE_DP3 && opcode != BRW_OPCODE_DP2 &&
       opcode != VEC4_OPCODE_PACK_BYTES) {
      for (int i = 0; i < 3; i++) {
         if (src[i].file == BAD_FILE)
            continue;

         if (src[i].file == IMM) {
            assert(src[i].type != BRW_REGISTER_TYPE_V &&
                   src[i].type != BRW_REGISTER_TYPE_UV);

            /* Vector immediate types need to be reswizzled. */
            if (src[i].type == BRW_REGISTER_TYPE_VF) {
               const unsigned imm[] = {
                  (src[i].ud >>  0) & 0x0ff,
                  (src[i].ud >>  8) & 0x0ff,
                  (src[i].ud >> 16) & 0x0ff,
                  (src[i].ud >> 24) & 0x0ff,
               };

               src[i] = brw_imm_vf4(imm[BRW_GET_SWZ(swizzle, 0)],
                                    imm[BRW_GET_SWZ(swizzle, 1)],
                                    imm[BRW_GET_SWZ(swizzle, 2)],
                                    imm[BRW_GET_SWZ(swizzle, 3)]);
            }

            continue;
         }

         src[i].swizzle = brw_compose_swizzle(swizzle, src[i].swizzle);
      }
   }

   /* Apply the specified swizzle and writemask to the original mask of
    * written components.
    */
   dst.writemask = dst_writemask &
                   brw_apply_swizzle_to_mask(swizzle, dst.writemask);
}

/*
 * Tries to reduce extra MOV instructions by taking temporary GRFs that get
 * just written and then MOVed into another reg and making the original write
 * of the GRF write directly to the final destination instead.
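 *
 * An illustrative example:
 *
 *    mul vgrf3.xyzw:F, vgrf1, vgrf2
 *    mov m4.xyzw:F, vgrf3
 *
 * could become
 *
 *    mul m4.xyzw:F, vgrf1, vgrf2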
 */
bool
vec4_visitor::opt_register_coalesce()
{
   bool progress = false;
   int next_ip = 0;
   const vec4_live_variables &live = live_analysis.require();

   foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
      int ip = next_ip;
      next_ip++;

      if (inst->opcode != BRW_OPCODE_MOV ||
          (inst->dst.file != VGRF && inst->dst.file != MRF) ||
          inst->predicate ||
          inst->src[0].file != VGRF ||
          inst->dst.type != inst->src[0].type ||
          inst->src[0].abs || inst->src[0].negate || inst->src[0].reladdr)
         continue;

      /* Remove no-op MOVs */
      if (inst->dst.file == inst->src[0].file &&
          inst->dst.nr == inst->src[0].nr &&
          inst->dst.offset == inst->src[0].offset) {
         bool is_nop_mov = true;

         for (unsigned c = 0; c < 4; c++) {
            if ((inst->dst.writemask & (1 << c)) == 0)
               continue;

            if (BRW_GET_SWZ(inst->src[0].swizzle, c) != c) {
               is_nop_mov = false;
               break;
            }
         }

         if (is_nop_mov) {
            inst->remove(block);
            progress = true;
            continue;
         }
      }

      bool to_mrf = (inst->dst.file == MRF);

      /* Can't coalesce this GRF if someone else was going to
       * read it later.
       */
      if (live.var_range_end(var_from_reg(alloc, dst_reg(inst->src[0])), 8) > ip)
         continue;

      /* We need to check interference with the final destination between this
       * instruction and the earliest instruction involved in writing the GRF
       * we're eliminating.  To do that, keep track of which of our source
       * channels we've seen initialized.
       */
      const unsigned chans_needed =
         brw_apply_inv_swizzle_to_mask(inst->src[0].swizzle,
                                       inst->dst.writemask);
      unsigned chans_remaining = chans_needed;

      /* Now walk up the instruction stream trying to see if we can rewrite
       * everything writing to the temporary to write into the destination
       * instead.
       */
      vec4_instruction *_scan_inst = (vec4_instruction *)inst->prev;
      foreach_inst_in_block_reverse_starting_from(vec4_instruction, scan_inst,
                                                  inst) {
         _scan_inst = scan_inst;

         if (regions_overlap(inst->src[0], inst->size_read(0),
                             scan_inst->dst, scan_inst->size_written)) {
            /* Found something writing to the reg we want to coalesce away. */
            if (to_mrf) {
               /* SEND instructions can't have MRF as a destination. */
               if (scan_inst->mlen)
                  break;

               if (devinfo->ver == 6) {
                  /* gfx6 math instructions must have the destination be
                   * VGRF, so no compute-to-MRF for them.
                   */
                  if (scan_inst->is_math()) {
                     break;
                  }
               }
            }

            /* VS_OPCODE_UNPACK_FLAGS_SIMD4X2 generates a bunch of mov(1)
             * instructions, and this optimization pass is not capable of
             * handling that.  Bail on these instructions and hope that some
             * later optimization pass can do the right thing after they are
             * expanded.
             */
            if (scan_inst->opcode == VS_OPCODE_UNPACK_FLAGS_SIMD4X2)
               break;

            /* This doesn't handle saturation on the instruction we
             * want to coalesce away if the register types do not match.
             * But if scan_inst is a non type-converting 'mov', we can fix
             * the types later.
             */
            if (inst->saturate &&
                inst->dst.type != scan_inst->dst.type &&
                !(scan_inst->opcode == BRW_OPCODE_MOV &&
                  scan_inst->dst.type == scan_inst->src[0].type))
               break;

            /* Only allow coalescing between registers of the same type size.
             * Otherwise we would need to make the pass aware of the fact that
             * channel sizes are different for single and double precision.
             */
            if (type_sz(inst->src[0].type) != type_sz(scan_inst->src[0].type))
               break;

            /* Check that scan_inst writes the same amount of data as the
             * instruction, otherwise coalescing would lead to writing a
             * different (larger or smaller) region of the destination
             */
            if (scan_inst->size_written != inst->size_written)
               break;

            /* If we can't handle the swizzle, bail. */
            if (!scan_inst->can_reswizzle(devinfo, inst->dst.writemask,
                                          inst->src[0].swizzle,
                                          chans_needed)) {
               break;
            }

            /* This only handles coalescing writes of 8 channels (1 register
             * for single-precision and 2 registers for double-precision)
             * starting at the source offset of the copy instruction.
             */
            if (DIV_ROUND_UP(scan_inst->size_written,
                             type_sz(scan_inst->dst.type)) > 8 ||
                scan_inst->dst.offset != inst->src[0].offset)
               break;

            /* Mark which channels we found unconditional writes for. */
            if (!scan_inst->predicate)
               chans_remaining &= ~scan_inst->dst.writemask;

            if (chans_remaining == 0)
               break;
         }

         /* You can't read from an MRF, so if someone else reads our MRF's
          * source GRF that we wanted to rewrite, that stops us.  If it's a
          * GRF we're trying to coalesce to, we don't actually handle
          * rewriting sources so bail in that case as well.
          */
         bool interfered = false;
         for (int i = 0; i < 3; i++) {
            if (regions_overlap(inst->src[0], inst->size_read(0),
                                scan_inst->src[i], scan_inst->size_read(i)))
               interfered = true;
         }
         if (interfered)
            break;

         /* If somebody else writes the same channels of our destination here,
          * we can't coalesce before that.
          */
         if (regions_overlap(inst->dst, inst->size_written,
                             scan_inst->dst, scan_inst->size_written) &&
             (inst->dst.writemask & scan_inst->dst.writemask) != 0) {
            break;
         }

         /* Check for reads of the register we're trying to coalesce into.  We
          * can't go rewriting instructions above that to put some other value
          * in the register instead.
          */
         if (to_mrf && scan_inst->mlen > 0) {
            unsigned start = scan_inst->base_mrf;
            unsigned end = scan_inst->base_mrf + scan_inst->mlen;

            if (inst->dst.nr >= start && inst->dst.nr < end) {
               break;
            }
         } else {
            for (int i = 0; i < 3; i++) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   scan_inst->src[i], scan_inst->size_read(i)))
                  interfered = true;
            }
            if (interfered)
               break;
         }
      }

      if (chans_remaining == 0) {
         /* If we've made it here, we have an MOV we want to coalesce out, and
          * a scan_inst pointing to the earliest instruction involved in
          * computing the value.  Now go rewrite the instruction stream
          * between the two.
          */
         vec4_instruction *scan_inst = _scan_inst;
         while (scan_inst != inst) {
            if (scan_inst->dst.file == VGRF &&
                scan_inst->dst.nr == inst->src[0].nr &&
                scan_inst->dst.offset == inst->src[0].offset) {
               scan_inst->reswizzle(inst->dst.writemask,
                                    inst->src[0].swizzle);
               scan_inst->dst.file = inst->dst.file;
               scan_inst->dst.nr = inst->dst.nr;
               scan_inst->dst.offset = inst->dst.offset;
               if (inst->saturate &&
                   inst->dst.type != scan_inst->dst.type) {
                  /* If we have reached this point, scan_inst is a non
                   * type-converting 'mov' and we can modify its register types
                   * to match the ones in inst.  Otherwise, we could have an
                   * incorrect saturation result.
                   */
                  scan_inst->dst.type = inst->dst.type;
                  scan_inst->src[0].type = inst->src[0].type;
               }
               scan_inst->saturate |= inst->saturate;
            }
            scan_inst = (vec4_instruction *)scan_inst->next;
         }
         inst->remove(block);
         progress = true;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTIONS);

   return progress;
}

/**
 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
 * flow.  We could probably do better here with some form of divergence
 * analysis.
 */
bool
vec4_visitor::eliminate_find_live_channel()
{
   bool progress = false;
   unsigned depth = 0;

   if (!brw_stage_has_packed_dispatch(devinfo, stage, stage_prog_data)) {
      /* The optimization below assumes that channel zero is live on thread
       * dispatch, which may not be the case if the fixed function dispatches
       * threads sparsely.
       */
      return false;
   }

   foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_IF:
      case BRW_OPCODE_DO:
         depth++;
         break;

      case BRW_OPCODE_ENDIF:
      case BRW_OPCODE_WHILE:
         depth--;
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         if (depth == 0) {
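            /* Outside of control flow, channel 0 is always enabled, so the
             * result of FIND_LIVE_CHANNEL is trivially 0.
             */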
            inst->opcode = BRW_OPCODE_MOV;
            inst->src[0] = brw_imm_d(0);
            inst->force_writemask_all = true;
            progress = true;
         }
         break;

      default:
         break;
      }
   }

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}

/**
 * Splits virtual GRFs requesting more than one contiguous physical register.
 *
 * We initially create large virtual GRFs for temporary structures, arrays,
 * and matrices, so that the visitor functions can add offsets to work their
 * way down to the actual member being accessed.  But when it comes to
 * optimization, we'd like to treat each register as individual storage if
 * possible.
 *
 * So far, the only thing that might prevent splitting is a send message from
 * a GRF on IVB.
 */
void
vec4_visitor::split_virtual_grfs()
{
   int num_vars = this->alloc.count;
   int new_virtual_grf[num_vars];
   bool split_grf[num_vars];

   memset(new_virtual_grf, 0, sizeof(new_virtual_grf));

   /* Try to split anything > 0 sized. */
   for (int i = 0; i < num_vars; i++) {
      split_grf[i] = this->alloc.sizes[i] != 1;
   }

   /* Check that the instructions are compatible with the registers we're
    * trying to split.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && regs_written(inst) > 1)
         split_grf[inst->dst.nr] = false;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && regs_read(inst, i) > 1)
            split_grf[inst->src[i].nr] = false;
      }
   }

   /* Allocate new space for split regs.  Note that the virtual
    * numbers will be contiguous.
    */
   for (int i = 0; i < num_vars; i++) {
      if (!split_grf[i])
         continue;

      new_virtual_grf[i] = alloc.allocate(1);
      for (unsigned j = 2; j < this->alloc.sizes[i]; j++) {
         unsigned reg = alloc.allocate(1);
         assert(reg == new_virtual_grf[i] + j - 1);
         (void) reg;
      }
      this->alloc.sizes[i] = 1;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && split_grf[inst->dst.nr] &&
          inst->dst.offset / REG_SIZE != 0) {
         inst->dst.nr = (new_virtual_grf[inst->dst.nr] +
                         inst->dst.offset / REG_SIZE - 1);
         inst->dst.offset %= REG_SIZE;
      }
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && split_grf[inst->src[i].nr] &&
             inst->src[i].offset / REG_SIZE != 0) {
            inst->src[i].nr = (new_virtual_grf[inst->src[i].nr] +
                               inst->src[i].offset / REG_SIZE - 1);
            inst->src[i].offset %= REG_SIZE;
         }
      }
   }
   invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL | DEPENDENCY_VARIABLES);
}

void
vec4_visitor::dump_instruction(const backend_instruction *be_inst) const
{
   dump_instruction(be_inst, stderr);
}

void
vec4_visitor::dump_instruction(const backend_instruction *be_inst, FILE *file) const
{
   const vec4_instruction *inst = (const vec4_instruction *)be_inst;

   if (inst->predicate) {
      fprintf(file, "(%cf%d.%d%s) ",
              inst->predicate_inverse ? '-' : '+',
              inst->flag_subreg / 2,
              inst->flag_subreg % 2,
              pred_ctrl_align16[inst->predicate]);
   }

   fprintf(file, "%s(%d)", brw_instruction_name(devinfo, inst->opcode),
           inst->exec_size);
   if (inst->saturate)
      fprintf(file, ".sat");
   if (inst->conditional_mod) {
      fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
      if (!inst->predicate &&
          (devinfo->ver < 5 || (inst->opcode != BRW_OPCODE_SEL &&
                                inst->opcode != BRW_OPCODE_CSEL &&
                                inst->opcode != BRW_OPCODE_IF &&
                                inst->opcode != BRW_OPCODE_WHILE))) {
         fprintf(file, ".f%d.%d", inst->flag_subreg / 2, inst->flag_subreg % 2);
      }
   }
   fprintf(file, " ");

   switch (inst->dst.file) {
   case VGRF:
      fprintf(file, "vgrf%d", inst->dst.nr);
      break;
   case FIXED_GRF:
      fprintf(file, "g%d", inst->dst.nr);
      break;
   case MRF:
      fprintf(file, "m%d", inst->dst.nr);
      break;
   case ARF:
      switch (inst->dst.nr) {
      case BRW_ARF_NULL:
         fprintf(file, "null");
         break;
      case BRW_ARF_ADDRESS:
         fprintf(file, "a0.%d", inst->dst.subnr);
         break;
      case BRW_ARF_ACCUMULATOR:
         fprintf(file, "acc%d", inst->dst.subnr);
         break;
      case BRW_ARF_FLAG:
         fprintf(file, "f%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      default:
         fprintf(file, "arf%d.%d", inst->dst.nr & 0xf, inst->dst.subnr);
         break;
      }
      break;
   case BAD_FILE:
      fprintf(file, "(null)");
      break;
   case IMM:
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }
   if (inst->dst.offset ||
       (inst->dst.file == VGRF &&
        alloc.sizes[inst->dst.nr] * REG_SIZE != inst->size_written)) {
      const unsigned reg_size = (inst->dst.file == UNIFORM ? 16 : REG_SIZE);
      fprintf(file, "+%d.%d", inst->dst.offset / reg_size,
              inst->dst.offset % reg_size);
   }
   if (inst->dst.writemask != WRITEMASK_XYZW) {
      fprintf(file, ".");
      if (inst->dst.writemask & 1)
         fprintf(file, "x");
      if (inst->dst.writemask & 2)
         fprintf(file, "y");
      if (inst->dst.writemask & 4)
         fprintf(file, "z");
      if (inst->dst.writemask & 8)
         fprintf(file, "w");
   }
   fprintf(file, ":%s", brw_reg_type_to_letters(inst->dst.type));

   if (inst->src[0].file != BAD_FILE)
      fprintf(file, ", ");

   for (int i = 0; i < 3 && inst->src[i].file != BAD_FILE; i++) {
      if (inst->src[i].negate)
         fprintf(file, "-");
      if (inst->src[i].abs)
         fprintf(file, "|");
      switch (inst->src[i].file) {
      case VGRF:
         fprintf(file, "vgrf%d", inst->src[i].nr);
         break;
      case FIXED_GRF:
         fprintf(file, "g%d.%d", inst->src[i].nr, inst->src[i].subnr);
         break;
      case ATTR:
         fprintf(file, "attr%d", inst->src[i].nr);
         break;
      case UNIFORM:
         fprintf(file, "u%d", inst->src[i].nr);
         break;
      case IMM:
         switch (inst->src[i].type) {
         case BRW_REGISTER_TYPE_F:
            fprintf(file, "%fF", inst->src[i].f);
            break;
         case BRW_REGISTER_TYPE_DF:
            fprintf(file, "%fDF", inst->src[i].df);
            break;
         case BRW_REGISTER_TYPE_D:
            fprintf(file, "%dD", inst->src[i].d);
            break;
         case BRW_REGISTER_TYPE_UD:
            fprintf(file, "%uU", inst->src[i].ud);
            break;
         case BRW_REGISTER_TYPE_VF:
            fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
                    brw_vf_to_float((inst->src[i].ud >>  0) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >>  8) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 16) & 0xff),
                    brw_vf_to_float((inst->src[i].ud >> 24) & 0xff));
            break;
         default:
            fprintf(file, "???");
            break;
         }
         break;
      case ARF:
         switch (inst->src[i].nr) {
         case BRW_ARF_NULL:
            fprintf(file, "null");
            break;
         case BRW_ARF_ADDRESS:
            fprintf(file, "a0.%d", inst->src[i].subnr);
            break;
         case BRW_ARF_ACCUMULATOR:
            fprintf(file, "acc%d", inst->src[i].subnr);
            break;
         case BRW_ARF_FLAG:
            fprintf(file, "f%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         default:
            fprintf(file, "arf%d.%d", inst->src[i].nr & 0xf, inst->src[i].subnr);
            break;
         }
         break;
      case BAD_FILE:
         fprintf(file, "(null)");
         break;
      case MRF:
         unreachable("not reached");
      }

      if (inst->src[i].offset ||
          (inst->src[i].file == VGRF &&
           alloc.sizes[inst->src[i].nr] * REG_SIZE != inst->size_read(i))) {
         const unsigned reg_size = (inst->src[i].file == UNIFORM ? 16 : REG_SIZE);
         fprintf(file, "+%d.%d", inst->src[i].offset / reg_size,
                 inst->src[i].offset % reg_size);
      }

      if (inst->src[i].file != IMM) {
         static const char *chans[4] = {"x", "y", "z", "w"};
         fprintf(file, ".");
         for (int c = 0; c < 4; c++) {
            fprintf(file, "%s", chans[BRW_GET_SWZ(inst->src[i].swizzle, c)]);
         }
      }

      if (inst->src[i].abs)
         fprintf(file, "|");

      if (inst->src[i].file != IMM) {
         fprintf(file, ":%s", brw_reg_type_to_letters(inst->src[i].type));
      }

      if (i < 2 && inst->src[i + 1].file != BAD_FILE)
         fprintf(file, ", ");
   }

   if (inst->force_writemask_all)
      fprintf(file, " NoMask");

   if (inst->exec_size != 8)
      fprintf(file, " group%d", inst->group);

   fprintf(file, "\n");
}

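/**
 * Lowers ATTR file sources to the fixed hardware GRFs where the vertex
 * attributes live in the thread payload, directly after the push constants.
 */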
1790 int
setup_attributes(int payload_reg)1791 vec4_vs_visitor::setup_attributes(int payload_reg)
1792 {
1793 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
1794 for (int i = 0; i < 3; i++) {
1795 if (inst->src[i].file == ATTR) {
1796 assert(inst->src[i].offset % REG_SIZE == 0);
1797 int grf = payload_reg + inst->src[i].nr +
1798 inst->src[i].offset / REG_SIZE;
1799
1800 struct brw_reg reg = brw_vec8_grf(grf, 0);
1801 reg.swizzle = inst->src[i].swizzle;
1802 reg.type = inst->src[i].type;
1803 reg.abs = inst->src[i].abs;
1804 reg.negate = inst->src[i].negate;
1805 inst->src[i] = reg;
1806 }
1807 }
1808 }
1809
1810 return payload_reg + vs_prog_data->nr_attribute_slots;
1811 }
1812
1813 void
setup_push_ranges()1814 vec4_visitor::setup_push_ranges()
1815 {
1816 /* Only allow 32 registers (256 uniform components) as push constants,
1817 * which is the limit on gfx6.
1818 *
1819 * If changing this value, note the limitation about total_regs in
1820 * brw_curbe.c.
1821 */
1822 const unsigned max_push_length = 32;
1823
1824 push_length = DIV_ROUND_UP(prog_data->base.nr_params, 8);
1825 push_length = MIN2(push_length, max_push_length);
1826
1827 /* Shrink UBO push ranges so it all fits in max_push_length */
1828 for (unsigned i = 0; i < 4; i++) {
1829 struct brw_ubo_range *range = &prog_data->base.ubo_ranges[i];
1830
1831 if (push_length + range->length > max_push_length)
1832 range->length = max_push_length - push_length;
1833
1834 push_length += range->length;
1835 }
1836 assert(push_length <= max_push_length);
1837 }
1838
1839 int
setup_uniforms(int reg)1840 vec4_visitor::setup_uniforms(int reg)
1841 {
1842 /* It's possible that uniform compaction will shrink further than expected
1843 * so we re-compute the layout and set up our UBO push starts.
1844 */
1845 const unsigned old_push_length = push_length;
1846 push_length = DIV_ROUND_UP(prog_data->base.nr_params, 8);
1847 for (unsigned i = 0; i < 4; i++) {
1848 ubo_push_start[i] = push_length;
1849 push_length += stage_prog_data->ubo_ranges[i].length;
1850 }
1851 assert(push_length <= old_push_length);
1852 if (push_length < old_push_length)
1853 assert(compiler->compact_params);
1854
1855 /* The pre-gfx6 VS requires that some push constants get loaded no
1856 * matter what, or the GPU would hang.
1857 */
1858 if (devinfo->ver < 6 && push_length == 0) {
1859 brw_stage_prog_data_add_params(stage_prog_data, 4);
1860 for (unsigned int i = 0; i < 4; i++) {
1861 unsigned int slot = this->uniforms * 4 + i;
1862 stage_prog_data->param[slot] = BRW_PARAM_BUILTIN_ZERO;
1863 }
1864 push_length = 1;
1865 }
1866
1867 prog_data->base.dispatch_grf_start_reg = reg;
1868 prog_data->base.curb_read_length = push_length;
1869
1870 return reg + push_length;
1871 }
1872
1873 void
1874 vec4_vs_visitor::setup_payload(void)
1875 {
1876 int reg = 0;
1877
1878 /* The payload always carries important data in g0, which contains
1879 * the URB handles that are passed on to the URB write at the end
1880 * of the thread. So, we always start push constants at g1.
1881 */
1882 reg++;
1883
1884 reg = setup_uniforms(reg);
1885
1886 reg = setup_attributes(reg);
1887
1888 this->first_non_payload_grf = reg;
1889 }
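
/* Resulting payload layout (a sketch derived from the code above):
*
*    g0                      URB handles (always present)
*    g1 .. g(push_length)    push constants (curb_read_length registers)
*    following registers     vertex attributes
*
* first_non_payload_grf then points at the first register after the
* attributes.
*/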
1890
1891 bool
1892 vec4_visitor::lower_minmax()
1893 {
1894 assert(devinfo->ver < 6);
1895
1896 bool progress = false;
1897
1898 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
1899 const vec4_builder ibld(this, block, inst);
1900
1901 if (inst->opcode == BRW_OPCODE_SEL &&
1902 inst->predicate == BRW_PREDICATE_NONE) {
1903 /* If src1 is an immediate value that is not NaN, then it can't be
1904 * NaN. In that case, emit CMP because it is much better for cmod
1905 * propagation. Likewise if src1 is not float. Gfx4 and Gfx5 don't
1906 * support HF or DF, so it is not necessary to check for those.
1907 */
1908 if (inst->src[1].type != BRW_REGISTER_TYPE_F ||
1909 (inst->src[1].file == IMM && !isnan(inst->src[1].f))) {
1910 ibld.CMP(ibld.null_reg_d(), inst->src[0], inst->src[1],
1911 inst->conditional_mod);
1912 } else {
1913 ibld.CMPN(ibld.null_reg_d(), inst->src[0], inst->src[1],
1914 inst->conditional_mod);
1915 }
1916 inst->predicate = BRW_PREDICATE_NORMAL;
1917 inst->conditional_mod = BRW_CONDITIONAL_NONE;
1918
1919 progress = true;
1920 }
1921 }
1922
1923 if (progress)
1924 invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
1925
1926 return progress;
1927 }
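
/* Illustrative before/after (not part of the original source): an
* unpredicated
*
*    sel.ge dst, src0, src1
*
* becomes
*
*    cmp.ge.f0 null, src0, src1   (cmpn.ge if src1 is float and may be NaN)
*    (+f0) sel dst, src0, src1
*
* which is how MIN/MAX selects have to be expressed before gfx6.
*/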
1928
1929 src_reg
1930 vec4_visitor::get_timestamp()
1931 {
1932 assert(devinfo->ver == 7);
1933
1934 src_reg ts = src_reg(brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
1935 BRW_ARF_TIMESTAMP,
1936 0,
1937 0,
1938 0,
1939 BRW_REGISTER_TYPE_UD,
1940 BRW_VERTICAL_STRIDE_0,
1941 BRW_WIDTH_4,
1942 BRW_HORIZONTAL_STRIDE_4,
1943 BRW_SWIZZLE_XYZW,
1944 WRITEMASK_XYZW));
1945
1946 dst_reg dst = dst_reg(this, glsl_type::uvec4_type);
1947
1948 vec4_instruction *mov = emit(MOV(dst, ts));
1949 /* We want to read the fields we care about (mostly field 0, but also
1950 * field 2) even if they are not enabled in the dispatch.
1951 */
1952 mov->force_writemask_all = true;
1953
1954 return src_reg(dst);
1955 }
1956
1957 void
1958 vec4_visitor::emit_shader_time_begin()
1959 {
1960 current_annotation = "shader time start";
1961 shader_start_time = get_timestamp();
1962 }
1963
1964 void
1965 vec4_visitor::emit_shader_time_end()
1966 {
1967 current_annotation = "shader time end";
1968 src_reg shader_end_time = get_timestamp();
1969
1970
1971 /* Check that there weren't any timestamp reset events (assuming these
1972 * were the only two timestamp reads that happened).
1973 */
1974 src_reg reset_end = shader_end_time;
1975 reset_end.swizzle = BRW_SWIZZLE_ZZZZ;
1976 vec4_instruction *test = emit(AND(dst_null_ud(), reset_end, brw_imm_ud(1u)));
1977 test->conditional_mod = BRW_CONDITIONAL_Z;
1978
1979 emit(IF(BRW_PREDICATE_NORMAL));
1980
1981 /* Take the current timestamp and get the delta. */
1982 shader_start_time.negate = true;
1983 dst_reg diff = dst_reg(this, glsl_type::uint_type);
1984 emit(ADD(diff, shader_start_time, shader_end_time));
1985
1986 /* If there were no instructions between the two timestamp gets, the diff
1987 * is 2 cycles. Remove that overhead, so it can be ignored when
1988 * trying to determine the time taken for single instructions.
1989 */
1990 emit(ADD(diff, src_reg(diff), brw_imm_ud(-2u)));
1991
1992 emit_shader_time_write(0, src_reg(diff));
1993 emit_shader_time_write(1, brw_imm_ud(1u));
1994 emit(BRW_OPCODE_ELSE);
1995 emit_shader_time_write(2, brw_imm_ud(1u));
1996 emit(BRW_OPCODE_ENDIF);
1997 }
1998
1999 void
2000 vec4_visitor::emit_shader_time_write(int shader_time_subindex, src_reg value)
2001 {
2002 dst_reg dst =
2003 dst_reg(this, glsl_type::get_array_instance(glsl_type::vec4_type, 2));
2004
2005 dst_reg offset = dst;
2006 dst_reg time = dst;
2007 time.offset += REG_SIZE;
2008
2009 offset.type = BRW_REGISTER_TYPE_UD;
2010 int index = shader_time_index * 3 + shader_time_subindex;
2011 emit(MOV(offset, brw_imm_d(index * BRW_SHADER_TIME_STRIDE)));
2012
2013 time.type = BRW_REGISTER_TYPE_UD;
2014 emit(MOV(time, value));
2015
2016 vec4_instruction *inst =
2017 emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
2018 inst->mlen = 2;
2019 }
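
/* Message layout sketch (derived from the code above): the two-register
* payload sent with SHADER_OPCODE_SHADER_TIME_ADD is
*
*    reg 0: buffer offset, index * BRW_SHADER_TIME_STRIDE (UD)
*    reg 1: the value to accumulate (UD)
*
* where index = shader_time_index * 3 + shader_time_subindex selects one
* of the three slots (time delta, write count, reset count) written by
* emit_shader_time_end().
*/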
2020
2021 static bool
2022 is_align1_df(vec4_instruction *inst)
2023 {
2024 switch (inst->opcode) {
2025 case VEC4_OPCODE_DOUBLE_TO_F32:
2026 case VEC4_OPCODE_DOUBLE_TO_D32:
2027 case VEC4_OPCODE_DOUBLE_TO_U32:
2028 case VEC4_OPCODE_TO_DOUBLE:
2029 case VEC4_OPCODE_PICK_LOW_32BIT:
2030 case VEC4_OPCODE_PICK_HIGH_32BIT:
2031 case VEC4_OPCODE_SET_LOW_32BIT:
2032 case VEC4_OPCODE_SET_HIGH_32BIT:
2033 return true;
2034 default:
2035 return false;
2036 }
2037 }
2038
2039 /**
2040 * Three-source instructions must have a GRF/MRF destination register.
2041 * ARF NULL is not allowed. Fix that up by allocating a temporary GRF.
2042 */
2043 void
2044 vec4_visitor::fixup_3src_null_dest()
2045 {
2046 bool progress = false;
2047
2048 foreach_block_and_inst_safe (block, vec4_instruction, inst, cfg) {
2049 if (inst->is_3src(devinfo) && inst->dst.is_null()) {
2050 const unsigned size_written = type_sz(inst->dst.type);
2051 const unsigned num_regs = DIV_ROUND_UP(size_written, REG_SIZE);
2052
2053 inst->dst = retype(dst_reg(VGRF, alloc.allocate(num_regs)),
2054 inst->dst.type);
2055 progress = true;
2056 }
2057 }
2058
2059 if (progress)
2060 invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL |
2061 DEPENDENCY_VARIABLES);
2062 }
2063
2064 void
2065 vec4_visitor::convert_to_hw_regs()
2066 {
2067 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
2068 for (int i = 0; i < 3; i++) {
2069 class src_reg &src = inst->src[i];
2070 struct brw_reg reg;
2071 switch (src.file) {
2072 case VGRF: {
2073 reg = byte_offset(brw_vecn_grf(4, src.nr, 0), src.offset);
2074 reg.type = src.type;
2075 reg.abs = src.abs;
2076 reg.negate = src.negate;
2077 break;
2078 }
2079
2080 case UNIFORM: {
2081 if (src.nr >= UBO_START) {
2082 reg = byte_offset(brw_vec4_grf(
2083 prog_data->base.dispatch_grf_start_reg +
2084 ubo_push_start[src.nr - UBO_START] +
2085 src.offset / 32, 0),
2086 src.offset % 32);
2087 } else {
2088 reg = byte_offset(brw_vec4_grf(
2089 prog_data->base.dispatch_grf_start_reg +
2090 src.nr / 2, src.nr % 2 * 4),
2091 src.offset);
2092 }
2093 reg = stride(reg, 0, 4, 1);
2094 reg.type = src.type;
2095 reg.abs = src.abs;
2096 reg.negate = src.negate;
2097
2098 /* This should have been moved to pull constants. */
2099 assert(!src.reladdr);
2100 break;
2101 }
2102
2103 case FIXED_GRF:
2104 if (type_sz(src.type) == 8) {
2105 reg = src.as_brw_reg();
2106 break;
2107 }
2108 FALLTHROUGH;
2109 case ARF:
2110 case IMM:
2111 continue;
2112
2113 case BAD_FILE:
2114 /* Probably unused. */
2115 reg = brw_null_reg();
2116 reg = retype(reg, src.type);
2117 break;
2118
2119 case MRF:
2120 case ATTR:
2121 unreachable("not reached");
2122 }
2123
2124 apply_logical_swizzle(&reg, inst, i);
2125 src = reg;
2126
2127 /* From IVB PRM, vol4, part3, "General Restrictions on Regioning
2128 * Parameters":
2129 *
2130 * "If ExecSize = Width and HorzStride ≠ 0, VertStride must be set
2131 * to Width * HorzStride."
2132 *
2133 * We can break this rule with DF sources on DF align1
2134 * instructions, because the exec_size would be 4 and width is 4.
2135 * Since we know we are not accessing the next GRF, it is safe to
2136 * set the vstride to the value given by the rule itself.
2137 */
2138 if (is_align1_df(inst) && (cvt(inst->exec_size) - 1) == src.width)
2139 src.vstride = src.width + src.hstride;
2140 }
2141
2142 if (inst->is_3src(devinfo)) {
2143 /* 3-src instructions with scalar sources support arbitrary subnr,
2144 * but don't actually use swizzles. Convert swizzle into subnr.
2145 * Skip this for double-precision instructions: RepCtrl=1 is not
2146 * allowed for them and needs special handling.
2147 */
2148 for (int i = 0; i < 3; i++) {
2149 if (inst->src[i].vstride == BRW_VERTICAL_STRIDE_0 &&
2150 type_sz(inst->src[i].type) < 8) {
2151 assert(brw_is_single_value_swizzle(inst->src[i].swizzle));
2152 inst->src[i].subnr += 4 * BRW_GET_SWZ(inst->src[i].swizzle, 0);
2153 }
2154 }
2155 }
2156
2157 dst_reg &dst = inst->dst;
2158 struct brw_reg reg;
2159
2160 switch (inst->dst.file) {
2161 case VGRF:
2162 reg = byte_offset(brw_vec8_grf(dst.nr, 0), dst.offset);
2163 reg.type = dst.type;
2164 reg.writemask = dst.writemask;
2165 break;
2166
2167 case MRF:
2168 reg = byte_offset(brw_message_reg(dst.nr), dst.offset);
2169 assert((reg.nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->ver));
2170 reg.type = dst.type;
2171 reg.writemask = dst.writemask;
2172 break;
2173
2174 case ARF:
2175 case FIXED_GRF:
2176 reg = dst.as_brw_reg();
2177 break;
2178
2179 case BAD_FILE:
2180 reg = brw_null_reg();
2181 reg = retype(reg, dst.type);
2182 break;
2183
2184 case IMM:
2185 case ATTR:
2186 case UNIFORM:
2187 unreachable("not reached");
2188 }
2189
2190 dst = reg;
2191 }
2192 }
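
/* Worked UNIFORM example (illustrative, not from the original source):
* with dispatch_grf_start_reg == 1 and a plain uniform with nr == 5 and
* offset == 0, the source maps to
*
*    brw_vec4_grf(1 + 5 / 2, (5 % 2) * 4)
*
* i.e. the second vec4 half of g3, with a <0,4,1> region so every channel
* reads that same vec4.
*/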
2193
2194 static bool
2195 stage_uses_interleaved_attributes(unsigned stage,
2196 enum shader_dispatch_mode dispatch_mode)
2197 {
2198 switch (stage) {
2199 case MESA_SHADER_TESS_EVAL:
2200 return true;
2201 case MESA_SHADER_GEOMETRY:
2202 return dispatch_mode != DISPATCH_MODE_4X2_DUAL_OBJECT;
2203 default:
2204 return false;
2205 }
2206 }
2207
2208 /**
2209 * Get the closest native SIMD width supported by the hardware for instruction
2210 * \p inst. The instruction will be left untouched by
2211 * vec4_visitor::lower_simd_width() if the returned value matches the
2212 * instruction's original execution size.
2213 */
2214 static unsigned
2215 get_lowered_simd_width(const struct intel_device_info *devinfo,
2216 enum shader_dispatch_mode dispatch_mode,
2217 unsigned stage, const vec4_instruction *inst)
2218 {
2219 /* Do not split some instructions that require special handling */
2220 switch (inst->opcode) {
2221 case SHADER_OPCODE_GFX4_SCRATCH_READ:
2222 case SHADER_OPCODE_GFX4_SCRATCH_WRITE:
2223 return inst->exec_size;
2224 default:
2225 break;
2226 }
2227
2228 unsigned lowered_width = MIN2(16, inst->exec_size);
2229
2230 /* We need to split some cases of double-precision instructions that write
2231 * 2 registers. We only need to care about this in gfx7 because that is the
2232 * only hardware that implements fp64 in Align16.
2233 */
2234 if (devinfo->ver == 7 && inst->size_written > REG_SIZE) {
2235 /* Align16 8-wide double-precision SEL does not work well. Verified
2236 * empirically.
2237 */
2238 if (inst->opcode == BRW_OPCODE_SEL && type_sz(inst->dst.type) == 8)
2239 lowered_width = MIN2(lowered_width, 4);
2240
2241 /* HSW PRM, 3D Media GPGPU Engine, Region Alignment Rules for Direct
2242 * Register Addressing:
2243 *
2244 * "When destination spans two registers, the source MUST span two
2245 * registers."
2246 */
2247 for (unsigned i = 0; i < 3; i++) {
2248 if (inst->src[i].file == BAD_FILE)
2249 continue;
2250 if (inst->size_read(i) <= REG_SIZE)
2251 lowered_width = MIN2(lowered_width, 4);
2252
2253 /* Interleaved attribute setups use a vertical stride of 0, which
2254 * makes them hit the associated instruction decompression bug in gfx7.
2255 * Split them to prevent this.
2256 */
2257 if (inst->src[i].file == ATTR &&
2258 stage_uses_interleaved_attributes(stage, dispatch_mode))
2259 lowered_width = MIN2(lowered_width, 4);
2260 }
2261 }
2262
2263 /* IvyBridge can manage a maximum of 4 DFs per SIMD4x2 instruction, since
2264 * it doesn't support compression in Align16 mode, regardless of whether
2265 * force_writemask_all is enabled or disabled (the latter is affected by the
2266 * compressed instruction bug in gfx7, which is another reason to enforce
2267 * this limit).
2268 */
2269 if (devinfo->verx10 == 70 &&
2270 (get_exec_type_size(inst) == 8 || type_sz(inst->dst.type) == 8))
2271 lowered_width = MIN2(lowered_width, 4);
2272
2273 return lowered_width;
2274 }
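
/* Example (illustrative): on IvyBridge (verx10 == 70) a SIMD8 ADD with a
* DF destination both writes more than one register and has an 8-byte
* exec type, so this returns MIN2(8, 4) == 4 and lower_simd_width() will
* split the instruction into two SIMD4 halves.
*/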
2275
2276 static bool
2277 dst_src_regions_overlap(vec4_instruction *inst)
2278 {
2279 if (inst->size_written == 0)
2280 return false;
2281
2282 unsigned dst_start = inst->dst.offset;
2283 unsigned dst_end = dst_start + inst->size_written - 1;
2284 for (int i = 0; i < 3; i++) {
2285 if (inst->src[i].file == BAD_FILE)
2286 continue;
2287
2288 if (inst->dst.file != inst->src[i].file ||
2289 inst->dst.nr != inst->src[i].nr)
2290 continue;
2291
2292 unsigned src_start = inst->src[i].offset;
2293 unsigned src_end = src_start + inst->size_read(i) - 1;
2294
2295 if ((dst_start >= src_start && dst_start <= src_end) ||
2296 (dst_end >= src_start && dst_end <= src_end) ||
2297 (dst_start <= src_start && dst_end >= src_end)) {
2298 return true;
2299 }
2300 }
2301
2302 return false;
2303 }
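
/* Interval example (illustrative): a dst at offset 0 writing 32 bytes
* covers [0, 31]; a source in the same VGRF at offset 16 reading 32 bytes
* covers [16, 47]. dst_end == 31 falls inside [16, 47], so the regions
* overlap and lower_simd_width() must go through a temporary.
*/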
2304
2305 bool
2306 vec4_visitor::lower_simd_width()
2307 {
2308 bool progress = false;
2309
2310 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
2311 const unsigned lowered_width =
2312 get_lowered_simd_width(devinfo, prog_data->dispatch_mode, stage, inst);
2313 assert(lowered_width <= inst->exec_size);
2314 if (lowered_width == inst->exec_size)
2315 continue;
2316
2317 /* We need to deal with source / destination overlaps when splitting.
2318 * The hardware supports reading from and writing to the same register
2319 * in the same instruction, but we need to be careful that each split
2320 * instruction we produce does not corrupt the source of the next.
2321 *
2322 * The easiest way to handle this is to make the split instructions write
2323 * to temporaries if there is a src/dst overlap and then move from the
2324 * temporaries to the original destination. We also need to consider
2325 * instructions that do partial writes via align1 opcodes, in which case
2326 * we need to make sure that we initialize the temporary with the
2327 * value of the instruction's dst.
2328 */
2329 bool needs_temp = dst_src_regions_overlap(inst);
2330 for (unsigned n = 0; n < inst->exec_size / lowered_width; n++) {
2331 unsigned channel_offset = lowered_width * n;
2332
2333 unsigned size_written = lowered_width * type_sz(inst->dst.type);
2334
2335 /* Create the split instruction from the original so that we copy all
2336 * relevant instruction fields, then set the width and calculate the
2337 * new dst/src regions.
2338 */
2339 vec4_instruction *linst = new(mem_ctx) vec4_instruction(*inst);
2340 linst->exec_size = lowered_width;
2341 linst->group = channel_offset;
2342 linst->size_written = size_written;
2343
2344 /* Compute split dst region */
2345 dst_reg dst;
2346 if (needs_temp) {
2347 unsigned num_regs = DIV_ROUND_UP(size_written, REG_SIZE);
2348 dst = retype(dst_reg(VGRF, alloc.allocate(num_regs)),
2349 inst->dst.type);
2350 if (inst->is_align1_partial_write()) {
2351 vec4_instruction *copy = MOV(dst, src_reg(inst->dst));
2352 copy->exec_size = lowered_width;
2353 copy->group = channel_offset;
2354 copy->size_written = size_written;
2355 inst->insert_before(block, copy);
2356 }
2357 } else {
2358 dst = horiz_offset(inst->dst, channel_offset);
2359 }
2360 linst->dst = dst;
2361
2362 /* Compute split source regions */
2363 for (int i = 0; i < 3; i++) {
2364 if (linst->src[i].file == BAD_FILE)
2365 continue;
2366
2367 bool is_interleaved_attr =
2368 linst->src[i].file == ATTR &&
2369 stage_uses_interleaved_attributes(stage,
2370 prog_data->dispatch_mode);
2371
2372 if (!is_uniform(linst->src[i]) && !is_interleaved_attr)
2373 linst->src[i] = horiz_offset(linst->src[i], channel_offset);
2374 }
2375
2376 inst->insert_before(block, linst);
2377
2378 /* If we used a temporary to store the result of the split
2379 * instruction, copy the result to the original destination
2380 */
2381 if (needs_temp) {
2382 vec4_instruction *mov =
2383 MOV(offset(inst->dst, lowered_width, n), src_reg(dst));
2384 mov->exec_size = lowered_width;
2385 mov->group = channel_offset;
2386 mov->size_written = size_written;
2387 mov->predicate = inst->predicate;
2388 inst->insert_before(block, mov);
2389 }
2390 }
2391
2392 inst->remove(block);
2393 progress = true;
2394 }
2395
2396 if (progress)
2397 invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);
2398
2399 return progress;
2400 }
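
/* Sketch of the lowering (illustrative, not from the original source):
* a SIMD8 instruction split to SIMD4 whose dst overlaps a src becomes
*
*    op(4)  tmp0, src...         group0
*    mov(4) dst+0, tmp0          group0
*    op(4)  tmp1, src+4...       group4
*    mov(4) dst+4, tmp1          group4
*
* with an additional mov from the original dst into each temporary first
* when the instruction is an align1 partial write.
*/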
2401
2402 static brw_predicate
2403 scalarize_predicate(brw_predicate predicate, unsigned writemask)
2404 {
2405 if (predicate != BRW_PREDICATE_NORMAL)
2406 return predicate;
2407
2408 switch (writemask) {
2409 case WRITEMASK_X:
2410 return BRW_PREDICATE_ALIGN16_REPLICATE_X;
2411 case WRITEMASK_Y:
2412 return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
2413 case WRITEMASK_Z:
2414 return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
2415 case WRITEMASK_W:
2416 return BRW_PREDICATE_ALIGN16_REPLICATE_W;
2417 default:
2418 unreachable("invalid writemask");
2419 }
2420 }
2421
2422 /* Gfx7 has a hardware decompression bug that we can exploit to represent
2423 * a handful of additional swizzles natively.
2424 */
2425 static bool
2426 is_gfx7_supported_64bit_swizzle(vec4_instruction *inst, unsigned arg)
2427 {
2428 switch (inst->src[arg].swizzle) {
2429 case BRW_SWIZZLE_XXXX:
2430 case BRW_SWIZZLE_YYYY:
2431 case BRW_SWIZZLE_ZZZZ:
2432 case BRW_SWIZZLE_WWWW:
2433 case BRW_SWIZZLE_XYXY:
2434 case BRW_SWIZZLE_YXYX:
2435 case BRW_SWIZZLE_ZWZW:
2436 case BRW_SWIZZLE_WZWZ:
2437 return true;
2438 default:
2439 return false;
2440 }
2441 }
2442
2443 /* 64-bit sources use regions with a width of 2. These 2 elements in each row
2444 * can be addressed using 32-bit swizzles (which is what the hardware supports)
2445 * but it also means that the swizzle we apply on the first two components of a
2446 * dvec4 is coupled with the swizzle we use for the last 2. In other words,
2447 * only some specific swizzle combinations can be natively supported.
2448 *
2449 * FIXME: we can go a step further and implement even more swizzle
2450 * variations using only partial scalarization.
2451 *
2452 * For more details see:
2453 * https://bugs.freedesktop.org/show_bug.cgi?id=92760#c82
2454 */
2455 bool
2456 vec4_visitor::is_supported_64bit_region(vec4_instruction *inst, unsigned arg)
2457 {
2458 const src_reg &src = inst->src[arg];
2459 assert(type_sz(src.type) == 8);
2460
2461 /* Uniform regions have a vstride=0. Because we use 2-wide rows with
2462 * 64-bit regions, we cannot access components Z/W, so we return
2463 * false for any such case. Interleaved attributes will also be
2464 * mapped to GRF registers with a vstride of 0, so apply the same
2465 * treatment.
2466 */
2467 if ((is_uniform(src) ||
2468 (stage_uses_interleaved_attributes(stage, prog_data->dispatch_mode) &&
2469 src.file == ATTR)) &&
2470 (brw_mask_for_swizzle(src.swizzle) & 12))
2471 return false;
2472
2473 switch (src.swizzle) {
2474 case BRW_SWIZZLE_XYZW:
2475 case BRW_SWIZZLE_XXZZ:
2476 case BRW_SWIZZLE_YYWW:
2477 case BRW_SWIZZLE_YXWZ:
2478 return true;
2479 default:
2480 return devinfo->ver == 7 && is_gfx7_supported_64bit_swizzle(inst, arg);
2481 }
2482 }
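
/* Example (illustrative): BRW_SWIZZLE_XXZZ is supported because each
* 2-wide row swizzles within itself (XX within the first dvec2, ZZ within
* the second), while a swizzle like XZXZ mixes rows and has to be
* scalarized. For a uniform source (vstride 0), any swizzle that reads
* Z or W is rejected because only the first dvec2 row is addressable.
*/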
2483
2484 bool
2485 vec4_visitor::scalarize_df()
2486 {
2487 bool progress = false;
2488
2489 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
2490 /* Skip DF instructions that operate in Align1 mode */
2491 if (is_align1_df(inst))
2492 continue;
2493
2494 /* Check if this is a double-precision instruction */
2495 bool is_double = type_sz(inst->dst.type) == 8;
2496 for (int arg = 0; !is_double && arg < 3; arg++) {
2497 is_double = inst->src[arg].file != BAD_FILE &&
2498 type_sz(inst->src[arg].type) == 8;
2499 }
2500
2501 if (!is_double)
2502 continue;
2503
2504 /* Skip the lowering for specific regioning scenarios that we can
2505 * support natively.
2506 */
2507 bool skip_lowering = true;
2508
2509 /* XY and ZW writemasks operate in 32-bit, which means that they don't
2510 * have a native 64-bit representation and they should always be split.
2511 */
2512 if (inst->dst.writemask == WRITEMASK_XY ||
2513 inst->dst.writemask == WRITEMASK_ZW) {
2514 skip_lowering = false;
2515 } else {
2516 for (unsigned i = 0; i < 3; i++) {
2517 if (inst->src[i].file == BAD_FILE || type_sz(inst->src[i].type) < 8)
2518 continue;
2519 skip_lowering = skip_lowering && is_supported_64bit_region(inst, i);
2520 }
2521 }
2522
2523 if (skip_lowering)
2524 continue;
2525
2526 /* Generate scalar instructions for each enabled channel */
2527 for (unsigned chan = 0; chan < 4; chan++) {
2528 unsigned chan_mask = 1 << chan;
2529 if (!(inst->dst.writemask & chan_mask))
2530 continue;
2531
2532 vec4_instruction *scalar_inst = new(mem_ctx) vec4_instruction(*inst);
2533
2534 for (unsigned i = 0; i < 3; i++) {
2535 unsigned swz = BRW_GET_SWZ(inst->src[i].swizzle, chan);
2536 scalar_inst->src[i].swizzle = BRW_SWIZZLE4(swz, swz, swz, swz);
2537 }
2538
2539 scalar_inst->dst.writemask = chan_mask;
2540
2541 if (inst->predicate != BRW_PREDICATE_NONE) {
2542 scalar_inst->predicate =
2543 scalarize_predicate(inst->predicate, chan_mask);
2544 }
2545
2546 inst->insert_before(block, scalar_inst);
2547 }
2548
2549 inst->remove(block);
2550 progress = true;
2551 }
2552
2553 if (progress)
2554 invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
2555
2556 return progress;
2557 }
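
/* Before/after sketch (illustrative): a DF instruction such as
*
*    add dst.xz:df, src0.xyzw, src1.xyzw
*
* is rewritten as one instruction per enabled channel,
*
*    add dst.x:df, src0.xxxx, src1.xxxx
*    add dst.z:df, src0.zzzz, src1.zzzz
*
* with any normal predicate replaced by the matching ALIGN16 replicate
* predicate for the channel being written.
*/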
2558
2559 bool
2560 vec4_visitor::lower_64bit_mad_to_mul_add()
2561 {
2562 bool progress = false;
2563
2564 foreach_block_and_inst_safe(block, vec4_instruction, inst, cfg) {
2565 if (inst->opcode != BRW_OPCODE_MAD)
2566 continue;
2567
2568 if (type_sz(inst->dst.type) != 8)
2569 continue;
2570
2571 dst_reg mul_dst = dst_reg(this, glsl_type::dvec4_type);
2572
2573 /* Use the copy constructor so we copy all relevant instruction fields
2574 * from the original mad into the add and mul instructions
2575 */
2576 vec4_instruction *mul = new(mem_ctx) vec4_instruction(*inst);
2577 mul->opcode = BRW_OPCODE_MUL;
2578 mul->dst = mul_dst;
2579 mul->src[0] = inst->src[1];
2580 mul->src[1] = inst->src[2];
2581 mul->src[2].file = BAD_FILE;
2582
2583 vec4_instruction *add = new(mem_ctx) vec4_instruction(*inst);
2584 add->opcode = BRW_OPCODE_ADD;
2585 add->src[0] = src_reg(mul_dst);
2586 add->src[1] = inst->src[0];
2587 add->src[2].file = BAD_FILE;
2588
2589 inst->insert_before(block, mul);
2590 inst->insert_before(block, add);
2591 inst->remove(block);
2592
2593 progress = true;
2594 }
2595
2596 if (progress)
2597 invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);
2598
2599 return progress;
2600 }
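
/* Resulting sequence (sketch): since MAD computes
* dst = src0 + src1 * src2, the pass emits
*
*    mul tmp:df, src1, src2
*    add dst:df, tmp, src0
*
* where tmp is a fresh dvec4 VGRF.
*/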
2601
2602 /* The align16 hardware can only do 32-bit swizzle channels, so we need to
2603 * translate the logical 64-bit swizzle channels that we use in the Vec4 IR
2604 * to 32-bit swizzle channels in hardware registers.
2605 *
2606 * @inst and @arg identify the original vec4 IR source operand we need to
2607 * translate the swizzle for and @hw_reg is the hardware register where we
2608 * will write the hardware swizzle to use.
2609 *
2610 * This pass assumes that Align16/DF instructions have been fully scalarized
2611 * previously so there is just one 64-bit swizzle channel to deal with for any
2612 * given Vec4 IR source.
2613 */
2614 void
2615 vec4_visitor::apply_logical_swizzle(struct brw_reg *hw_reg,
2616 vec4_instruction *inst, int arg)
2617 {
2618 src_reg reg = inst->src[arg];
2619
2620 if (reg.file == BAD_FILE || reg.file == BRW_IMMEDIATE_VALUE)
2621 return;
2622
2623 /* If this is not a 64-bit operand or this is a scalar instruction we don't
2624 * need to do anything about the swizzles.
2625 */
2626 if (type_sz(reg.type) < 8 || is_align1_df(inst)) {
2627 hw_reg->swizzle = reg.swizzle;
2628 return;
2629 }
2630
2631 /* Take the 64-bit logical swizzle channel and translate it to 32-bit */
2632 assert(brw_is_single_value_swizzle(reg.swizzle) ||
2633 is_supported_64bit_region(inst, arg));
2634
2635 /* Apply the region <2, 2, 1> for GRF or <0, 2, 1> for uniforms, as align16
2636 * HW can only do 32-bit swizzle channels.
2637 */
2638 hw_reg->width = BRW_WIDTH_2;
2639
2640 if (is_supported_64bit_region(inst, arg) &&
2641 !is_gfx7_supported_64bit_swizzle(inst, arg)) {
2642 /* Supported 64-bit swizzles are those such that their first two
2643 * components, when expanded to 32-bit swizzles, match the semantics
2644 * of the original 64-bit swizzle with 2-wide row regioning.
2645 */
2646 unsigned swizzle0 = BRW_GET_SWZ(reg.swizzle, 0);
2647 unsigned swizzle1 = BRW_GET_SWZ(reg.swizzle, 1);
2648 hw_reg->swizzle = BRW_SWIZZLE4(swizzle0 * 2, swizzle0 * 2 + 1,
2649 swizzle1 * 2, swizzle1 * 2 + 1);
2650 } else {
2651 /* If we got here then we have one of the following:
2652 *
2653 * 1. An unsupported swizzle, which should be single-value thanks to the
2654 * scalarization pass.
2655 *
2656 * 2. A gfx7 supported swizzle. These can be single-value or double-value
2657 * swizzles. If the latter, they are never cross-dvec2 channels. For
2658 * these we always need to activate the gfx7 vstride=0 exploit.
2659 */
2660 unsigned swizzle0 = BRW_GET_SWZ(reg.swizzle, 0);
2661 unsigned swizzle1 = BRW_GET_SWZ(reg.swizzle, 1);
2662 assert((swizzle0 < 2) == (swizzle1 < 2));
2663
2664 /* To gain access to Z/W components we need to select the second half
2665 * of the register and then use a X/Y swizzle to select Z/W respectively.
2666 */
2667 if (swizzle0 >= 2) {
2668 *hw_reg = suboffset(*hw_reg, 2);
2669 swizzle0 -= 2;
2670 swizzle1 -= 2;
2671 }
2672
2673 /* All gfx7-specific supported swizzles require the vstride=0 exploit */
2674 if (devinfo->ver == 7 && is_gfx7_supported_64bit_swizzle(inst, arg))
2675 hw_reg->vstride = BRW_VERTICAL_STRIDE_0;
2676
2677 /* Any 64-bit source with an offset at 16B is intended to address the
2678 * second half of a register and needs a vertical stride of 0 so we:
2679 *
2680 * 1. Don't violate register region restrictions.
2681 * 2. Activate the gfx7 instruction decompression bug exploit when
2682 * execsize > 4
2683 */
2684 if (hw_reg->subnr % REG_SIZE == 16) {
2685 assert(devinfo->ver == 7);
2686 hw_reg->vstride = BRW_VERTICAL_STRIDE_0;
2687 }
2688
2689 hw_reg->swizzle = BRW_SWIZZLE4(swizzle0 * 2, swizzle0 * 2 + 1,
2690 swizzle1 * 2, swizzle1 * 2 + 1);
2691 }
2692 }
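
/* Worked example (illustrative): a scalarized 64-bit source with logical
* swizzle ZZZZ takes the else branch: swizzle0 == 2, so the register gets
* suboffset(*hw_reg, 2) to select its second half, and the remaining
* channel index 0 expands to BRW_SWIZZLE4(0, 1, 0, 1) (XYXY), reading
* both 32-bit halves of that double. On gfx7 the vstride=0 exploit is
* enabled on top of this for the gfx7-only swizzles.
*/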
2693
2694 void
2695 vec4_visitor::invalidate_analysis(brw::analysis_dependency_class c)
2696 {
2697 backend_shader::invalidate_analysis(c);
2698 live_analysis.invalidate(c);
2699 }
2700
2701 bool
2702 vec4_visitor::run()
2703 {
2704 if (shader_time_index >= 0)
2705 emit_shader_time_begin();
2706
2707 setup_push_ranges();
2708
2709 if (prog_data->base.zero_push_reg) {
2710 /* push_reg_mask_param is in uint32 params and UNIFORM is in vec4s */
2711 const unsigned mask_param = stage_prog_data->push_reg_mask_param;
2712 src_reg mask = src_reg(dst_reg(UNIFORM, mask_param / 4));
2713 assert(mask_param % 2 == 0); /* Should be 64-bit-aligned */
2714 mask.swizzle = BRW_SWIZZLE4((mask_param + 0) % 4,
2715 (mask_param + 1) % 4,
2716 (mask_param + 0) % 4,
2717 (mask_param + 1) % 4);
2718
2719 emit(VEC4_OPCODE_ZERO_OOB_PUSH_REGS,
2720 dst_reg(VGRF, alloc.allocate(3)), mask);
2721 }
2722
2723 emit_prolog();
2724
2725 emit_nir_code();
2726 if (failed)
2727 return false;
2728 base_ir = NULL;
2729
2730 emit_thread_end();
2731
2732 calculate_cfg();
2733
2734 /* Before any optimization, push array accesses out to scratch
2735 * space where we need them to be. This pass may allocate new
2736 * virtual GRFs, so we want to do it early. It also makes sure
2737 * that we have reladdr computations available for CSE, since we'll
2738 * often do repeated subexpressions for those.
2739 */
2740 move_grf_array_access_to_scratch();
2741 move_uniform_array_access_to_pull_constants();
2742
2743 pack_uniform_registers();
2744 move_push_constants_to_pull_constants();
2745 split_virtual_grfs();
2746
2747 #define OPT(pass, args...) ({ \
2748 pass_num++; \
2749 bool this_progress = pass(args); \
2750 \
2751 if (INTEL_DEBUG(DEBUG_OPTIMIZER) && this_progress) { \
2752 char filename[64]; \
2753 snprintf(filename, 64, "%s-%s-%02d-%02d-" #pass, \
2754 stage_abbrev, nir->info.name, iteration, pass_num); \
2755 \
2756 backend_shader::dump_instructions(filename); \
2757 } \
2758 \
2759 progress = progress || this_progress; \
2760 this_progress; \
2761 })
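
/* Usage note (illustrative): each OPT(pass) call bumps pass_num, folds
* the pass' progress into the do/while loop condition and, when the
* optimizer debug flag is set, dumps the IR to a file named like
*
*    vs-<shader name>-01-03-opt_cse
*
* (stage, name, iteration, pass number) so successive passes can be
* diffed.
*/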
2762
2763
2764 if (INTEL_DEBUG(DEBUG_OPTIMIZER)) {
2765 char filename[64];
2766 snprintf(filename, 64, "%s-%s-00-00-start",
2767 stage_abbrev, nir->info.name);
2768
2769 backend_shader::dump_instructions(filename);
2770 }
2771
2772 bool progress;
2773 int iteration = 0;
2774 int pass_num = 0;
2775 do {
2776 progress = false;
2777 pass_num = 0;
2778 iteration++;
2779
2780 OPT(opt_predicated_break, this);
2781 OPT(opt_reduce_swizzle);
2782 OPT(dead_code_eliminate);
2783 OPT(dead_control_flow_eliminate, this);
2784 OPT(opt_copy_propagation);
2785 OPT(opt_cmod_propagation);
2786 OPT(opt_cse);
2787 OPT(opt_algebraic);
2788 OPT(opt_register_coalesce);
2789 OPT(eliminate_find_live_channel);
2790 } while (progress);
2791
2792 pass_num = 0;
2793
2794 if (OPT(opt_vector_float)) {
2795 OPT(opt_cse);
2796 OPT(opt_copy_propagation, false);
2797 OPT(opt_copy_propagation, true);
2798 OPT(dead_code_eliminate);
2799 }
2800
2801 if (devinfo->ver <= 5 && OPT(lower_minmax)) {
2802 OPT(opt_cmod_propagation);
2803 OPT(opt_cse);
2804 OPT(opt_copy_propagation);
2805 OPT(dead_code_eliminate);
2806 }
2807
2808 if (OPT(lower_simd_width)) {
2809 OPT(opt_copy_propagation);
2810 OPT(dead_code_eliminate);
2811 }
2812
2813 if (failed)
2814 return false;
2815
2816 OPT(lower_64bit_mad_to_mul_add);
2817
2818 /* Run this before payload setup because tessellation shaders
2819 * rely on it to prevent cross-dvec2 regioning on DF attributes
2820 * that are set up so that XY are on the second half of a register
2821 * and ZW are in the first half of the next.
2822 */
2823 OPT(scalarize_df);
2824
2825 setup_payload();
2826
2827 if (INTEL_DEBUG(DEBUG_SPILL_VEC4)) {
2828 /* Debug of register spilling: Go spill everything. */
2829 const int grf_count = alloc.count;
2830 float spill_costs[alloc.count];
2831 bool no_spill[alloc.count];
2832 evaluate_spill_costs(spill_costs, no_spill);
2833 for (int i = 0; i < grf_count; i++) {
2834 if (no_spill[i])
2835 continue;
2836 spill_reg(i);
2837 }
2838
2839 /* We want to run this after spilling because 64-bit (un)spills need to
2840 * emit code to shuffle 64-bit data for the 32-bit scratch read/write
2841 * messages that can produce unsupported 64-bit swizzle regions.
2842 */
2843 OPT(scalarize_df);
2844 }
2845
2846 fixup_3src_null_dest();
2847
2848 bool allocated_without_spills = reg_allocate();
2849
2850 if (!allocated_without_spills) {
2851 brw_shader_perf_log(compiler, log_data,
2852 "%s shader triggered register spilling. "
2853 "Try reducing the number of live vec4 values "
2854 "to improve performance.\n",
2855 stage_name);
2856
2857 while (!reg_allocate()) {
2858 if (failed)
2859 return false;
2860 }
2861
2862 /* We want to run this after spilling because 64-bit (un)spills need to
2863 * emit code to shuffle 64-bit data for the 32-bit scratch read/write
2864 * messages that can produce unsupported 64-bit swizzle regions.
2865 */
2866 OPT(scalarize_df);
2867 }
2868
2869 opt_schedule_instructions();
2870
2871 opt_set_dependency_control();
2872
2873 convert_to_hw_regs();
2874
2875 if (last_scratch > 0) {
2876 prog_data->base.total_scratch =
2877 brw_get_scratch_size(last_scratch * REG_SIZE);
2878 }
2879
2880 return !failed;
2881 }
2882
2883 } /* namespace brw */
2884
2885 extern "C" {
2886
2887 const unsigned *
2888 brw_compile_vs(const struct brw_compiler *compiler,
2889 void *mem_ctx,
2890 struct brw_compile_vs_params *params)
2891 {
2892 struct nir_shader *nir = params->nir;
2893 const struct brw_vs_prog_key *key = params->key;
2894 struct brw_vs_prog_data *prog_data = params->prog_data;
2895 const bool debug_enabled =
2896 INTEL_DEBUG(params->debug_flag ? params->debug_flag : DEBUG_VS);
2897
2898 prog_data->base.base.stage = MESA_SHADER_VERTEX;
2899
2900 const bool is_scalar = compiler->scalar_stage[MESA_SHADER_VERTEX];
2901 brw_nir_apply_key(nir, compiler, &key->base, 8, is_scalar);
2902
2903 const unsigned *assembly = NULL;
2904
2905 prog_data->inputs_read = nir->info.inputs_read;
2906 prog_data->double_inputs_read = nir->info.vs.double_inputs;
2907
2908 brw_nir_lower_vs_inputs(nir, params->edgeflag_is_last, key->gl_attrib_wa_flags);
2909 brw_nir_lower_vue_outputs(nir);
2910 brw_postprocess_nir(nir, compiler, is_scalar, debug_enabled,
2911 key->base.robust_buffer_access);
2912
2913 prog_data->base.clip_distance_mask =
2914 ((1 << nir->info.clip_distance_array_size) - 1);
2915 prog_data->base.cull_distance_mask =
2916 ((1 << nir->info.cull_distance_array_size) - 1) <<
2917 nir->info.clip_distance_array_size;
2918
2919 unsigned nr_attribute_slots = util_bitcount64(prog_data->inputs_read);
2920
2921 /* gl_VertexID and gl_InstanceID are system values, but arrive via an
2922 * incoming vertex attribute. So, add an extra slot.
2923 */
2924 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
2925 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
2926 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) ||
2927 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID)) {
2928 nr_attribute_slots++;
2929 }
2930
2931 /* gl_DrawID and IsIndexedDraw share their very own vec4 */
2932 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID) ||
2933 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_IS_INDEXED_DRAW)) {
2934 nr_attribute_slots++;
2935 }
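
/* Counting example (illustrative): a shader reading 8 vertex attributes
* that also uses gl_VertexID and gl_DrawID needs 8 + 1 + 1 == 10 slots:
* one extra slot shared by the VertexID/InstanceID group of system
* values and one shared by gl_DrawID/IsIndexedDraw.
*/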
2936
2937 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_IS_INDEXED_DRAW))
2938 prog_data->uses_is_indexed_draw = true;
2939
2940 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX))
2941 prog_data->uses_firstvertex = true;
2942
2943 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE))
2944 prog_data->uses_baseinstance = true;
2945
2946 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE))
2947 prog_data->uses_vertexid = true;
2948
2949 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID))
2950 prog_data->uses_instanceid = true;
2951
2952 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID))
2953 prog_data->uses_drawid = true;
2954
2955 /* The 3DSTATE_VS documentation lists the lower bound on "Vertex URB Entry
2956 * Read Length" as 1 in vec4 mode, and 0 in SIMD8 mode. Empirically, in
2957 * vec4 mode, the hardware appears to wedge unless we read something.
2958 */
2959 if (is_scalar)
2960 prog_data->base.urb_read_length =
2961 DIV_ROUND_UP(nr_attribute_slots, 2);
2962 else
2963 prog_data->base.urb_read_length =
2964 DIV_ROUND_UP(MAX2(nr_attribute_slots, 1), 2);
2965
2966 prog_data->nr_attribute_slots = nr_attribute_slots;
2967
2968 /* Since vertex shaders reuse the same VUE entry for inputs and outputs
2969 * (overwriting the original contents), we need to make sure the size is
2970 * the larger of the two.
2971 */
2972 const unsigned vue_entries =
2973 MAX2(nr_attribute_slots, (unsigned)prog_data->base.vue_map.num_slots);
2974
2975 if (compiler->devinfo->ver == 6) {
2976 prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 8);
2977 } else {
2978 prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 4);
2979 }
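
/* Worked example (illustrative): with 10 attribute slots and a 12-slot
* VUE map, vec4 mode gives urb_read_length == DIV_ROUND_UP(10, 2) == 5,
* vue_entries == MAX2(10, 12) == 12, and an urb_entry_size of
* DIV_ROUND_UP(12, 8) == 2 on gfx6 or DIV_ROUND_UP(12, 4) == 3 on other
* generations.
*/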
2980
2981 if (unlikely(debug_enabled)) {
2982 fprintf(stderr, "VS Output ");
2983 brw_print_vue_map(stderr, &prog_data->base.vue_map, MESA_SHADER_VERTEX);
2984 }
2985
2986 if (is_scalar) {
2987 prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;
2988
2989 fs_visitor v(compiler, params->log_data, mem_ctx, &key->base,
2990 &prog_data->base.base, nir, 8,
2991 params->shader_time ? params->shader_time_index : -1,
2992 debug_enabled);
2993 if (!v.run_vs()) {
2994 params->error_str = ralloc_strdup(mem_ctx, v.fail_msg);
2995 return NULL;
2996 }
2997
2998 prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;
2999
3000 fs_generator g(compiler, params->log_data, mem_ctx,
3001 &prog_data->base.base, v.runtime_check_aads_emit,
3002 MESA_SHADER_VERTEX);
3003 if (unlikely(debug_enabled)) {
3004 const char *debug_name =
3005 ralloc_asprintf(mem_ctx, "%s vertex shader %s",
3006 nir->info.label ? nir->info.label :
3007 "unnamed",
3008 nir->info.name);
3009
3010 g.enable_debug(debug_name);
3011 }
3012 g.generate_code(v.cfg, 8, v.shader_stats,
3013 v.performance_analysis.require(), params->stats);
3014 g.add_const_data(nir->constant_data, nir->constant_data_size);
3015 assembly = g.get_assembly();
3016 }
3017
3018 if (!assembly) {
3019 prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;
3020
3021 vec4_vs_visitor v(compiler, params->log_data, key, prog_data,
3022 nir, mem_ctx,
3023 params->shader_time ? params->shader_time_index : -1,
3024 debug_enabled);
3025 if (!v.run()) {
3026 params->error_str = ralloc_strdup(mem_ctx, v.fail_msg);
3027 return NULL;
3028 }
3029
3030 assembly = brw_vec4_generate_assembly(compiler, params->log_data, mem_ctx,
3031 nir, &prog_data->base,
3032 v.cfg,
3033 v.performance_analysis.require(),
3034 params->stats, debug_enabled);
3035 }
3036
3037 return assembly;
3038 }
3039
3040 } /* extern "C" */
3041