/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include <inttypes.h>
#include "nir_search.h"
#include "nir_builder.h"
#include "nir_worklist.h"
#include "util/half_float.h"

/* This should be the same as nir_search_max_comm_ops in nir_algebraic.py. */
#define NIR_SEARCH_MAX_COMM_OPS 8

struct match_state {
   bool inexact_match;
   bool has_exact_alu;
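
   /* Bitfield of source-order flips for the first NIR_SEARCH_MAX_COMM_OPS
    * commutative expressions in the pattern: bit i set means commutative
    * expression i is matched with its two sources swapped.
    */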
   uint8_t comm_op_direction;
   unsigned variables_seen;

   /* Used for running the automaton on newly-constructed instructions. */
   struct util_dynarray *states;
   const struct per_op_table *pass_op_table;

   nir_alu_src variables[NIR_SEARCH_MAX_VARIABLES];
   struct hash_table *range_ht;
};

static bool
match_expression(const nir_search_expression *expr, nir_alu_instr *instr,
                 unsigned num_components, const uint8_t *swizzle,
                 struct match_state *state);
static bool
nir_algebraic_automaton(nir_instr *instr, struct util_dynarray *states,
                        const struct per_op_table *pass_op_table);

static const uint8_t identity_swizzle[NIR_MAX_VEC_COMPONENTS] =
{
    0,  1,  2,  3,
    4,  5,  6,  7,
    8,  9, 10, 11,
   12, 13, 14, 15,
};

/**
 * Check if a source produces a value of the given type.
 *
 * Used for satisfying 'a@type' constraints.
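 *
 * For example, a nir_algebraic pattern like ('iand', 'a@bool', 'b') only
 * matches when this function reports that 'a' is boolean-typed.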
 */
static bool
src_is_type(nir_src src, nir_alu_type type)
{
   assert(type != nir_type_invalid);

   if (!src.is_ssa)
      return false;

   if (src.ssa->parent_instr->type == nir_instr_type_alu) {
      nir_alu_instr *src_alu = nir_instr_as_alu(src.ssa->parent_instr);
      nir_alu_type output_type = nir_op_infos[src_alu->op].output_type;

      if (type == nir_type_bool) {
         switch (src_alu->op) {
         case nir_op_iand:
         case nir_op_ior:
         case nir_op_ixor:
            return src_is_type(src_alu->src[0].src, nir_type_bool) &&
                   src_is_type(src_alu->src[1].src, nir_type_bool);
         case nir_op_inot:
            return src_is_type(src_alu->src[0].src, nir_type_bool);
         default:
            break;
         }
      }

      return nir_alu_type_get_base_type(output_type) == type;
   } else if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (type == nir_type_bool) {
         return intr->intrinsic == nir_intrinsic_load_front_face ||
                intr->intrinsic == nir_intrinsic_load_helper_invocation;
      }
   }

   /* don't know */
   return false;
}

static bool
nir_op_matches_search_op(nir_op nop, uint16_t sop)
{
   if (sop <= nir_last_opcode)
      return nop == sop;

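   /* Search opcodes beyond nir_last_opcode are bit-size-generic conversions:
    * e.g. nir_search_op_f2f matches nir_op_f2f16, nir_op_f2f32, and
    * nir_op_f2f64.
    */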
#define MATCH_FCONV_CASE(op) \
   case nir_search_op_##op: \
      return nop == nir_op_##op##16 || \
             nop == nir_op_##op##32 || \
             nop == nir_op_##op##64;

#define MATCH_ICONV_CASE(op) \
   case nir_search_op_##op: \
      return nop == nir_op_##op##8 || \
             nop == nir_op_##op##16 || \
             nop == nir_op_##op##32 || \
             nop == nir_op_##op##64;

#define MATCH_BCONV_CASE(op) \
   case nir_search_op_##op: \
      return nop == nir_op_##op##1 || \
             nop == nir_op_##op##32;

   switch (sop) {
   MATCH_FCONV_CASE(i2f)
   MATCH_FCONV_CASE(u2f)
   MATCH_FCONV_CASE(f2f)
   MATCH_ICONV_CASE(f2u)
   MATCH_ICONV_CASE(f2i)
   MATCH_ICONV_CASE(u2u)
   MATCH_ICONV_CASE(i2i)
   MATCH_FCONV_CASE(b2f)
   MATCH_ICONV_CASE(b2i)
   MATCH_BCONV_CASE(i2b)
   MATCH_BCONV_CASE(f2b)
   default:
      unreachable("Invalid nir_search_op");
   }

#undef MATCH_FCONV_CASE
#undef MATCH_ICONV_CASE
#undef MATCH_BCONV_CASE
}

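/* Map a NIR conversion opcode back to the generic search opcode that covers
 * it; any other opcode maps to itself, since nir_op and nir_search_op share
 * the low range of values.
 */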
uint16_t
nir_search_op_for_nir_op(nir_op nop)
{
#define MATCH_FCONV_CASE(op) \
   case nir_op_##op##16: \
   case nir_op_##op##32: \
   case nir_op_##op##64: \
      return nir_search_op_##op;

#define MATCH_ICONV_CASE(op) \
   case nir_op_##op##8: \
   case nir_op_##op##16: \
   case nir_op_##op##32: \
   case nir_op_##op##64: \
      return nir_search_op_##op;

#define MATCH_BCONV_CASE(op) \
   case nir_op_##op##1: \
   case nir_op_##op##32: \
      return nir_search_op_##op;

   switch (nop) {
   MATCH_FCONV_CASE(i2f)
   MATCH_FCONV_CASE(u2f)
   MATCH_FCONV_CASE(f2f)
   MATCH_ICONV_CASE(f2u)
   MATCH_ICONV_CASE(f2i)
   MATCH_ICONV_CASE(u2u)
   MATCH_ICONV_CASE(i2i)
   MATCH_FCONV_CASE(b2f)
   MATCH_ICONV_CASE(b2i)
   MATCH_BCONV_CASE(i2b)
   MATCH_BCONV_CASE(f2b)
   default:
      return nop;
   }

#undef MATCH_FCONV_CASE
#undef MATCH_ICONV_CASE
#undef MATCH_BCONV_CASE
}

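/* Pick the concrete NIR conversion opcode for a generic search opcode at the
 * given destination bit size.
 */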
static nir_op
nir_op_for_search_op(uint16_t sop, unsigned bit_size)
{
   if (sop <= nir_last_opcode)
      return sop;

#define RET_FCONV_CASE(op) \
   case nir_search_op_##op: \
      switch (bit_size) { \
      case 16: return nir_op_##op##16; \
      case 32: return nir_op_##op##32; \
      case 64: return nir_op_##op##64; \
      default: unreachable("Invalid bit size"); \
      }

#define RET_ICONV_CASE(op) \
   case nir_search_op_##op: \
      switch (bit_size) { \
      case 8:  return nir_op_##op##8; \
      case 16: return nir_op_##op##16; \
      case 32: return nir_op_##op##32; \
      case 64: return nir_op_##op##64; \
      default: unreachable("Invalid bit size"); \
      }

#define RET_BCONV_CASE(op) \
   case nir_search_op_##op: \
      switch (bit_size) { \
      case 1:  return nir_op_##op##1; \
      case 32: return nir_op_##op##32; \
      default: unreachable("Invalid bit size"); \
      }

   switch (sop) {
   RET_FCONV_CASE(i2f)
   RET_FCONV_CASE(u2f)
   RET_FCONV_CASE(f2f)
   RET_ICONV_CASE(f2u)
   RET_ICONV_CASE(f2i)
   RET_ICONV_CASE(u2u)
   RET_ICONV_CASE(i2i)
   RET_FCONV_CASE(b2f)
   RET_ICONV_CASE(b2i)
   RET_BCONV_CASE(i2b)
   RET_BCONV_CASE(f2b)
   default:
      unreachable("Invalid nir_search_op");
   }

#undef RET_FCONV_CASE
#undef RET_ICONV_CASE
#undef RET_BCONV_CASE
}

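/* Match a single search value (expression, variable, or constant) against
 * source 'src' of 'instr', accumulating the source's swizzle as the match
 * recurses down the expression tree.
 */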
static bool
match_value(const nir_search_value *value, nir_alu_instr *instr, unsigned src,
            unsigned num_components, const uint8_t *swizzle,
            struct match_state *state)
{
   uint8_t new_swizzle[NIR_MAX_VEC_COMPONENTS];

   /* Searching only works on SSA values because, if it's not SSA, we can't
    * know if the value changed between one instance of that value in the
    * expression and another.  Also, the replace operation will place reads of
    * that value right before the last instruction in the expression we're
    * replacing so those reads will happen after the original reads and may
    * not be valid if they're register reads.
    */
   assert(instr->src[src].src.is_ssa);

   /* If the source is an explicitly sized source, then we need to reset
    * both the number of components and the swizzle.
    */
   if (nir_op_infos[instr->op].input_sizes[src] != 0) {
      num_components = nir_op_infos[instr->op].input_sizes[src];
      swizzle = identity_swizzle;
   }

   for (unsigned i = 0; i < num_components; ++i)
      new_swizzle[i] = instr->src[src].swizzle[swizzle[i]];

   /* If the value has a specific bit size and it doesn't match, bail */
   if (value->bit_size > 0 &&
       nir_src_bit_size(instr->src[src].src) != value->bit_size)
      return false;

   switch (value->type) {
   case nir_search_value_expression:
      if (instr->src[src].src.ssa->parent_instr->type != nir_instr_type_alu)
         return false;

      return match_expression(nir_search_value_as_expression(value),
                              nir_instr_as_alu(instr->src[src].src.ssa->parent_instr),
                              num_components, new_swizzle, state);

   case nir_search_value_variable: {
      nir_search_variable *var = nir_search_value_as_variable(value);
      assert(var->variable < NIR_SEARCH_MAX_VARIABLES);

      if (state->variables_seen & (1 << var->variable)) {
         if (state->variables[var->variable].src.ssa != instr->src[src].src.ssa)
            return false;

         assert(!instr->src[src].abs && !instr->src[src].negate);

         for (unsigned i = 0; i < num_components; ++i) {
            if (state->variables[var->variable].swizzle[i] != new_swizzle[i])
               return false;
         }

         return true;
      } else {
         if (var->is_constant &&
             instr->src[src].src.ssa->parent_instr->type != nir_instr_type_load_const)
            return false;

         if (var->cond && !var->cond(state->range_ht, instr,
                                     src, num_components, new_swizzle))
            return false;

         if (var->type != nir_type_invalid &&
             !src_is_type(instr->src[src].src, var->type))
            return false;

         state->variables_seen |= (1 << var->variable);
         state->variables[var->variable].src = instr->src[src].src;
         state->variables[var->variable].abs = false;
         state->variables[var->variable].negate = false;

         for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i) {
            if (i < num_components)
               state->variables[var->variable].swizzle[i] = new_swizzle[i];
            else
               state->variables[var->variable].swizzle[i] = 0;
         }

         return true;
      }
   }

   case nir_search_value_constant: {
      nir_search_constant *const_val = nir_search_value_as_constant(value);

      if (!nir_src_is_const(instr->src[src].src))
         return false;

      switch (const_val->type) {
      case nir_type_float: {
         nir_load_const_instr *const load =
            nir_instr_as_load_const(instr->src[src].src.ssa->parent_instr);

         /* There are 8-bit and 1-bit integer types, but there are no 8-bit or
          * 1-bit float types.  This prevents potential assertion failures in
          * nir_src_comp_as_float.
          */
         if (load->def.bit_size < 16)
            return false;

         for (unsigned i = 0; i < num_components; ++i) {
            double val = nir_src_comp_as_float(instr->src[src].src,
                                               new_swizzle[i]);
            if (val != const_val->data.d)
               return false;
         }
         return true;
      }

      case nir_type_int:
      case nir_type_uint:
      case nir_type_bool: {
         unsigned bit_size = nir_src_bit_size(instr->src[src].src);
         uint64_t mask = u_uintN_max(bit_size);
         for (unsigned i = 0; i < num_components; ++i) {
            uint64_t val = nir_src_comp_as_uint(instr->src[src].src,
                                                new_swizzle[i]);
            if ((val & mask) != (const_val->data.u & mask))
               return false;
         }
         return true;
      }

      default:
         unreachable("Invalid alu source type");
      }
   }

   default:
      unreachable("Invalid search value type");
   }
}

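/* Try to match an entire search expression against an ALU instruction,
 * recording captured variables in 'state'.  Returns true on a full match.
 */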
static bool
match_expression(const nir_search_expression *expr, nir_alu_instr *instr,
                 unsigned num_components, const uint8_t *swizzle,
                 struct match_state *state)
{
   if (expr->cond && !expr->cond(instr))
      return false;

   if (!nir_op_matches_search_op(instr->op, expr->opcode))
      return false;

   assert(instr->dest.dest.is_ssa);

   if (expr->value.bit_size > 0 &&
       instr->dest.dest.ssa.bit_size != expr->value.bit_size)
      return false;

   state->inexact_match = expr->inexact || state->inexact_match;
   state->has_exact_alu = instr->exact || state->has_exact_alu;
   if (state->inexact_match && state->has_exact_alu)
      return false;

   assert(!instr->dest.saturate);
   assert(nir_op_infos[instr->op].num_inputs > 0);

   /* If we have an explicitly sized destination, we can only handle the
    * identity swizzle.  While dot(vec3(a, b, c).zxy) is a valid
    * expression, we don't have the information right now to propagate that
    * swizzle through.  We can only properly propagate swizzles if the
    * instruction is vectorized.
    */
   if (nir_op_infos[instr->op].output_size != 0) {
      for (unsigned i = 0; i < num_components; i++) {
         if (swizzle[i] != i)
            return false;
      }
   }

   /* If this is a commutative expression and it's one of the first few, look
    * up its direction for the current search operation.  We'll use that value
    * to possibly flip the sources for the match.
    */
   unsigned comm_op_flip =
      (expr->comm_expr_idx >= 0 &&
       expr->comm_expr_idx < NIR_SEARCH_MAX_COMM_OPS) ?
      ((state->comm_op_direction >> expr->comm_expr_idx) & 1) : 0;

   bool matched = true;
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      /* 2src_commutative instructions that have 3 sources are only commutative
       * in the first two sources.  Source 2 is always source 2.
       */
      if (!match_value(expr->srcs[i], instr,
                       i < 2 ? i ^ comm_op_flip : i,
                       num_components, swizzle, state)) {
         matched = false;
         break;
      }
   }

   return matched;
}

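/* Resolve the bit size of a replacement value: a positive bit_size is
 * explicit, a negative one means "same bit size as matched variable number
 * (-bit_size - 1)", and zero inherits the bit size of the matched
 * expression.
 */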
static unsigned
replace_bitsize(const nir_search_value *value, unsigned search_bitsize,
                struct match_state *state)
{
   if (value->bit_size > 0)
      return value->bit_size;
   if (value->bit_size < 0)
      return nir_src_bit_size(state->variables[-value->bit_size - 1].src);
   return search_bitsize;
}

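/* Recursively build the replacement expression tree at the current builder
 * cursor, returning an ALU source that reads the root of the new tree.
 */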
static nir_alu_src
construct_value(nir_builder *build,
                const nir_search_value *value,
                unsigned num_components, unsigned search_bitsize,
                struct match_state *state,
                nir_instr *instr)
{
   switch (value->type) {
   case nir_search_value_expression: {
      const nir_search_expression *expr = nir_search_value_as_expression(value);
      unsigned dst_bit_size = replace_bitsize(value, search_bitsize, state);
      nir_op op = nir_op_for_search_op(expr->opcode, dst_bit_size);

      if (nir_op_infos[op].output_size != 0)
         num_components = nir_op_infos[op].output_size;

      nir_alu_instr *alu = nir_alu_instr_create(build->shader, op);
      nir_ssa_dest_init(&alu->instr, &alu->dest.dest, num_components,
                        dst_bit_size, NULL);
      alu->dest.write_mask = (1 << num_components) - 1;
      alu->dest.saturate = false;

      /* We have no way of knowing what values in a given search expression
       * map to a particular replacement value.  Therefore, if the
       * expression we are replacing has any exact values, the entire
       * replacement should be exact.
       */
      alu->exact = state->has_exact_alu || expr->exact;

      for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
         /* If the source is an explicitly sized source, then we need to reset
          * the number of components to match.
          */
         if (nir_op_infos[alu->op].input_sizes[i] != 0)
            num_components = nir_op_infos[alu->op].input_sizes[i];

         alu->src[i] = construct_value(build, expr->srcs[i],
                                       num_components, search_bitsize,
                                       state, instr);
      }

      nir_builder_instr_insert(build, &alu->instr);

      assert(alu->dest.dest.ssa.index ==
             util_dynarray_num_elements(state->states, uint16_t));
      util_dynarray_append(state->states, uint16_t, 0);
      nir_algebraic_automaton(&alu->instr, state->states, state->pass_op_table);

      nir_alu_src val;
      val.src = nir_src_for_ssa(&alu->dest.dest.ssa);
      val.negate = false;
      val.abs = false;
      memcpy(val.swizzle, identity_swizzle, sizeof val.swizzle);

      return val;
   }

   case nir_search_value_variable: {
      const nir_search_variable *var = nir_search_value_as_variable(value);
      assert(state->variables_seen & (1 << var->variable));

      nir_alu_src val = { NIR_SRC_INIT };
      nir_alu_src_copy(&val, &state->variables[var->variable]);
      assert(!var->is_constant);

      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
         val.swizzle[i] = state->variables[var->variable].swizzle[var->swizzle[i]];

      return val;
   }

   case nir_search_value_constant: {
      const nir_search_constant *c = nir_search_value_as_constant(value);
      unsigned bit_size = replace_bitsize(value, search_bitsize, state);

      nir_ssa_def *cval;
      switch (c->type) {
      case nir_type_float:
         cval = nir_imm_floatN_t(build, c->data.d, bit_size);
         break;

      case nir_type_int:
      case nir_type_uint:
         cval = nir_imm_intN_t(build, c->data.i, bit_size);
         break;

      case nir_type_bool:
         cval = nir_imm_boolN_t(build, c->data.u, bit_size);
         break;

      default:
         unreachable("Invalid alu source type");
      }

      assert(cval->index ==
             util_dynarray_num_elements(state->states, uint16_t));
      util_dynarray_append(state->states, uint16_t, 0);
      nir_algebraic_automaton(cval->parent_instr, state->states,
                              state->pass_op_table);

      nir_alu_src val;
      val.src = nir_src_for_ssa(cval);
      val.negate = false;
      val.abs = false;
      memset(val.swizzle, 0, sizeof val.swizzle);

      return val;
   }

   default:
      unreachable("Invalid search value type");
   }
}

UNUSED static void dump_value(const nir_search_value *val)
{
   switch (val->type) {
   case nir_search_value_constant: {
      const nir_search_constant *sconst = nir_search_value_as_constant(val);
      switch (sconst->type) {
      case nir_type_float:
         fprintf(stderr, "%f", sconst->data.d);
         break;
      case nir_type_int:
         fprintf(stderr, "%"PRId64, sconst->data.i);
         break;
      case nir_type_uint:
         fprintf(stderr, "0x%"PRIx64, sconst->data.u);
         break;
      case nir_type_bool:
         fprintf(stderr, "%s", sconst->data.u != 0 ? "True" : "False");
         break;
      default:
         unreachable("bad const type");
      }
      break;
   }

   case nir_search_value_variable: {
      const nir_search_variable *var = nir_search_value_as_variable(val);
      if (var->is_constant)
         fprintf(stderr, "#");
      fprintf(stderr, "%c", var->variable + 'a');
      break;
   }

   case nir_search_value_expression: {
      const nir_search_expression *expr = nir_search_value_as_expression(val);
      fprintf(stderr, "(");
      if (expr->inexact)
         fprintf(stderr, "~");
      switch (expr->opcode) {
#define CASE(n) \
      case nir_search_op_##n: fprintf(stderr, #n); break;
      CASE(f2b)
      CASE(b2f)
      CASE(b2i)
      CASE(i2b)
      CASE(i2i)
      CASE(f2i)
      CASE(i2f)
#undef CASE
      default:
         fprintf(stderr, "%s", nir_op_infos[expr->opcode].name);
      }

      unsigned num_srcs = 1;
      if (expr->opcode <= nir_last_opcode)
         num_srcs = nir_op_infos[expr->opcode].num_inputs;

      for (unsigned i = 0; i < num_srcs; i++) {
         fprintf(stderr, " ");
         dump_value(expr->srcs[i]);
      }

      fprintf(stderr, ")");
      break;
   }
   }

   if (val->bit_size > 0)
      fprintf(stderr, "@%d", val->bit_size);
}

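/* Run the automaton over every user of an instruction's SSA def, pushing the
 * users whose state changed onto the worklist so the update can propagate.
 */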
static void
add_uses_to_worklist(nir_instr *instr,
                     nir_instr_worklist *worklist,
                     struct util_dynarray *states,
                     const struct per_op_table *pass_op_table)
{
   nir_ssa_def *def = nir_instr_ssa_def(instr);

   nir_foreach_use_safe(use_src, def) {
      if (nir_algebraic_automaton(use_src->parent_instr, states, pass_op_table))
         nir_instr_worklist_push_tail(worklist, use_src->parent_instr);
   }
}

static void
nir_algebraic_update_automaton(nir_instr *new_instr,
                               nir_instr_worklist *algebraic_worklist,
                               struct util_dynarray *states,
                               const struct per_op_table *pass_op_table)
{

   nir_instr_worklist *automaton_worklist = nir_instr_worklist_create();

   /* Walk through the tree of uses of our new instruction's SSA value,
    * recursively updating the automaton state until it stabilizes.
    */
   add_uses_to_worklist(new_instr, automaton_worklist, states, pass_op_table);

   nir_instr *instr;
   while ((instr = nir_instr_worklist_pop_head(automaton_worklist))) {
      nir_instr_worklist_push_tail(algebraic_worklist, instr);
      add_uses_to_worklist(instr, automaton_worklist, states, pass_op_table);
   }

   nir_instr_worklist_destroy(automaton_worklist);
}

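/* Attempt to match 'search' against 'instr' and, on success, build the
 * 'replace' expression at an appropriate cursor, rewrite all uses of the
 * old value, and remove the old instruction.  Returns the new SSA def, or
 * NULL if the pattern did not match.
 */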
nir_ssa_def *
nir_replace_instr(nir_builder *build, nir_alu_instr *instr,
                  struct hash_table *range_ht,
                  struct util_dynarray *states,
                  const struct per_op_table *pass_op_table,
                  const nir_search_expression *search,
                  const nir_search_value *replace,
                  nir_instr_worklist *algebraic_worklist)
{
   uint8_t swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };

   for (unsigned i = 0; i < instr->dest.dest.ssa.num_components; ++i)
      swizzle[i] = i;

   assert(instr->dest.dest.is_ssa);

   struct match_state state;
   state.inexact_match = false;
   state.has_exact_alu = false;
   state.range_ht = range_ht;
   state.pass_op_table = pass_op_table;

   STATIC_ASSERT(sizeof(state.comm_op_direction) * 8 >= NIR_SEARCH_MAX_COMM_OPS);

   unsigned comm_expr_combinations =
      1 << MIN2(search->comm_exprs, NIR_SEARCH_MAX_COMM_OPS);

   bool found = false;
   for (unsigned comb = 0; comb < comm_expr_combinations; comb++) {
      /* The bitfield of directions is just the current iteration.  Hooray for
       * binary.
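       *
       * For example, with two commutative expressions in the pattern,
       * comb == 2 (0b10) flips the sources of commutative expression 1 but
       * not those of expression 0.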
       */
      state.comm_op_direction = comb;
      state.variables_seen = 0;

      if (match_expression(search, instr,
                           instr->dest.dest.ssa.num_components,
                           swizzle, &state)) {
         found = true;
         break;
      }
   }
   if (!found)
      return NULL;

#if 0
   fprintf(stderr, "matched: ");
   dump_value(&search->value);
   fprintf(stderr, " -> ");
   dump_value(replace);
   fprintf(stderr, " ssa_%d\n", instr->dest.dest.ssa.index);
#endif

   /* If the instruction at the root of the expression tree being replaced is
    * a unary operation, insert the replacement instructions at the location
    * of the source of the unary operation.  Otherwise, insert the replacement
    * instructions at the location of the expression tree root.
    *
    * For the unary operation case, this is done to prevent some spurious code
    * motion that can dramatically extend live ranges.  Imagine an expression
    * like -(A+B) where the addition and the negation are separated by flow
    * control and thousands of instructions.  If this expression is replaced
    * with -A+-B, inserting the new instructions at the site of the negation
    * could extend the live range of A and B dramatically.  This could
    * increase register pressure and cause spilling.
    *
    * It may well be that moving instructions around is a good thing, but
    * keeping algebraic optimizations and code motion optimizations separate
    * seems safest.
    */
   nir_alu_instr *const src_instr = nir_src_as_alu_instr(instr->src[0].src);
   if (src_instr != NULL &&
       (instr->op == nir_op_fneg || instr->op == nir_op_fabs ||
        instr->op == nir_op_ineg || instr->op == nir_op_iabs ||
        instr->op == nir_op_inot)) {
      /* Insert new instructions *after*.  Otherwise a hypothetical
       * replacement fneg(X) -> fabs(X) would insert the fabs() instruction
       * before X!  This can also occur for things like fneg(X.wzyx) -> X.wzyx
       * in vector mode.  A move instruction to handle the swizzle will get
       * inserted before X.
       *
       * This manifested in a single OpenGL ES 2.0 CTS vertex shader test on
       * older Intel GPUs that use vector-mode vertex processing.
       */
      build->cursor = nir_after_instr(&src_instr->instr);
   } else {
      build->cursor = nir_before_instr(&instr->instr);
   }

   state.states = states;

   nir_alu_src val = construct_value(build, replace,
                                     instr->dest.dest.ssa.num_components,
                                     instr->dest.dest.ssa.bit_size,
                                     &state, &instr->instr);

   /* Note that the NIR builder will elide the MOV if it's a no-op, which may
    * allow more work to be done in a single pass through algebraic.
    */
   nir_ssa_def *ssa_val =
      nir_mov_alu(build, val, instr->dest.dest.ssa.num_components);
   if (ssa_val->index == util_dynarray_num_elements(states, uint16_t)) {
      util_dynarray_append(states, uint16_t, 0);
      nir_algebraic_automaton(ssa_val->parent_instr, states, pass_op_table);
   }

   /* Rewrite the uses of the old SSA value to the new one, and recurse
    * through the uses updating the automaton's state.
    */
   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, ssa_val);
   nir_algebraic_update_automaton(ssa_val->parent_instr, algebraic_worklist,
                                  states, pass_op_table);

   /* Nothing uses the instr any more, so drop it out of the program.  Note
    * that the instr may be in the worklist still, so we can't free it
    * directly.
    */
   nir_instr_remove(&instr->instr);

   return ssa_val;
}

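/* Advance the pattern-matching automaton state for one instruction.  Returns
 * true if the instruction's state changed, meaning its users need to be
 * revisited.
 */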
static bool
nir_algebraic_automaton(nir_instr *instr, struct util_dynarray *states,
                        const struct per_op_table *pass_op_table)
{
   switch (instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);
      nir_op op = alu->op;
      uint16_t search_op = nir_search_op_for_nir_op(op);
      const struct per_op_table *tbl = &pass_op_table[search_op];
      if (tbl->num_filtered_states == 0)
         return false;

      /* Calculate the index into the transition table.  Note the index
       * calculated must match the iteration order of Python's
       * itertools.product(), which was used to emit the transition
       * table.
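       *
       * For a two-source op the loop below computes:
       *
       *    index = filter[state(src0)] * num_filtered_states
       *          + filter[state(src1)]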
       */
      unsigned index = 0;
      for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
         index *= tbl->num_filtered_states;
         index += tbl->filter[*util_dynarray_element(states, uint16_t,
                                                     alu->src[i].src.ssa->index)];
      }

      uint16_t *state = util_dynarray_element(states, uint16_t,
                                              alu->dest.dest.ssa.index);
      if (*state != tbl->table[index]) {
         *state = tbl->table[index];
         return true;
      }
      return false;
   }

   case nir_instr_type_load_const: {
      nir_load_const_instr *load_const = nir_instr_as_load_const(instr);
      uint16_t *state = util_dynarray_element(states, uint16_t,
                                              load_const->def.index);
      if (*state != CONST_STATE) {
         *state = CONST_STATE;
         return true;
      }
      return false;
   }

   default:
      return false;
   }
}

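/* Try each transform associated with this instruction's current automaton
 * state until one succeeds.  After a successful rewrite, the cached range
 * analysis results in range_ht may be stale, so the table is cleared.
 */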
static bool
nir_algebraic_instr(nir_builder *build, nir_instr *instr,
                    struct hash_table *range_ht,
                    const bool *condition_flags,
                    const struct transform **transforms,
                    const uint16_t *transform_counts,
                    struct util_dynarray *states,
                    const struct per_op_table *pass_op_table,
                    nir_instr_worklist *worklist)
{

   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   if (!alu->dest.dest.is_ssa)
      return false;

   unsigned bit_size = alu->dest.dest.ssa.bit_size;
   const unsigned execution_mode =
      build->shader->info.float_controls_execution_mode;
   const bool ignore_inexact =
      nir_is_float_control_signed_zero_inf_nan_preserve(execution_mode, bit_size) ||
      nir_is_denorm_flush_to_zero(execution_mode, bit_size);

   int xform_idx = *util_dynarray_element(states, uint16_t,
                                          alu->dest.dest.ssa.index);
   for (uint16_t i = 0; i < transform_counts[xform_idx]; i++) {
      const struct transform *xform = &transforms[xform_idx][i];
      if (condition_flags[xform->condition_offset] &&
          !(xform->search->inexact && ignore_inexact) &&
          nir_replace_instr(build, alu, range_ht, states, pass_op_table,
                            xform->search, xform->replace, worklist)) {
         _mesa_hash_table_clear(range_ht, NULL);
         return true;
      }
   }

   return false;
}

bool
nir_algebraic_impl(nir_function_impl *impl,
                   const bool *condition_flags,
                   const struct transform **transforms,
                   const uint16_t *transform_counts,
                   const struct per_op_table *pass_op_table)
{
   bool progress = false;

   nir_builder build;
   nir_builder_init(&build, impl);

   /* Note: it's important here that we're allocating a zeroed array, since
    * state 0 is the default state, which means we don't have to visit
    * anything other than constants and ALU instructions.
    */
   struct util_dynarray states = {0};
   if (!util_dynarray_resize(&states, uint16_t, impl->ssa_alloc)) {
      nir_metadata_preserve(impl, nir_metadata_all);
      return false;
   }
   memset(states.data, 0, states.size);

   struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL);

   nir_instr_worklist *worklist = nir_instr_worklist_create();

   /* Walk top-to-bottom setting up the automaton state. */
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         nir_algebraic_automaton(instr, &states, pass_op_table);
      }
   }

   /* Put our instrs in the worklist such that we're popping the last instr
    * first.  This will encourage us to match the biggest source patterns when
    * possible.
    */
   nir_foreach_block_reverse(block, impl) {
      nir_foreach_instr_reverse(instr, block) {
         if (instr->type == nir_instr_type_alu)
            nir_instr_worklist_push_tail(worklist, instr);
      }
   }

   nir_instr *instr;
   while ((instr = nir_instr_worklist_pop_head(worklist))) {
      /* The worklist can have an instr pushed to it multiple times if it was
       * the src of multiple instrs that also got optimized, so make sure that
       * we don't try to re-optimize an instr we already handled.
       */
      if (exec_node_is_tail_sentinel(&instr->node))
         continue;

      progress |= nir_algebraic_instr(&build, instr,
                                      range_ht, condition_flags,
                                      transforms, transform_counts, &states,
                                      pass_op_table, worklist);
   }

   nir_instr_worklist_destroy(worklist);
   ralloc_free(range_ht);
   util_dynarray_fini(&states);

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}