/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "c11/threads.h"
#include <assert.h>

/*
 * This file checks for invalid IR indicating a bug somewhere in the compiler.
 */

/* Since this file is just a pile of asserts, don't bother compiling it if
 * we're not building a debug build.
 */
#ifndef NDEBUG

/*
 * Per-register validation state.
 */

typedef struct {
   /*
    * equivalent to the uses and defs in nir_register, but built up by the
    * validator. At the end, we verify that the sets have the same entries.
    */
   struct set *uses, *if_uses, *defs;
   nir_function_impl *where_defined; /* NULL for global registers */
} reg_validate_state;

typedef struct {
   void *mem_ctx;

   /* map of register -> validation state (struct above) */
   struct hash_table *regs;

   /* the current shader being validated */
   nir_shader *shader;

   /* the current instruction being validated */
   nir_instr *instr;

   /* the current variable being validated */
   nir_variable *var;

   /* the current basic block being validated */
   nir_block *block;

   /* the current if statement being validated */
   nir_if *if_stmt;

   /* the current loop being visited */
   nir_loop *loop;

   /* the parent of the current cf node being visited */
   nir_cf_node *parent_node;

   /* the current function implementation being validated */
   nir_function_impl *impl;

   /* Set of all blocks in the list */
   struct set *blocks;

   /* Set of seen SSA sources */
   struct set *ssa_srcs;

   /* bitset of ssa definitions we have found; used to check uniqueness */
   BITSET_WORD *ssa_defs_found;

   /* bitset of registers we have currently found; used to check uniqueness */
   BITSET_WORD *regs_found;

   /* map of variable -> function implementation where it is defined or NULL
    * if it is a global variable
    */
   struct hash_table *var_defs;

   /* map of instruction/var/etc to failed assert string */
   struct hash_table *errors;

   struct set *shader_gc_list;
} validate_state;

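/* Record a validation failure, keyed on the object (instruction or
 * variable) currently being validated, so the error message can be printed
 * next to the offending object when the shader is dumped.
 */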
static void
log_error(validate_state *state, const char *cond, const char *file, int line)
{
   const void *obj;

   if (state->instr)
      obj = state->instr;
   else if (state->var)
      obj = state->var;
   else
      obj = cond;

   char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
                               cond, file, line);

   _mesa_hash_table_insert(state->errors, obj, msg);
}

static bool
validate_assert_impl(validate_state *state, bool cond, const char *str,
                     const char *file, unsigned line)
{
   if (!cond)
      log_error(state, str, file, line);
   return cond;
}

#define validate_assert(state, cond) \
   validate_assert_impl(state, (cond), #cond, __FILE__, __LINE__)


static void validate_src(nir_src *src, validate_state *state,
                         unsigned bit_sizes, unsigned num_components);

static void
validate_num_components(validate_state *state, unsigned num_components)
{
   validate_assert(state, nir_num_components_valid(num_components));
}

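/* Validate a source that reads a (non-SSA) register: record the use in the
 * per-register tracking sets, then check that the register belongs to the
 * current function, matches the expected bit size and component count, and
 * that any array access is in bounds with at most one level of indirection.
 */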
static void
validate_reg_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->reg.reg != NULL);

   struct hash_entry *entry;
   entry = _mesa_hash_table_search(state->regs, src->reg.reg);
   validate_assert(state, entry);

   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   if (state->instr) {
      _mesa_set_add(reg_state->uses, src);
   } else {
      validate_assert(state, state->if_stmt);
      _mesa_set_add(reg_state->if_uses, src);
   }

   validate_assert(state, reg_state->where_defined == state->impl &&
          "using a register declared in a different function");

   if (bit_sizes)
      validate_assert(state, src->reg.reg->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->reg.reg->num_components == num_components);

   validate_assert(state, (src->reg.reg->num_array_elems == 0 ||
          src->reg.base_offset < src->reg.reg->num_array_elems) &&
          "definitely out-of-bounds array access");

   if (src->reg.indirect) {
      validate_assert(state, src->reg.reg->num_array_elems != 0);
      validate_assert(state, (src->reg.indirect->is_ssa ||
              src->reg.indirect->reg.indirect == NULL) &&
             "only one level of indirection allowed");
      validate_src(src->reg.indirect, state, 32, 1);
   }
}

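/* Tag a pointer by setting one of its low bits.  The ssa_srcs set holds
 * both instruction uses and if-condition uses of the same nir_src pointers,
 * so if-uses are inserted and looked up with bit 0 set, e.g.
 * SET_PTR_BIT(src, 0), which keeps the two kinds of use from colliding.
 */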
#define SET_PTR_BIT(ptr, bit) \
   (void *)(((uintptr_t)(ptr)) | (((uintptr_t)1) << bit))

static void
validate_ssa_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->ssa != NULL);

   /* As we walk SSA defs, we add every use to this set.  We need to make sure
    * our use is seen in a use list.
    */
   struct set_entry *entry;
   if (state->instr) {
      entry = _mesa_set_search(state->ssa_srcs, src);
   } else {
      entry = _mesa_set_search(state->ssa_srcs, SET_PTR_BIT(src, 0));
   }
   validate_assert(state, entry);

   /* This will let us prove that we've seen all the sources */
   if (entry)
      _mesa_set_remove(state->ssa_srcs, entry);

   if (bit_sizes)
      validate_assert(state, src->ssa->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->ssa->num_components == num_components);

   /* TODO validate that the use is dominated by the definition */
}

static void
validate_src(nir_src *src, validate_state *state,
             unsigned bit_sizes, unsigned num_components)
{
   if (state->instr)
      validate_assert(state, src->parent_instr == state->instr);
   else
      validate_assert(state, src->parent_if == state->if_stmt);

   if (src->is_ssa)
      validate_ssa_src(src, state, bit_sizes, num_components);
   else
      validate_reg_src(src, state, bit_sizes, num_components);
}

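/* Validate one ALU source: every swizzle entry must name a valid channel,
 * and any channel the instruction actually reads must exist in the source
 * value.
 */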
static void
validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
{
   nir_alu_src *src = &instr->src[index];

   if (instr->op == nir_op_mov)
      assert(!src->abs && !src->negate);

   unsigned num_components = nir_src_num_components(src->src);
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
      validate_assert(state, src->swizzle[i] < NIR_MAX_VEC_COMPONENTS);

      if (nir_alu_instr_channel_used(instr, index, i))
         validate_assert(state, src->swizzle[i] < num_components);
   }

   validate_src(&src->src, state, 0, 0);
}

static void
validate_reg_dest(nir_reg_dest *dest, validate_state *state,
                  unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, dest->reg != NULL);

   validate_assert(state, dest->parent_instr == state->instr);

   struct hash_entry *entry2;
   entry2 = _mesa_hash_table_search(state->regs, dest->reg);

   validate_assert(state, entry2);

   reg_validate_state *reg_state = (reg_validate_state *) entry2->data;
   _mesa_set_add(reg_state->defs, dest);

   validate_assert(state, reg_state->where_defined == state->impl &&
          "writing to a register declared in a different function");

   if (bit_sizes)
      validate_assert(state, dest->reg->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, dest->reg->num_components == num_components);

   validate_assert(state, (dest->reg->num_array_elems == 0 ||
          dest->base_offset < dest->reg->num_array_elems) &&
          "definitely out-of-bounds array access");

   if (dest->indirect) {
      validate_assert(state, dest->reg->num_array_elems != 0);
      validate_assert(state, (dest->indirect->is_ssa || dest->indirect->reg.indirect == NULL) &&
             "only one level of indirection allowed");
      validate_src(dest->indirect, state, 32, 1);
   }
}

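/* Validate an SSA definition: check that its index is unique, then walk its
 * use lists and add every use to the ssa_srcs set (if-uses tagged via
 * SET_PTR_BIT) so that validate_ssa_src() can later prove each use really
 * appears in exactly one use list.
 */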
static void
validate_ssa_def(nir_ssa_def *def, validate_state *state)
{
   validate_assert(state, def->index < state->impl->ssa_alloc);
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   validate_assert(state, def->parent_instr == state->instr);
   validate_num_components(state, def->num_components);

   list_validate(&def->uses);
   nir_foreach_use(src, def) {
      validate_assert(state, src->is_ssa);
      validate_assert(state, src->ssa == def);
      bool already_seen = false;
      _mesa_set_search_and_add(state->ssa_srcs, src, &already_seen);
      /* A nir_src should only appear once and only in one SSA def use list */
      validate_assert(state, !already_seen);
   }

   list_validate(&def->if_uses);
   nir_foreach_if_use(src, def) {
      validate_assert(state, src->is_ssa);
      validate_assert(state, src->ssa == def);
      bool already_seen = false;
      _mesa_set_search_and_add(state->ssa_srcs, SET_PTR_BIT(src, 0),
                               &already_seen);
      /* A nir_src should only appear once and only in one SSA def use list */
      validate_assert(state, !already_seen);
   }
}

static void
validate_dest(nir_dest *dest, validate_state *state,
              unsigned bit_sizes, unsigned num_components)
{
   if (dest->is_ssa) {
      if (bit_sizes)
         validate_assert(state, dest->ssa.bit_size & bit_sizes);
      if (num_components)
         validate_assert(state, dest->ssa.num_components == num_components);
      validate_ssa_def(&dest->ssa, state);
   } else {
      validate_reg_dest(&dest->reg, state, bit_sizes, num_components);
   }
}

static void
validate_alu_dest(nir_alu_instr *instr, validate_state *state)
{
   nir_alu_dest *dest = &instr->dest;

   if (instr->op == nir_op_mov)
      assert(!dest->saturate);

   unsigned dest_size = nir_dest_num_components(dest->dest);
   /*
    * validate that the instruction doesn't write to components not in the
    * register/SSA value
    */
   validate_assert(state, !(dest->write_mask & ~((1 << dest_size) - 1)));

   /* validate that saturate is only ever used on instructions with
    * destinations of type float
    */
   nir_alu_instr *alu = nir_instr_as_alu(state->instr);
   validate_assert(state,
          (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) ==
           nir_type_float) ||
          !dest->saturate);

   validate_dest(&dest->dest, state, 0, 0);
}

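/* Validate an ALU instruction.  Sized input/output types must match their
 * declared bit size exactly; all unsized ones must agree on a single bit
 * size, which is inferred from the first unsized source encountered.
 */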
static void
validate_alu_instr(nir_alu_instr *instr, validate_state *state)
{
   validate_assert(state, instr->op < nir_num_opcodes);

   unsigned instr_bit_size = 0;
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
      unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
      if (nir_alu_type_get_type_size(src_type)) {
         validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
      } else if (instr_bit_size) {
         validate_assert(state, src_bit_size == instr_bit_size);
      } else {
         instr_bit_size = src_bit_size;
      }

      if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
         /* 8-bit float isn't a thing */
         validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
                                src_bit_size == 64);
      }

      validate_alu_src(instr, i, state);
   }

   nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
   unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest);
   if (nir_alu_type_get_type_size(dest_type)) {
      validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
   } else if (instr_bit_size) {
      validate_assert(state, dest_bit_size == instr_bit_size);
   } else {
      /* The only unsized thing is the destination so it's vacuously valid */
   }

   if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
      /* 8-bit float isn't a thing */
      validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
                             dest_bit_size == 64);
   }

   validate_alu_dest(instr, state);
}

static void
validate_var_use(nir_variable *var, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
   validate_assert(state, entry);
   if (entry && var->data.mode == nir_var_function_temp)
      validate_assert(state, (nir_function_impl *) entry->data == state->impl);
}

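/* Validate a deref instruction.  Variable derefs must match their variable's
 * mode and type, cast derefs are mostly trusted, and every other deref type
 * must chain off a parent deref whose type and modes it refines.
 */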
static void
validate_deref_instr(nir_deref_instr *instr, validate_state *state)
{
   if (instr->deref_type == nir_deref_type_var) {
      /* Variable dereferences are stupid simple. */
      validate_assert(state, instr->modes == instr->var->data.mode);
      validate_assert(state, instr->type == instr->var->type);
      validate_var_use(instr->var, state);
   } else if (instr->deref_type == nir_deref_type_cast) {
      /* For cast, we simply have to trust the instruction.  It's up to
       * lowering passes and front/back-ends to make them sane.
       */
      validate_src(&instr->parent, state, 0, 0);

      /* Most variable modes in NIR can only exist by themselves. */
      if (instr->modes & ~nir_var_mem_generic)
         validate_assert(state, util_bitcount(instr->modes) == 1);

      nir_deref_instr *parent = nir_src_as_deref(instr->parent);
      if (parent) {
         /* Casts can change the mode but it can't change completely.  The new
          * mode must have some bits in common with the old.
          */
         validate_assert(state, instr->modes & parent->modes);
      } else {
         /* If our parent isn't a deref, just assert the mode is there */
         validate_assert(state, instr->modes != 0);
      }

      /* We just validate that the type is there */
      validate_assert(state, instr->type);
      if (instr->cast.align_mul > 0) {
         validate_assert(state, util_is_power_of_two_nonzero(instr->cast.align_mul));
         validate_assert(state, instr->cast.align_offset < instr->cast.align_mul);
      } else {
         validate_assert(state, instr->cast.align_offset == 0);
      }
   } else {
      /* We require the parent to be SSA.  This may be lifted in the future */
      validate_assert(state, instr->parent.is_ssa);

      /* The parent pointer value must have the same number of components
       * as the destination.
       */
      validate_src(&instr->parent, state, nir_dest_bit_size(instr->dest),
                   nir_dest_num_components(instr->dest));

      nir_instr *parent_instr = instr->parent.ssa->parent_instr;

      /* The parent must come from another deref instruction */
      validate_assert(state, parent_instr->type == nir_instr_type_deref);

      nir_deref_instr *parent = nir_instr_as_deref(parent_instr);

      validate_assert(state, instr->modes == parent->modes);

      switch (instr->deref_type) {
      case nir_deref_type_struct:
         validate_assert(state, glsl_type_is_struct_or_ifc(parent->type));
         validate_assert(state,
            instr->strct.index < glsl_get_length(parent->type));
         validate_assert(state, instr->type ==
            glsl_get_struct_field(parent->type, instr->strct.index));
         break;

      case nir_deref_type_array:
      case nir_deref_type_array_wildcard:
         if (instr->modes & nir_var_vec_indexable_modes) {
            /* Shared variables and UBO/SSBOs have a bit more relaxed rules
             * because we need to be able to handle array derefs on vectors.
             * Fortunately, nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type) ||
                                   glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type));
         }
         validate_assert(state,
            instr->type == glsl_get_array_element(parent->type));

         if (instr->deref_type == nir_deref_type_array) {
            validate_src(&instr->arr.index, state,
                         nir_dest_bit_size(instr->dest), 1);
         }
         break;

      case nir_deref_type_ptr_as_array:
         /* ptr_as_array derefs must have a parent that is either an array,
          * ptr_as_array, or cast.  If the parent is a cast, we get the stride
          * information (if any) from the cast deref.
          */
         validate_assert(state,
                         parent->deref_type == nir_deref_type_array ||
                         parent->deref_type == nir_deref_type_ptr_as_array ||
                         parent->deref_type == nir_deref_type_cast);
         validate_src(&instr->arr.index, state,
                      nir_dest_bit_size(instr->dest), 1);
         break;

      default:
         unreachable("Invalid deref instruction type");
      }
   }

   /* We intentionally don't validate the size of the destination because we
    * want to let other compiler components such as SPIR-V decide how big
    * pointers should be.
    */
   validate_dest(&instr->dest, state, 0, 0);

   /* Deref instructions as if conditions don't make sense because if
    * conditions expect well-formed Booleans.  If you want to compare with
    * NULL, an explicit comparison operation should be used.
    */
   validate_assert(state, list_is_empty(&instr->dest.ssa.if_uses));

   /* Certain modes cannot be used as sources for phi instructions because
    * way too many passes assume that they can always chase deref chains.
    */
   nir_foreach_use(use, &instr->dest.ssa) {
      if (use->parent_instr->type == nir_instr_type_phi) {
         validate_assert(state, !(instr->modes & (nir_var_shader_in |
                                                  nir_var_shader_out |
                                                  nir_var_uniform)));
      }
   }
}

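/* Returns true if the intrinsic is vectorized, i.e. its destination or any
 * of its sources has a variable number of components (declared as 0 in
 * nir_intrinsic_infos).
 */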
static bool
vectorized_intrinsic(nir_intrinsic_instr *intr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];

   if (info->dest_components == 0)
      return true;

   for (unsigned i = 0; i < info->num_srcs; i++)
      if (info->src_components[i] == 0)
         return true;

   return false;
}

/** Returns the image format or PIPE_FORMAT_COUNT for incomplete derefs
 *
 * We use PIPE_FORMAT_COUNT for incomplete derefs because PIPE_FORMAT_NONE
 * indicates that we found the variable but it has no format specified.
 */
static enum pipe_format
image_intrin_format(nir_intrinsic_instr *instr)
{
   if (nir_intrinsic_format(instr) != PIPE_FORMAT_NONE)
      return nir_intrinsic_format(instr);

   /* If this is not a deref intrinsic, PIPE_FORMAT_NONE is the best we can do */
   if (nir_intrinsic_infos[instr->intrinsic].src_components[0] != -1)
      return PIPE_FORMAT_NONE;

   nir_variable *var = nir_intrinsic_get_var(instr, 0);
   if (var == NULL)
      return PIPE_FORMAT_COUNT;

   return var->data.image.format;
}

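/* Validate an intrinsic instruction.  The switch below pins down the
 * expected destination and per-source bit sizes for intrinsics with extra
 * constraints; the generic code after it then validates every source and
 * the destination against nir_intrinsic_infos.
 */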
static void
validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
{
   unsigned dest_bit_size = 0;
   unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = { 0, };
   switch (instr->intrinsic) {
   case nir_intrinsic_convert_alu_types: {
      nir_alu_type src_type = nir_intrinsic_src_type(instr);
      nir_alu_type dest_type = nir_intrinsic_dest_type(instr);
      dest_bit_size = nir_alu_type_get_type_size(dest_type);
      src_bit_sizes[0] = nir_alu_type_get_type_size(src_type);
      validate_assert(state, dest_bit_size != 0);
      validate_assert(state, src_bit_sizes[0] != 0);
      break;
   }

   case nir_intrinsic_load_param: {
      unsigned param_idx = nir_intrinsic_param_idx(instr);
      validate_assert(state, param_idx < state->impl->function->num_params);
      nir_parameter *param = &state->impl->function->params[param_idx];
      validate_assert(state, instr->num_components == param->num_components);
      dest_bit_size = param->bit_size;
      break;
   }

   case nir_intrinsic_load_deref: {
      nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
      assert(src);
      validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
                      (src->modes == nir_var_uniform &&
                       glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(src->type));
      dest_bit_size = glsl_get_bit_size(src->type);
      /* Also allow 32-bit boolean load operations */
      if (glsl_type_is_boolean(src->type))
         dest_bit_size |= 32;
      break;
   }

   case nir_intrinsic_store_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      assert(dst);
      validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(dst->type));
      src_bit_sizes[1] = glsl_get_bit_size(dst->type);
      /* Also allow 32-bit boolean store operations */
      if (glsl_type_is_boolean(dst->type))
         src_bit_sizes[1] |= 32;
      validate_assert(state, !nir_deref_mode_may_be(dst, nir_var_read_only_modes));
      break;
   }

   case nir_intrinsic_copy_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
      validate_assert(state, glsl_get_bare_type(dst->type) ==
                             glsl_get_bare_type(src->type));
      validate_assert(state, !nir_deref_mode_may_be(dst, nir_var_read_only_modes));
      break;
   }

   case nir_intrinsic_load_ubo_vec4: {
      int bit_size = nir_dest_bit_size(instr->dest);
      validate_assert(state, bit_size >= 8);
      validate_assert(state, (nir_intrinsic_component(instr) +
                              instr->num_components) * (bit_size / 8) <= 16);
      break;
   }

   case nir_intrinsic_load_ubo:
      /* Make sure that the creator didn't forget to set the range_base+range. */
      validate_assert(state, nir_intrinsic_range(instr) != 0);
      FALLTHROUGH;
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_global_constant:
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_load_constant:
      /* These memory load operations must have alignments */
      validate_assert(state,
         util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
      validate_assert(state, nir_intrinsic_align_offset(instr) <
                             nir_intrinsic_align_mul(instr));
      FALLTHROUGH;

   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_per_primitive_output:
   case nir_intrinsic_load_push_constant:
      /* All memory load operations must load at least a byte */
      validate_assert(state, nir_dest_bit_size(instr->dest) >= 8);
      break;

   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
      /* These memory store operations must also have alignments */
      validate_assert(state,
         util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
      validate_assert(state, nir_intrinsic_align_offset(instr) <
                             nir_intrinsic_align_mul(instr));
      FALLTHROUGH;

   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      /* All memory store operations must store at least a byte */
      validate_assert(state, nir_src_bit_size(instr->src[0]) >= 8);
      break;

   case nir_intrinsic_deref_mode_is:
   case nir_intrinsic_addr_mode_is:
      validate_assert(state,
         util_bitcount(nir_intrinsic_memory_modes(instr)) == 1);
      break;

   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_bindless_image_atomic_comp_swap: {
      enum pipe_format format = image_intrin_format(instr);
      if (format != PIPE_FORMAT_COUNT) {
         validate_assert(state, format == PIPE_FORMAT_R32_UINT ||
                                format == PIPE_FORMAT_R32_SINT ||
                                format == PIPE_FORMAT_R64_UINT ||
                                format == PIPE_FORMAT_R64_SINT);
         validate_assert(state, nir_dest_bit_size(instr->dest) ==
                                util_format_get_blocksizebits(format));
      }
      break;
   }

   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_exchange: {
      enum pipe_format format = image_intrin_format(instr);
      if (format != PIPE_FORMAT_COUNT) {
         validate_assert(state, format == PIPE_FORMAT_R32_UINT ||
                                format == PIPE_FORMAT_R32_SINT ||
                                format == PIPE_FORMAT_R32_FLOAT ||
                                format == PIPE_FORMAT_R64_UINT ||
                                format == PIPE_FORMAT_R64_SINT);
         validate_assert(state, nir_dest_bit_size(instr->dest) ==
                                util_format_get_blocksizebits(format));
      }
      break;
   }

   case nir_intrinsic_image_deref_atomic_fadd:
   case nir_intrinsic_image_atomic_fadd:
   case nir_intrinsic_bindless_image_atomic_fadd: {
      enum pipe_format format = image_intrin_format(instr);
      validate_assert(state, format == PIPE_FORMAT_COUNT ||
                             format == PIPE_FORMAT_R32_FLOAT);
      validate_assert(state, nir_dest_bit_size(instr->dest) == 32);
      break;
   }

   case nir_intrinsic_image_deref_atomic_fmin:
   case nir_intrinsic_image_deref_atomic_fmax:
   case nir_intrinsic_image_atomic_fmin:
   case nir_intrinsic_image_atomic_fmax:
   case nir_intrinsic_bindless_image_atomic_fmin:
   case nir_intrinsic_bindless_image_atomic_fmax: {
      enum pipe_format format = image_intrin_format(instr);
      validate_assert(state, format == PIPE_FORMAT_COUNT ||
                             format == PIPE_FORMAT_R16_FLOAT ||
                             format == PIPE_FORMAT_R32_FLOAT ||
                             format == PIPE_FORMAT_R64_FLOAT);
      validate_assert(state, nir_dest_bit_size(instr->dest) ==
                             util_format_get_blocksizebits(format));
      break;
   }

   default:
      break;
   }

   if (instr->num_components > 0)
      validate_num_components(state, instr->num_components);

   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
   unsigned num_srcs = info->num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      unsigned components_read = nir_intrinsic_src_components(instr, i);

      validate_num_components(state, components_read);

      validate_src(&instr->src[i], state, src_bit_sizes[i], components_read);
   }

   if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
      unsigned components_written = nir_intrinsic_dest_components(instr);
      unsigned bit_sizes = info->dest_bit_sizes;
      if (!bit_sizes && info->bit_size_src >= 0)
         bit_sizes = nir_src_bit_size(instr->src[info->bit_size_src]);

      validate_num_components(state, components_written);
      if (dest_bit_size && bit_sizes)
         validate_assert(state, dest_bit_size & bit_sizes);
      else
         dest_bit_size = dest_bit_size ? dest_bit_size : bit_sizes;

      validate_dest(&instr->dest, state, dest_bit_size, components_written);
   }

   if (!vectorized_intrinsic(instr))
      validate_assert(state, instr->num_components == 0);

   if (nir_intrinsic_has_write_mask(instr)) {
      unsigned component_mask = BITFIELD_MASK(instr->num_components);
      validate_assert(state, (nir_intrinsic_write_mask(instr) & ~component_mask) == 0);
   }

   if (nir_intrinsic_has_io_xfb(instr)) {
      unsigned used_mask = 0;

      for (unsigned i = 0; i < 4; i++) {
         nir_io_xfb xfb = i < 2 ? nir_intrinsic_io_xfb(instr) :
                                  nir_intrinsic_io_xfb2(instr);
         unsigned xfb_mask = BITFIELD_RANGE(i, xfb.out[i % 2].num_components);

         /* Each component can be used only once by transform feedback info. */
         validate_assert(state, (xfb_mask & used_mask) == 0);
         used_mask |= xfb_mask;
      }
   }

   if (nir_intrinsic_has_io_semantics(instr) &&
       !nir_intrinsic_infos[instr->intrinsic].has_dest) {
      nir_io_semantics sem = nir_intrinsic_io_semantics(instr);

      /* An output that has no effect shouldn't be present in the IR. */
      validate_assert(state,
                      (nir_slot_is_sysval_output(sem.location) &&
                       !sem.no_sysval_output) ||
                      (nir_slot_is_varying(sem.location) && !sem.no_varying) ||
                      nir_instr_xfb_write_mask(instr));
   }
}

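/* Validate a texture instruction: each source type may appear at most once,
 * and several source types are only legal for particular texture opcodes.
 */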
static void
validate_tex_instr(nir_tex_instr *instr, validate_state *state)
{
   bool src_type_seen[nir_num_tex_src_types];
   for (unsigned i = 0; i < nir_num_tex_src_types; i++)
      src_type_seen[i] = false;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      validate_assert(state, !src_type_seen[instr->src[i].src_type]);
      src_type_seen[instr->src[i].src_type] = true;
      validate_src(&instr->src[i].src, state,
                   0, nir_tex_instr_src_size(instr, i));

      switch (instr->src[i].src_type) {

      case nir_tex_src_comparator:
         validate_assert(state, instr->is_shadow);
         break;

      case nir_tex_src_bias:
         validate_assert(state, instr->op == nir_texop_txb ||
                                instr->op == nir_texop_tg4);
         break;

      case nir_tex_src_lod:
         validate_assert(state, instr->op != nir_texop_tex &&
                                instr->op != nir_texop_txb &&
                                instr->op != nir_texop_txd &&
                                instr->op != nir_texop_lod);
         break;

      case nir_tex_src_ddx:
      case nir_tex_src_ddy:
         validate_assert(state, instr->op == nir_texop_txd);
         break;

      case nir_tex_src_texture_deref: {
         nir_deref_instr *deref = nir_src_as_deref(instr->src[i].src);
         if (!validate_assert(state, deref))
            break;

         validate_assert(state, glsl_type_is_image(deref->type) ||
                                glsl_type_is_texture(deref->type) ||
                                glsl_type_is_sampler(deref->type));
         break;
      }

      case nir_tex_src_sampler_deref: {
         nir_deref_instr *deref = nir_src_as_deref(instr->src[i].src);
         if (!validate_assert(state, deref))
            break;

         validate_assert(state, glsl_type_is_sampler(deref->type));
         break;
      }

      case nir_tex_src_coord:
      case nir_tex_src_projector:
      case nir_tex_src_offset:
      case nir_tex_src_min_lod:
      case nir_tex_src_ms_index:
      case nir_tex_src_texture_offset:
      case nir_tex_src_sampler_offset:
      case nir_tex_src_plane:
      case nir_tex_src_texture_handle:
      case nir_tex_src_sampler_handle:
         break;

      default:
         break;
      }
   }

   if (instr->op != nir_texop_tg4)
      validate_assert(state, instr->component == 0);

   if (nir_tex_instr_has_explicit_tg4_offsets(instr)) {
      validate_assert(state, instr->op == nir_texop_tg4);
      validate_assert(state, !src_type_seen[nir_tex_src_offset]);
   }

   validate_dest(&instr->dest, state, 0, nir_tex_instr_dest_size(instr));

   validate_assert(state,
                   nir_alu_type_get_type_size(instr->dest_type) ==
                   nir_dest_bit_size(instr->dest));
}

static void
validate_call_instr(nir_call_instr *instr, validate_state *state)
{
   validate_assert(state, instr->num_params == instr->callee->num_params);

   for (unsigned i = 0; i < instr->num_params; i++) {
      validate_src(&instr->params[i], state,
                   instr->callee->params[i].bit_size,
                   instr->callee->params[i].num_components);
   }
}

static void
validate_const_value(nir_const_value *val, unsigned bit_size,
                     validate_state *state)
{
   /* In order for block copies to work properly for things like instruction
    * comparisons and [de]serialization, we require the unused bits of the
    * nir_const_value to be zero.
    */
   nir_const_value cmp_val;
   memset(&cmp_val, 0, sizeof(cmp_val));
   switch (bit_size) {
   case 1:
      cmp_val.b = val->b;
      break;
   case 8:
      cmp_val.u8 = val->u8;
      break;
   case 16:
      cmp_val.u16 = val->u16;
      break;
   case 32:
      cmp_val.u32 = val->u32;
      break;
   case 64:
      cmp_val.u64 = val->u64;
      break;
   default:
      validate_assert(state, !"Invalid load_const bit size");
   }
   validate_assert(state, memcmp(val, &cmp_val, sizeof(cmp_val)) == 0);
}

static void
validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);

   for (unsigned i = 0; i < instr->def.num_components; i++)
      validate_const_value(&instr->value[i], instr->def.bit_size, state);
}

static void
validate_ssa_undef_instr(nir_ssa_undef_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);
}

static void
validate_phi_instr(nir_phi_instr *instr, validate_state *state)
{
   /*
    * don't validate the sources until we get to them from their predecessor
    * basic blocks, to avoid validating an SSA use before its definition.
    */

   validate_dest(&instr->dest, state, 0, 0);

   exec_list_validate(&instr->srcs);
   validate_assert(state, exec_list_length(&instr->srcs) ==
          state->block->predecessors->entries);
}

static void
validate_jump_instr(nir_jump_instr *instr, validate_state *state)
{
   nir_block *block = state->block;
   validate_assert(state, &instr->instr == nir_block_last_instr(block));

   switch (instr->type) {
   case nir_jump_return:
   case nir_jump_halt:
      validate_assert(state, block->successors[0] == state->impl->end_block);
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_break:
      validate_assert(state, state->impl->structured);
      validate_assert(state, state->loop != NULL);
      if (state->loop) {
         nir_block *after =
            nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
         validate_assert(state, block->successors[0] == after);
      }
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_continue:
      validate_assert(state, state->impl->structured);
      validate_assert(state, state->loop != NULL);
      if (state->loop) {
         nir_block *first = nir_loop_first_block(state->loop);
         validate_assert(state, block->successors[0] == first);
      }
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_goto:
      validate_assert(state, !state->impl->structured);
      validate_assert(state, instr->target == block->successors[0]);
      validate_assert(state, instr->target != NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_goto_if:
      validate_assert(state, !state->impl->structured);
      validate_assert(state, instr->target == block->successors[1]);
      validate_assert(state, instr->else_target == block->successors[0]);
      validate_src(&instr->condition, state, 0, 1);
      validate_assert(state, instr->target != NULL);
      validate_assert(state, instr->else_target != NULL);
      break;

   default:
      validate_assert(state, !"Invalid jump instruction type");
      break;
   }
}

static void
validate_instr(nir_instr *instr, validate_state *state)
{
   validate_assert(state, instr->block == state->block);

   state->instr = instr;

   if (state->shader_gc_list)
      validate_assert(state, _mesa_set_search(state->shader_gc_list, instr));

   switch (instr->type) {
   case nir_instr_type_alu:
      validate_alu_instr(nir_instr_as_alu(instr), state);
      break;

   case nir_instr_type_deref:
      validate_deref_instr(nir_instr_as_deref(instr), state);
      break;

   case nir_instr_type_call:
      validate_call_instr(nir_instr_as_call(instr), state);
      break;

   case nir_instr_type_intrinsic:
      validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
      break;

   case nir_instr_type_tex:
      validate_tex_instr(nir_instr_as_tex(instr), state);
      break;

   case nir_instr_type_load_const:
      validate_load_const_instr(nir_instr_as_load_const(instr), state);
      break;

   case nir_instr_type_phi:
      validate_phi_instr(nir_instr_as_phi(instr), state);
      break;

   case nir_instr_type_ssa_undef:
      validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
      break;

   case nir_instr_type_jump:
      validate_jump_instr(nir_instr_as_jump(instr), state);
      break;

   default:
      validate_assert(state, !"Invalid instruction type");
      break;
   }

   state->instr = NULL;
}

static void
validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
{
   state->instr = &instr->instr;

   validate_assert(state, instr->dest.is_ssa);

   exec_list_validate(&instr->srcs);
   nir_foreach_phi_src(src, instr) {
      if (src->pred == pred) {
         validate_assert(state, src->src.is_ssa);
         validate_src(&src->src, state, instr->dest.ssa.bit_size,
                      instr->dest.ssa.num_components);
         state->instr = NULL;
         return;
      }
   }
   validate_assert(state, !"Phi does not have a source corresponding to one "
                           "of its predecessor blocks");
}

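/* Validate, for every phi node at the start of the successor block, the
 * source corresponding to the edge from "block".  Phi sources are validated
 * here rather than with the phi itself so that an SSA use is never
 * validated before its definition has been seen.
 */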
static void
validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
{
   nir_foreach_instr(instr, succ) {
      if (instr->type != nir_instr_type_phi)
         break;

      validate_phi_src(nir_instr_as_phi(instr), block, state);
   }
}

static void
collect_blocks(struct exec_list *cf_list, validate_state *state)
{
   /* We walk the blocks manually here rather than using nir_foreach_block for
    * a few reasons:
    *
    *  1. nir_foreach_block() doesn't work properly for unstructured NIR and
    *     we need to be able to handle all forms of NIR here.
    *
    *  2. We want to call exec_list_validate() on every linked list in the IR
    *     which means we need to touch every linked list, and just walking
    *     blocks with nir_foreach_block() would make that difficult.  In
    *     particular, we want to validate each list before the first time we
    *     walk it so that we catch broken lists in exec_list_validate()
    *     instead of getting stuck in a hard-to-debug infinite loop in the
    *     validator.
    *
    *  3. nir_foreach_block() depends on several invariants of the CF node
    *     hierarchy which nir_validate_shader() is responsible for verifying.
    *     If we used nir_foreach_block() in nir_validate_shader(), we could
    *     end up blowing up on a bad list walk instead of throwing the much
    *     easier to debug validation error.
    */
   exec_list_validate(cf_list);
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block:
         _mesa_set_add(state->blocks, nir_cf_node_as_block(node));
         break;

      case nir_cf_node_if:
         collect_blocks(&nir_cf_node_as_if(node)->then_list, state);
         collect_blocks(&nir_cf_node_as_if(node)->else_list, state);
         break;

      case nir_cf_node_loop:
         collect_blocks(&nir_cf_node_as_loop(node)->body, state);
         break;

      default:
         unreachable("Invalid CF node type");
      }
   }
}

static void validate_cf_node(nir_cf_node *node, validate_state *state);

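/* Check that the successor/predecessor links of a block are consistent:
 * every successor must be a known block that lists us as a predecessor,
 * and every predecessor must be a known block that lists us as a successor.
 */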
1214 static void
validate_block_predecessors(nir_block * block,validate_state * state)1215 validate_block_predecessors(nir_block *block, validate_state *state)
1216 {
1217    for (unsigned i = 0; i < 2; i++) {
1218       if (block->successors[i] == NULL)
1219          continue;
1220 
1221       /* The block has to exist in the nir_function_impl */
1222       validate_assert(state, _mesa_set_search(state->blocks,
1223                                               block->successors[i]));
1224 
1225       /* And we have to be in our successor's predecessors set */
1226       validate_assert(state,
1227          _mesa_set_search(block->successors[i]->predecessors, block));
1228 
1229       validate_phi_srcs(block, block->successors[i], state);
1230    }
1231 
1232    /* The start block cannot have any predecessors */
1233    if (block == nir_start_block(state->impl))
1234       validate_assert(state, block->predecessors->entries == 0);
1235 
1236    set_foreach(block->predecessors, entry) {
1237       const nir_block *pred = entry->key;
1238       validate_assert(state, _mesa_set_search(state->blocks, pred));
1239       validate_assert(state, pred->successors[0] == block ||
1240                              pred->successors[1] == block);
1241    }
1242 }
1243 
1244 static void
validate_block(nir_block * block,validate_state * state)1245 validate_block(nir_block *block, validate_state *state)
1246 {
1247    validate_assert(state, block->cf_node.parent == state->parent_node);
1248 
1249    state->block = block;
1250 
1251    exec_list_validate(&block->instr_list);
1252    nir_foreach_instr(instr, block) {
1253       if (instr->type == nir_instr_type_phi) {
1254          validate_assert(state, instr == nir_block_first_instr(block) ||
1255                 nir_instr_prev(instr)->type == nir_instr_type_phi);
1256       }
1257 
1258       validate_instr(instr, state);
1259    }
1260 
1261    validate_assert(state, block->successors[0] != NULL);
1262    validate_assert(state, block->successors[0] != block->successors[1]);
1263    validate_block_predecessors(block, state);
1264 
1265    if (!state->impl->structured) {
1266       validate_assert(state, nir_block_ends_in_jump(block));
1267    } else if (!nir_block_ends_in_jump(block)) {
1268       nir_cf_node *next = nir_cf_node_next(&block->cf_node);
1269       if (next == NULL) {
1270          switch (state->parent_node->type) {
1271          case nir_cf_node_loop: {
1272             nir_block *first = nir_loop_first_block(state->loop);
1273             validate_assert(state, block->successors[0] == first);
1274             /* due to the hack for infinite loops, block->successors[1] may
1275              * point to the block after the loop.
1276              */
1277             break;
1278          }
1279 
1280          case nir_cf_node_if: {
1281             nir_block *after =
1282                nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
1283             validate_assert(state, block->successors[0] == after);
1284             validate_assert(state, block->successors[1] == NULL);
1285             break;
1286          }
1287 
1288          case nir_cf_node_function:
1289             validate_assert(state, block->successors[0] == state->impl->end_block);
1290             validate_assert(state, block->successors[1] == NULL);
1291             break;
1292 
1293          default:
1294             unreachable("unknown control flow node type");
1295          }
1296       } else {
1297          if (next->type == nir_cf_node_if) {
1298             nir_if *if_stmt = nir_cf_node_as_if(next);
1299             validate_assert(state, block->successors[0] ==
1300                    nir_if_first_then_block(if_stmt));
1301             validate_assert(state, block->successors[1] ==
1302                    nir_if_first_else_block(if_stmt));
1303          } else if (next->type == nir_cf_node_loop) {
1304             nir_loop *loop = nir_cf_node_as_loop(next);
1305             validate_assert(state, block->successors[0] ==
1306                    nir_loop_first_block(loop));
1307             validate_assert(state, block->successors[1] == NULL);
1308          } else {
1309             validate_assert(state,
1310                !"Structured NIR cannot have consecutive blocks");
1311          }
1312       }
1313    }
1314 }
1315 
1316 
1317 static void
validate_end_block(nir_block * block,validate_state * state)1318 validate_end_block(nir_block *block, validate_state *state)
1319 {
1320    validate_assert(state, block->cf_node.parent == &state->impl->cf_node);
1321 
1322    exec_list_validate(&block->instr_list);
1323    validate_assert(state, exec_list_is_empty(&block->instr_list));
1324 
1325    validate_assert(state, block->successors[0] == NULL);
1326    validate_assert(state, block->successors[1] == NULL);
1327    validate_block_predecessors(block, state);
1328 }
1329 
static void
validate_if(nir_if *if_stmt, validate_state *state)
{
   validate_assert(state, state->impl->structured);

   state->if_stmt = if_stmt;

   validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_src(&if_stmt->condition, state, 0, 1);

   validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
   validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &if_stmt->cf_node;

   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
      validate_cf_node(cf_node, state);
   }

   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->if_stmt = NULL;
}

static void
validate_loop(nir_loop *loop, validate_state *state)
{
   validate_assert(state, state->impl->structured);

   validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_assert(state, !exec_list_is_empty(&loop->body));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &loop->cf_node;
   nir_loop *old_loop = state->loop;
   state->loop = loop;

   foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->loop = old_loop;
}

static void
validate_cf_node(nir_cf_node *node, validate_state *state)
{
   validate_assert(state, node->parent == state->parent_node);

   switch (node->type) {
   case nir_cf_node_block:
      validate_block(nir_cf_node_as_block(node), state);
      break;

   case nir_cf_node_if:
      validate_if(nir_cf_node_as_if(node), state);
      break;

   case nir_cf_node_loop:
      validate_loop(nir_cf_node_as_loop(node), state);
      break;

   default:
      unreachable("Invalid CF node type");
   }
}

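/* Register validation is split into two phases: prevalidate_reg_decl
 * registers each declaration and creates empty use/if-use/def sets, which
 * instruction validation fills in as it encounters each source and
 * destination.  After the function body has been walked,
 * postvalidate_reg_decl checks that those sets exactly match the register's
 * own use/def lists by draining them entry by entry.
 */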
static void
prevalidate_reg_decl(nir_register *reg, validate_state *state)
{
   validate_assert(state, reg->index < state->impl->reg_alloc);
   validate_assert(state, !BITSET_TEST(state->regs_found, reg->index));
   validate_num_components(state, reg->num_components);
   BITSET_SET(state->regs_found, reg->index);

   list_validate(&reg->uses);
   list_validate(&reg->defs);
   list_validate(&reg->if_uses);

   reg_validate_state *reg_state = ralloc(state->regs, reg_validate_state);
   reg_state->uses = _mesa_pointer_set_create(reg_state);
   reg_state->if_uses = _mesa_pointer_set_create(reg_state);
   reg_state->defs = _mesa_pointer_set_create(reg_state);

   reg_state->where_defined = state->impl;

   _mesa_hash_table_insert(state->regs, reg, reg_state);
}

static void
postvalidate_reg_decl(nir_register *reg, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->regs, reg);

   assume(entry);
   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   nir_foreach_use(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->uses, entry);
   }
   validate_assert(state, reg_state->uses->entries == 0);

   nir_foreach_if_use(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->if_uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->if_uses, entry);
   }
   validate_assert(state, reg_state->if_uses->entries == 0);

   nir_foreach_def(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->defs, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->defs, entry);
   }
   validate_assert(state, reg_state->defs->entries == 0);
}

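/* Recursively checks that a nir_constant's layout matches its GLSL type:
 * vectors and scalars validate each component's value and require unused
 * trailing components to be zero, while structs/interfaces and
 * arrays/matrices recurse into their elements.
 */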
static void
validate_constant(nir_constant *c, const struct glsl_type *type,
                  validate_state *state)
{
   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(type);
      unsigned bit_size = glsl_get_bit_size(type);
      for (unsigned i = 0; i < num_components; i++)
         validate_const_value(&c->values[i], bit_size, state);
      for (unsigned i = num_components; i < NIR_MAX_VEC_COMPONENTS; i++)
         validate_assert(state, c->values[i].u64 == 0);
   } else {
      validate_assert(state, c->num_elements == glsl_get_length(type));
      if (glsl_type_is_struct_or_ifc(type)) {
         for (unsigned i = 0; i < c->num_elements; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            validate_constant(c->elements[i], elem_type, state);
         }
      } else if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < c->num_elements; i++)
            validate_constant(c->elements[i], elem_type, state);
      } else {
         validate_assert(state, !"Invalid type for nir_constant");
      }
   }
}

static void
validate_var_decl(nir_variable *var, nir_variable_mode valid_modes,
                  validate_state *state)
{
   state->var = var;

   /* Must have exactly one mode set */
   validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));
   validate_assert(state, var->data.mode & valid_modes);

   if (var->data.compact) {
      /* The "compact" flag is only valid on arrays of scalars. */
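      /* (Compact is used for built-ins such as gl_ClipDistance[], which are
       * packed into vec4 slots instead of taking one slot per element.)
       */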
      assert(glsl_type_is_array(var->type));

      const struct glsl_type *type = glsl_get_array_element(var->type);
      if (nir_is_arrayed_io(var, state->shader->info.stage)) {
         if (var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }
         assert(glsl_type_is_array(type));
         assert(glsl_type_is_scalar(glsl_get_array_element(type)));
      } else {
         assert(glsl_type_is_scalar(type));
      }
   }

   if (var->num_members > 0) {
      const struct glsl_type *without_array = glsl_without_array(var->type);
      validate_assert(state, glsl_type_is_struct_or_ifc(without_array));
      validate_assert(state, var->num_members == glsl_get_length(without_array));
      validate_assert(state, var->members != NULL);
   }

   if (var->data.per_view)
      validate_assert(state, glsl_type_is_array(var->type));

   if (var->constant_initializer)
      validate_constant(var->constant_initializer, var->type, state);

   if (var->data.mode == nir_var_image) {
      validate_assert(state, !var->data.bindless);
      validate_assert(state, glsl_type_is_image(glsl_without_array(var->type)));
   }

   /*
    * TODO validate some things ir_validate.cpp does (requires more GLSL type
    * support)
    */

   _mesa_hash_table_insert(state->var_defs, var,
                           valid_modes == nir_var_function_temp ?
                           state->impl : NULL);

   state->var = NULL;
}

static bool
validate_ssa_def_dominance(nir_ssa_def *def, void *_state)
{
   validate_state *state = _state;

   validate_assert(state, def->index < state->impl->ssa_alloc);
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   return true;
}

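/* Checks the core SSA dominance rule for one source: if the def lives in
 * the same block as the use, it must already have been visited (i.e. it is
 * defined earlier in the block); otherwise the def's block must dominate
 * the use's block.
 */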
static bool
validate_src_dominance(nir_src *src, void *_state)
{
   validate_state *state = _state;
   if (!src->is_ssa)
      return true;

   if (src->ssa->parent_instr->block == src->parent_instr->block) {
      validate_assert(state, src->ssa->index < state->impl->ssa_alloc);
      validate_assert(state, BITSET_TEST(state->ssa_defs_found,
                                         src->ssa->index));
   } else {
      validate_assert(state, nir_block_dominates(src->ssa->parent_instr->block,
                                                 src->parent_instr->block));
   }
   return true;
}

static void
validate_ssa_dominance(nir_function_impl *impl, validate_state *state)
{
   nir_metadata_require(impl, nir_metadata_dominance);

   nir_foreach_block(block, impl) {
      state->block = block;
      nir_foreach_instr(instr, block) {
         state->instr = instr;
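         /* Phi sources are special: each source only has to dominate the end
          * of the corresponding predecessor block, not the phi itself.
          */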
         if (instr->type == nir_instr_type_phi) {
            nir_phi_instr *phi = nir_instr_as_phi(instr);
            nir_foreach_phi_src(src, phi) {
               validate_assert(state,
                  nir_block_dominates(src->src.ssa->parent_instr->block,
                                      src->pred));
            }
         } else {
            nir_foreach_src(instr, validate_src_dominance, state);
         }
         nir_foreach_ssa_def(instr, validate_ssa_def_dominance, state);
      }
   }
}

static void
validate_function_impl(nir_function_impl *impl, validate_state *state)
{
   /* Resize the ssa_srcs set.  It's likely that the size of this set will
    * never actually hit the number of SSA defs because we remove sources from
    * the set as we visit them.  (It could actually be much larger because
    * each SSA def can be used more than once.)  However, growing it now costs
    * us very little (the extra memory is already dwarfed by the SSA defs
    * themselves) and makes collisions much less likely.
    */
   _mesa_set_resize(state->ssa_srcs, impl->ssa_alloc);

   validate_assert(state, impl->function->impl == impl);
   validate_assert(state, impl->cf_node.parent == NULL);

   if (impl->preamble) {
      validate_assert(state, impl->function->is_entrypoint);
      validate_assert(state, impl->preamble->is_preamble);
   }

   validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
   validate_assert(state, impl->end_block->successors[0] == NULL);
   validate_assert(state, impl->end_block->successors[1] == NULL);

   state->impl = impl;
   state->parent_node = &impl->cf_node;

   exec_list_validate(&impl->locals);
   nir_foreach_function_temp_variable(var, impl) {
      validate_var_decl(var, nir_var_function_temp, state);
   }

   state->regs_found = reralloc(state->mem_ctx, state->regs_found,
                                BITSET_WORD, BITSET_WORDS(impl->reg_alloc));
   memset(state->regs_found, 0, BITSET_WORDS(impl->reg_alloc) *
                                sizeof(BITSET_WORD));
   exec_list_validate(&impl->registers);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      prevalidate_reg_decl(reg, state);
   }

   state->ssa_defs_found = reralloc(state->mem_ctx, state->ssa_defs_found,
                                    BITSET_WORD, BITSET_WORDS(impl->ssa_alloc));
   memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) *
                                    sizeof(BITSET_WORD));

   _mesa_set_clear(state->blocks, NULL);
   _mesa_set_resize(state->blocks, impl->num_blocks);
   collect_blocks(&impl->body, state);
   _mesa_set_add(state->blocks, impl->end_block);
   validate_assert(state, !exec_list_is_empty(&impl->body));
   foreach_list_typed(nir_cf_node, node, node, &impl->body) {
      validate_cf_node(node, state);
   }
   validate_end_block(impl->end_block, state);

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      postvalidate_reg_decl(reg, state);
   }

   validate_assert(state, state->ssa_srcs->entries == 0);
   _mesa_set_clear(state->ssa_srcs, NULL);

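   /* Whether to also run the (more expensive) SSA dominance check is
    * queried once via NIR_DEBUG() and cached in a function-local static.
    */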
   static int validate_dominance = -1;
   if (validate_dominance < 0) {
      validate_dominance =
         NIR_DEBUG(VALIDATE_SSA_DOMINANCE);
   }
   if (validate_dominance)
      validate_ssa_dominance(impl, state);
}

static void
validate_function(nir_function *func, validate_state *state)
{
   if (func->impl != NULL) {
      validate_assert(state, func->impl->function == func);
      validate_function_impl(func->impl, state);
   }
}

static void
init_validate_state(validate_state *state)
{
   state->mem_ctx = ralloc_context(NULL);
   state->regs = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->ssa_srcs = _mesa_pointer_set_create(state->mem_ctx);
   state->ssa_defs_found = NULL;
   state->regs_found = NULL;
   state->blocks = _mesa_pointer_set_create(state->mem_ctx);
   state->var_defs = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->errors = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->shader_gc_list = NIR_DEBUG(VALIDATE_GC_LIST) ?
                           _mesa_pointer_set_create(state->mem_ctx) : NULL;

   state->loop = NULL;
   state->instr = NULL;
   state->var = NULL;
}

static void
destroy_validate_state(validate_state *state)
{
   ralloc_free(state->mem_ctx);
}

mtx_t fail_dump_mutex = _MTX_INITIALIZER_NP;

static void
dump_errors(validate_state *state, const char *when)
{
   struct hash_table *errors = state->errors;

   /* Lock around dumping so that we get clean dumps in a multi-threaded
    * scenario.
    */
   mtx_lock(&fail_dump_mutex);

   if (when) {
      fprintf(stderr, "NIR validation failed %s\n", when);
      fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));
   } else {
      fprintf(stderr, "NIR validation failed with %d errors:\n",
              _mesa_hash_table_num_entries(errors));
   }

   nir_print_shader_annotated(state->shader, stderr, errors);

   if (_mesa_hash_table_num_entries(errors) > 0) {
      fprintf(stderr, "%d additional errors:\n",
              _mesa_hash_table_num_entries(errors));
      hash_table_foreach(errors, entry) {
         fprintf(stderr, "%s\n", (char *)entry->data);
      }
   }

   mtx_unlock(&fail_dump_mutex);

   abort();
}

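/* Validates an entire shader, aborting with an annotated dump of the IR if
 * any check fails.  "when" describes the point in compilation at which
 * validation ran.  Does nothing when validation is disabled via NIR_DEBUG.
 */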
void
nir_validate_shader(nir_shader *shader, const char *when)
{
   if (NIR_DEBUG(NOVALIDATE))
      return;

   validate_state state;
   init_validate_state(&state);

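   /* Collect the shader's GC list into a set for fast membership checks
    * during validation; only instructions still linked into a block are
    * included.
    */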
   if (state.shader_gc_list) {
      list_for_each_entry(nir_instr, instr, &shader->gc_list, gc_node) {
         if (instr->node.prev || instr->node.next)
            _mesa_set_add(state.shader_gc_list, instr);
      }
   }

   state.shader = shader;

   nir_variable_mode valid_modes =
      nir_var_shader_in |
      nir_var_shader_out |
      nir_var_shader_temp |
      nir_var_uniform |
      nir_var_mem_ubo |
      nir_var_system_value |
      nir_var_mem_ssbo |
      nir_var_mem_shared |
      nir_var_mem_global |
      nir_var_mem_push_const |
      nir_var_mem_constant |
      nir_var_image;

   if (gl_shader_stage_is_callable(shader->info.stage))
      valid_modes |= nir_var_shader_call_data;

   if (shader->info.stage == MESA_SHADER_ANY_HIT ||
       shader->info.stage == MESA_SHADER_CLOSEST_HIT ||
       shader->info.stage == MESA_SHADER_INTERSECTION)
      valid_modes |= nir_var_ray_hit_attrib;

   if (shader->info.stage == MESA_SHADER_TASK ||
       shader->info.stage == MESA_SHADER_MESH)
      valid_modes |= nir_var_mem_task_payload;

   exec_list_validate(&shader->variables);
   nir_foreach_variable_in_shader(var, shader)
      validate_var_decl(var, valid_modes, &state);

   exec_list_validate(&shader->functions);
   foreach_list_typed(nir_function, func, node, &shader->functions) {
      validate_function(func, &state);
   }

   if (_mesa_hash_table_num_entries(state.errors) > 0)
      dump_errors(&state, when);

   destroy_validate_state(&state);
}

void
nir_validate_ssa_dominance(nir_shader *shader, const char *when)
{
   if (NIR_DEBUG(NOVALIDATE))
      return;

   validate_state state;
   init_validate_state(&state);

   state.shader = shader;

   nir_foreach_function(func, shader) {
      if (func->impl == NULL)
         continue;

      state.ssa_defs_found = reralloc(state.mem_ctx, state.ssa_defs_found,
                                      BITSET_WORD,
                                      BITSET_WORDS(func->impl->ssa_alloc));
      memset(state.ssa_defs_found, 0, BITSET_WORDS(func->impl->ssa_alloc) *
                                      sizeof(BITSET_WORD));

      state.impl = func->impl;
      validate_ssa_dominance(func->impl, &state);
   }

   if (_mesa_hash_table_num_entries(state.errors) > 0)
      dump_errors(&state, when);

   destroy_validate_state(&state);
}

#endif /* NDEBUG */