1 /*
2  * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Rob Clark <robclark@freedesktop.org>
25  */
26 
27 #ifndef IR3_SHADER_H_
28 #define IR3_SHADER_H_
29 
30 #include <stdio.h>
31 
32 #include "c11/threads.h"
33 #include "compiler/nir/nir.h"
34 #include "compiler/shader_enums.h"
35 #include "util/bitscan.h"
36 #include "util/disk_cache.h"
37 
38 #include "ir3_compiler.h"
39 
40 struct glsl_type;
41 
42 /* driver param indices: */
43 enum ir3_driver_param {
44    /* compute shader driver params: */
45    IR3_DP_NUM_WORK_GROUPS_X = 0,
46    IR3_DP_NUM_WORK_GROUPS_Y = 1,
47    IR3_DP_NUM_WORK_GROUPS_Z = 2,
48    IR3_DP_WORK_DIM          = 3,
49    IR3_DP_BASE_GROUP_X = 4,
50    IR3_DP_BASE_GROUP_Y = 5,
51    IR3_DP_BASE_GROUP_Z = 6,
52    IR3_DP_CS_SUBGROUP_SIZE = 7,
53    IR3_DP_LOCAL_GROUP_SIZE_X = 8,
54    IR3_DP_LOCAL_GROUP_SIZE_Y = 9,
55    IR3_DP_LOCAL_GROUP_SIZE_Z = 10,
56    IR3_DP_SUBGROUP_ID_SHIFT = 11,
57    IR3_DP_WORKGROUP_ID_X = 12,
58    IR3_DP_WORKGROUP_ID_Y = 13,
59    IR3_DP_WORKGROUP_ID_Z = 14,
60    /* NOTE: gl_NumWorkGroups should be vec4 aligned because
61     * glDispatchComputeIndirect() needs to load these from
62     * the info->indirect buffer.  Keep that in mind when/if
63     * adding any addition CS driver params.
64     */
65    IR3_DP_CS_COUNT = 16, /* must be aligned to vec4 */
66 
67    /* vertex shader driver params: */
68    IR3_DP_DRAWID = 0,
69    IR3_DP_VTXID_BASE = 1,
70    IR3_DP_INSTID_BASE = 2,
71    IR3_DP_VTXCNT_MAX = 3,
72    /* user-clip-plane components, up to 8x vec4's: */
73    IR3_DP_UCP0_X = 4,
74    /* .... */
75    IR3_DP_UCP7_W = 35,
76    IR3_DP_VS_COUNT = 36, /* must be aligned to vec4 */
77 
78    /* fragment shader driver params: */
79    IR3_DP_FS_SUBGROUP_SIZE = 0,
80 };
81 
82 #define IR3_MAX_SHADER_BUFFERS  32
83 #define IR3_MAX_SHADER_IMAGES   32
84 #define IR3_MAX_SO_BUFFERS      4
85 #define IR3_MAX_SO_STREAMS      4
86 #define IR3_MAX_SO_OUTPUTS      64
87 #define IR3_MAX_UBO_PUSH_RANGES 32
88 
89 /* mirrors SYSTEM_VALUE_BARYCENTRIC_ but starting from 0 */
90 enum ir3_bary {
91    IJ_PERSP_PIXEL,
92    IJ_PERSP_SAMPLE,
93    IJ_PERSP_CENTROID,
94    IJ_PERSP_SIZE,
95    IJ_LINEAR_PIXEL,
96    IJ_LINEAR_CENTROID,
97    IJ_LINEAR_SAMPLE,
98    IJ_COUNT,
99 };
100 
101 /* Description of what wavesizes are allowed. */
102 enum ir3_wavesize_option {
103    IR3_SINGLE_ONLY,
104    IR3_SINGLE_OR_DOUBLE,
105    IR3_DOUBLE_ONLY,
106 };
107 
108 /**
109  * Description of a lowered UBO.
110  */
111 struct ir3_ubo_info {
112    uint32_t block;         /* Which constant block */
113    uint16_t bindless_base; /* For bindless, which base register is used */
114    bool bindless;
115 };
116 
117 /**
118  * Description of a range of a lowered UBO access.
119  *
120  * Drivers should not assume that there are not multiple disjoint
121  * lowered ranges of a single UBO.
122  */
123 struct ir3_ubo_range {
124    struct ir3_ubo_info ubo;
125    uint32_t offset;     /* start offset to push in the const register file */
126    uint32_t start, end; /* range of block that's actually used */
127 };
128 
129 struct ir3_ubo_analysis_state {
130    struct ir3_ubo_range range[IR3_MAX_UBO_PUSH_RANGES];
131    uint32_t num_enabled;
132    uint32_t size;
133    uint32_t
134       cmdstream_size; /* for per-gen backend to stash required cmdstream size */
135 };
136 
137 /**
138  * Describes the layout of shader consts.  This includes:
139  *   + User consts + driver lowered UBO ranges
140  *   + SSBO sizes
141  *   + Image sizes/dimensions
142  *   + Driver params (ie. IR3_DP_*)
143  *   + TFBO addresses (for generations that do not have hardware streamout)
144  *   + Lowered immediates
145  *
146  * For consts needed to pass internal values to shader which may or may not
147  * be required, rather than allocating worst-case const space, we scan the
148  * shader and allocate consts as-needed:
149  *
150  *   + SSBO sizes: only needed if shader has a get_ssbo_size intrinsic
151  *     for a given SSBO
152  *
153  *   + Image dimensions: needed to calculate pixel offset, but only for
154  *     images that have a image_store intrinsic
155  *
156  * Layout of constant registers, each section aligned to vec4.  Note
157  * that pointer size (ubo, etc) changes depending on generation.
158  *
159  *    user consts
160  *    preamble consts
161  *    UBO addresses
162  *    SSBO sizes
163  *    image dimensions
164  *    if (vertex shader) {
165  *        driver params (IR3_DP_VS_COUNT)
166  *        if (stream_output.num_outputs > 0)
167  *           stream-out addresses
168  *    } else if (compute_shader) {
169  *        kernel params
170  *        driver params (IR3_DP_CS_COUNT)
171  *    }
172  *    immediates
173  *
174  * Immediates go last mostly because they are inserted in the CP pass
175  * after the nir -> ir3 frontend.
176  *
177  * Note UBO size in bytes should be aligned to vec4
178  */
179 struct ir3_const_state {
180    unsigned num_ubos;
181    unsigned num_driver_params; /* scalar */
182 
183    /* UBO that should be mapped to the NIR shader's constant_data (or -1). */
184    int32_t constant_data_ubo;
185 
186    struct {
187       /* user const start at zero */
188       unsigned ubo;
189       unsigned image_dims;
190       unsigned kernel_params;
191       unsigned driver_param;
192       unsigned tfbo;
193       unsigned primitive_param;
194       unsigned primitive_map;
195       unsigned immediate;
196    } offsets;
197 
198    struct {
199       uint32_t mask;  /* bitmask of images that have image_store */
200       uint32_t count; /* number of consts allocated */
201       /* three const allocated per image which has image_store:
202        *  + cpp         (bytes per pixel)
203        *  + pitch       (y pitch)
204        *  + array_pitch (z pitch)
205        */
206       uint32_t off[IR3_MAX_SHADER_IMAGES];
207    } image_dims;
208 
209    unsigned immediates_count;
210    unsigned immediates_size;
211    uint32_t *immediates;
212 
213    unsigned preamble_size;
214 
215    /* State of ubo access lowered to push consts: */
216    struct ir3_ubo_analysis_state ubo_state;
217 };
218 
219 /**
220  * A single output for vertex transform feedback.
221  */
222 struct ir3_stream_output {
223    unsigned register_index  : 6;  /**< 0 to 63 (OUT index) */
224    unsigned start_component : 2;  /** 0 to 3 */
225    unsigned num_components  : 3;  /** 1 to 4 */
226    unsigned output_buffer   : 3;  /**< 0 to PIPE_MAX_SO_BUFFERS */
227    unsigned dst_offset      : 16; /**< offset into the buffer in dwords */
228    unsigned stream          : 2;  /**< 0 to 3 */
229 };
230 
231 /**
232  * Stream output for vertex transform feedback.
233  */
234 struct ir3_stream_output_info {
235    unsigned num_outputs;
236    /** stride for an entire vertex for each buffer in dwords */
237    uint16_t stride[IR3_MAX_SO_BUFFERS];
238 
239    /* These correspond to the VPC_SO_STREAM_CNTL fields */
240    uint8_t streams_written;
241    uint8_t buffer_to_stream[IR3_MAX_SO_BUFFERS];
242 
243    /**
244     * Array of stream outputs, in the order they are to be written in.
245     * Selected components are tightly packed into the output buffer.
246     */
247    struct ir3_stream_output output[IR3_MAX_SO_OUTPUTS];
248 };
249 
250 /**
251  * Starting from a4xx, HW supports pre-dispatching texture sampling
252  * instructions prior to scheduling a shader stage, when the
253  * coordinate maps exactly to an output of the previous stage.
254  */
255 
256 /**
257  * There is a limit in the number of pre-dispatches allowed for any
258  * given stage.
259  */
260 #define IR3_MAX_SAMPLER_PREFETCH 4
261 
262 /**
263  * This is the output stream value for 'cmd', as used by blob. It may
264  * encode the return type (in 3 bits) but it hasn't been verified yet.
265  */
266 #define IR3_SAMPLER_PREFETCH_CMD          0x4
267 #define IR3_SAMPLER_BINDLESS_PREFETCH_CMD 0x6
268 
269 /**
270  * Stream output for texture sampling pre-dispatches.
271  */
272 struct ir3_sampler_prefetch {
273    uint8_t src;
274    uint8_t samp_id;
275    uint8_t tex_id;
276    uint16_t samp_bindless_id;
277    uint16_t tex_bindless_id;
278    uint8_t dst;
279    uint8_t wrmask;
280    uint8_t half_precision;
281    uint8_t cmd;
282 };
283 
284 /* Configuration key used to identify a shader variant.. different
285  * shader variants can be used to implement features not supported
286  * in hw (two sided color), binning-pass vertex shader, etc.
287  *
288  * When adding to this struct, please update ir3_shader_variant()'s debug
289  * output.
290  */
291 struct ir3_shader_key {
292    union {
293       struct {
294          /*
295           * Combined Vertex/Fragment shader parameters:
296           */
297          unsigned ucp_enables : 8;
298 
299          /* do we need to check {v,f}saturate_{s,t,r}? */
300          unsigned has_per_samp : 1;
301 
302          /*
303           * Fragment shader variant parameters:
304           */
305          unsigned sample_shading : 1;
306          unsigned msaa           : 1;
307          /* used when shader needs to handle flat varyings (a4xx)
308           * for front/back color inputs to frag shader:
309           */
310          unsigned rasterflat : 1;
311 
312          /* Indicates that this is a tessellation pipeline which requires a
313           * whole different kind of vertex shader.  In case of
314           * tessellation, this field also tells us which kind of output
315           * topology the TES uses, which the TCS needs to know.
316           */
317 #define IR3_TESS_NONE      0
318 #define IR3_TESS_QUADS     1
319 #define IR3_TESS_TRIANGLES 2
320 #define IR3_TESS_ISOLINES  3
321          unsigned tessellation : 2;
322 
323          unsigned has_gs : 1;
324 
325          /* Whether stages after TCS read gl_PrimitiveID, used to determine
326           * whether the TCS has to store it in the tess factor BO.
327           */
328          unsigned tcs_store_primid : 1;
329 
330          /* Whether this variant sticks to the "safe" maximum constlen,
331           * which guarantees that the combined stages will never go over
332           * the limit:
333           */
334          unsigned safe_constlen : 1;
335 
336          /* Whether gl_Layer must be forced to 0 because it isn't written. */
337          unsigned layer_zero : 1;
338 
339          /* Whether gl_ViewportIndex must be forced to 0 because it isn't
340           * written. */
341          unsigned view_zero : 1;
342       };
343       uint32_t global;
344    };
345 
346    /* bitmask of ms shifts (a3xx) */
347    uint32_t vsamples, fsamples;
348 
349    /* bitmask of samplers which need astc srgb workaround (a4xx): */
350    uint16_t vastc_srgb, fastc_srgb;
351 
352    /* per-component (3-bit) swizzles of each sampler (a4xx tg4): */
353    uint16_t vsampler_swizzles[16];
354    uint16_t fsampler_swizzles[16];
355 };
356 
357 static inline unsigned
ir3_tess_mode(enum tess_primitive_mode tess_mode)358 ir3_tess_mode(enum tess_primitive_mode tess_mode)
359 {
360    switch (tess_mode) {
361    case TESS_PRIMITIVE_ISOLINES:
362       return IR3_TESS_ISOLINES;
363    case TESS_PRIMITIVE_TRIANGLES:
364       return IR3_TESS_TRIANGLES;
365    case TESS_PRIMITIVE_QUADS:
366       return IR3_TESS_QUADS;
367    default:
368       unreachable("bad tessmode");
369    }
370 }
371 
372 static inline uint32_t
ir3_tess_factor_stride(unsigned patch_type)373 ir3_tess_factor_stride(unsigned patch_type)
374 {
375    /* note: this matches the stride used by ir3's build_tessfactor_base */
376    switch (patch_type) {
377    case IR3_TESS_ISOLINES:
378       return 12;
379    case IR3_TESS_TRIANGLES:
380       return 20;
381    case IR3_TESS_QUADS:
382       return 28;
383    default:
384       unreachable("bad tessmode");
385    }
386 }
387 
388 static inline bool
ir3_shader_key_equal(const struct ir3_shader_key * a,const struct ir3_shader_key * b)389 ir3_shader_key_equal(const struct ir3_shader_key *a,
390                      const struct ir3_shader_key *b)
391 {
392    /* slow-path if we need to check {v,f}saturate_{s,t,r} */
393    if (a->has_per_samp || b->has_per_samp)
394       return memcmp(a, b, sizeof(struct ir3_shader_key)) == 0;
395    return a->global == b->global;
396 }
397 
398 /* will the two keys produce different lowering for a fragment shader? */
399 static inline bool
ir3_shader_key_changes_fs(struct ir3_shader_key * key,struct ir3_shader_key * last_key)400 ir3_shader_key_changes_fs(struct ir3_shader_key *key,
401                           struct ir3_shader_key *last_key)
402 {
403    if (last_key->has_per_samp || key->has_per_samp) {
404       if ((last_key->fsamples != key->fsamples) ||
405           (last_key->fastc_srgb != key->fastc_srgb) ||
406           memcmp(last_key->fsampler_swizzles, key->fsampler_swizzles,
407                 sizeof(key->fsampler_swizzles)))
408          return true;
409    }
410 
411    if (last_key->rasterflat != key->rasterflat)
412       return true;
413 
414    if (last_key->layer_zero != key->layer_zero)
415       return true;
416 
417    if (last_key->ucp_enables != key->ucp_enables)
418       return true;
419 
420    if (last_key->safe_constlen != key->safe_constlen)
421       return true;
422 
423    return false;
424 }
425 
426 /* will the two keys produce different lowering for a vertex shader? */
427 static inline bool
ir3_shader_key_changes_vs(struct ir3_shader_key * key,struct ir3_shader_key * last_key)428 ir3_shader_key_changes_vs(struct ir3_shader_key *key,
429                           struct ir3_shader_key *last_key)
430 {
431    if (last_key->has_per_samp || key->has_per_samp) {
432       if ((last_key->vsamples != key->vsamples) ||
433           (last_key->vastc_srgb != key->vastc_srgb) ||
434           memcmp(last_key->vsampler_swizzles, key->vsampler_swizzles,
435                 sizeof(key->vsampler_swizzles)))
436          return true;
437    }
438 
439    if (last_key->ucp_enables != key->ucp_enables)
440       return true;
441 
442    if (last_key->safe_constlen != key->safe_constlen)
443       return true;
444 
445    return false;
446 }
447 
448 /**
449  * On a4xx+a5xx, Images share state with textures and SSBOs:
450  *
451  *   + Uses texture (cat5) state/instruction (isam) to read
452  *   + Uses SSBO state and instructions (cat6) to write and for atomics
453  *
454  * Starting with a6xx, Images and SSBOs are basically the same thing,
455  * with texture state and isam also used for SSBO reads.
456  *
457  * On top of that, gallium makes the SSBO (shader_buffers) state semi
458  * sparse, with the first half of the state space used for atomic
459  * counters lowered to atomic buffers.  We could ignore this, but I
460  * don't think we could *really* handle the case of a single shader
461  * that used the max # of textures + images + SSBOs.  And once we are
462  * offsetting images by num_ssbos (or visa versa) to map them into
463  * the same hardware state, the hardware state has become coupled to
464  * the shader state, so at this point we might as well just use a
465  * mapping table to remap things from image/SSBO idx to hw idx.
466  *
467  * To make things less (more?) confusing, for the hw "SSBO" state
468  * (since it is really both SSBO and Image) I'll use the name "IBO"
469  */
470 struct ir3_ibo_mapping {
471 #define IBO_INVALID 0xff
472    /* Maps logical SSBO state to hw tex state: */
473    uint8_t ssbo_to_tex[IR3_MAX_SHADER_BUFFERS];
474 
475    /* Maps logical Image state to hw tex state: */
476    uint8_t image_to_tex[IR3_MAX_SHADER_IMAGES];
477 
478    /* Maps hw state back to logical SSBO or Image state:
479     *
480     * note IBO_SSBO ORd into values to indicate that the
481     * hw slot is used for SSBO state vs Image state.
482     */
483 #define IBO_SSBO 0x80
484    uint8_t tex_to_image[32];
485 
486    /* including real textures */
487    uint8_t num_tex;
488    /* the number of real textures, ie. image/ssbo start here */
489    uint8_t tex_base;
490 };
491 
492 struct ir3_disasm_info {
493    bool write_disasm;
494    char *nir;
495    char *disasm;
496 };
497 
498 /* Represents half register in regid */
499 #define HALF_REG_ID 0x100
500 
501 /**
502  * Shader variant which contains the actual hw shader instructions,
503  * and necessary info for shader state setup.
504  */
505 struct ir3_shader_variant {
506    struct fd_bo *bo;
507 
508    /* variant id (for debug) */
509    uint32_t id;
510 
511    struct ir3_shader_key key;
512 
513    /* vertex shaders can have an extra version for hwbinning pass,
514     * which is pointed to by so->binning:
515     */
516    bool binning_pass;
517    //	union {
518    struct ir3_shader_variant *binning;
519    struct ir3_shader_variant *nonbinning;
520    //	};
521 
522    struct ir3 *ir; /* freed after assembling machine instructions */
523 
524    /* shader variants form a linked list: */
525    struct ir3_shader_variant *next;
526 
527    /* replicated here to avoid passing extra ptrs everywhere: */
528    gl_shader_stage type;
529    struct ir3_shader *shader;
530 
531    /* variant's copy of nir->constant_data (since we don't track the NIR in
532     * the variant, and shader->nir is before the opt pass).  Moves to v->bin
533     * after assembly.
534     */
535    void *constant_data;
536 
537    /*
538     * Below here is serialized when written to disk cache:
539     */
540 
541    /* The actual binary shader instructions, size given by info.sizedwords: */
542    uint32_t *bin;
543 
544    struct ir3_const_state *const_state;
545 
546    /*
547     * The following macros are used by the shader disk cache save/
548     * restore paths to serialize/deserialize the variant.  Any
549     * pointers that require special handling in store_variant()
550     * and retrieve_variant() should go above here.
551     */
552 #define VARIANT_CACHE_START  offsetof(struct ir3_shader_variant, info)
553 #define VARIANT_CACHE_PTR(v) (((char *)v) + VARIANT_CACHE_START)
554 #define VARIANT_CACHE_SIZE                                                     \
555    (sizeof(struct ir3_shader_variant) - VARIANT_CACHE_START)
556 
557    struct ir3_info info;
558 
559    uint32_t constant_data_size;
560 
561    /* Levels of nesting of flow control:
562     */
563    unsigned branchstack;
564 
565    unsigned loops;
566 
567    /* the instructions length is in units of instruction groups
568     * (4 instructions for a3xx, 16 instructions for a4xx.. each
569     * instruction is 2 dwords):
570     */
571    unsigned instrlen;
572 
573    /* the constants length is in units of vec4's, and is the sum of
574     * the uniforms and the built-in compiler constants
575     */
576    unsigned constlen;
577 
578    /* The private memory size in bytes */
579    unsigned pvtmem_size;
580    /* Whether we should use the new per-wave layout rather than per-fiber. */
581    bool pvtmem_per_wave;
582 
583    /* Size in bytes of required shared memory */
584    unsigned shared_size;
585 
586    /* About Linkage:
587     *   + Let the frag shader determine the position/compmask for the
588     *     varyings, since it is the place where we know if the varying
589     *     is actually used, and if so, which components are used.  So
590     *     what the hw calls "outloc" is taken from the "inloc" of the
591     *     frag shader.
592     *   + From the vert shader, we only need the output regid
593     */
594 
595    bool frag_face, color0_mrt;
596    uint8_t fragcoord_compmask;
597 
598    /* NOTE: for input/outputs, slot is:
599     *   gl_vert_attrib  - for VS inputs
600     *   gl_varying_slot - for VS output / FS input
601     *   gl_frag_result  - for FS output
602     */
603 
604    /* varyings/outputs: */
605    unsigned outputs_count;
606    struct {
607       uint8_t slot;
608       uint8_t regid;
609       uint8_t view;
610       bool half : 1;
611    } outputs[32 + 2]; /* +POSITION +PSIZE */
612    bool writes_pos, writes_smask, writes_psize, writes_stencilref;
613 
614    /* Size in dwords of all outputs for VS, size of entire patch for HS. */
615    uint32_t output_size;
616 
617    /* Expected size of incoming output_loc for HS, DS, and GS */
618    uint32_t input_size;
619 
620    /* Map from location to offset in per-primitive storage. In dwords for
621     * HS, where varyings are read in the next stage via ldg with a dword
622     * offset, and in bytes for all other stages.
623     */
624    unsigned output_loc[32 + 4]; /* +POSITION +PSIZE +CLIP_DIST0 +CLIP_DIST1 */
625 
626    /* attributes (VS) / varyings (FS):
627     * Note that sysval's should come *after* normal inputs.
628     */
629    unsigned inputs_count;
630    struct {
631       uint8_t slot;
632       uint8_t regid;
633       uint8_t compmask;
634       /* location of input (ie. offset passed to bary.f, etc).  This
635        * matches the SP_VS_VPC_DST_REG.OUTLOCn value (a3xx and a4xx
636        * have the OUTLOCn value offset by 8, presumably to account
637        * for gl_Position/gl_PointSize)
638        */
639       uint8_t inloc;
640       /* vertex shader specific: */
641       bool sysval : 1; /* slot is a gl_system_value */
642       /* fragment shader specific: */
643       bool bary       : 1; /* fetched varying (vs one loaded into reg) */
644       bool rasterflat : 1; /* special handling for emit->rasterflat */
645       bool half       : 1;
646       bool flat       : 1;
647    } inputs[32 + 2]; /* +POSITION +FACE */
648 
649    /* sum of input components (scalar).  For frag shaders, it only counts
650     * the varying inputs:
651     */
652    unsigned total_in;
653 
654    /* sum of sysval input components (scalar). */
655    unsigned sysval_in;
656 
657    /* For frag shaders, the total number of inputs (not scalar,
658     * ie. SP_VS_PARAM_REG.TOTALVSOUTVAR)
659     */
660    unsigned varying_in;
661 
662    /* Remapping table to map Image and SSBO to hw state: */
663    struct ir3_ibo_mapping image_mapping;
664 
665    /* number of samplers/textures (which are currently 1:1): */
666    int num_samp;
667 
668    /* is there an implicit sampler to read framebuffer (FS only).. if
669     * so the sampler-idx is 'num_samp - 1' (ie. it is appended after
670     * the last "real" texture)
671     */
672    bool fb_read;
673 
674    /* do we have one or more SSBO instructions: */
675    bool has_ssbo;
676 
677    /* Which bindless resources are used, for filling out sp_xs_config */
678    bool bindless_tex;
679    bool bindless_samp;
680    bool bindless_ibo;
681    bool bindless_ubo;
682 
683    /* do we need derivatives: */
684    bool need_pixlod;
685 
686    bool need_fine_derivatives;
687 
688    /* do we need VS driver params? */
689    bool need_driver_params;
690 
691    /* do we have image write, etc (which prevents early-z): */
692    bool no_earlyz;
693 
694    /* do we have kill, which also prevents early-z, but not necessarily
695     * early-lrz (as long as lrz-write is disabled, which must be handled
696     * outside of ir3.  Unlike other no_earlyz cases, kill doesn't have
697     * side effects that prevent early-lrz discard.
698     */
699    bool has_kill;
700 
701    bool per_samp;
702 
703    /* Are we using split or merged register file? */
704    bool mergedregs;
705 
706    uint8_t clip_mask, cull_mask;
707 
708    /* for astc srgb workaround, the number/base of additional
709     * alpha tex states we need, and index of original tex states
710     */
711    struct {
712       unsigned base, count;
713       unsigned orig_idx[16];
714    } astc_srgb;
715 
716    /* for tg4 workaround, the number/base of additional
717     * unswizzled tex states we need, and index of original tex states
718     */
719    struct {
720       unsigned base, count;
721       unsigned orig_idx[16];
722    } tg4;
723 
724    /* texture sampler pre-dispatches */
725    uint32_t num_sampler_prefetch;
726    struct ir3_sampler_prefetch sampler_prefetch[IR3_MAX_SAMPLER_PREFETCH];
727 
728    uint16_t local_size[3];
729    bool local_size_variable;
730 
731    /* Important for compute shader to determine max reg footprint */
732    bool has_barrier;
733 
734    struct ir3_disasm_info disasm_info;
735 };
736 
737 static inline const char *
ir3_shader_stage(struct ir3_shader_variant * v)738 ir3_shader_stage(struct ir3_shader_variant *v)
739 {
740    switch (v->type) {
741    case MESA_SHADER_VERTEX:
742       return v->binning_pass ? "BVERT" : "VERT";
743    case MESA_SHADER_TESS_CTRL:
744       return "TCS";
745    case MESA_SHADER_TESS_EVAL:
746       return "TES";
747    case MESA_SHADER_GEOMETRY:
748       return "GEOM";
749    case MESA_SHADER_FRAGMENT:
750       return "FRAG";
751    case MESA_SHADER_COMPUTE:
752    case MESA_SHADER_KERNEL:
753       return "CL";
754    default:
755       unreachable("invalid type");
756       return NULL;
757    }
758 }
759 
760 /* Currently we do not do binning for tess.  And for GS there is no
761  * cross-stage VS+GS optimization, so the full VS+GS is used in
762  * the binning pass.
763  */
764 static inline bool
ir3_has_binning_vs(const struct ir3_shader_key * key)765 ir3_has_binning_vs(const struct ir3_shader_key *key)
766 {
767    if (key->tessellation || key->has_gs)
768       return false;
769    return true;
770 }
771 
772 /**
773  * Represents a shader at the API level, before state-specific variants are
774  * generated.
775  */
776 struct ir3_shader {
777    gl_shader_stage type;
778 
779    /* shader id (for debug): */
780    uint32_t id;
781    uint32_t variant_count;
782 
783    /* Set by freedreno after shader_state_create, so we can emit debug info
784     * when recompiling a shader at draw time.
785     */
786    bool initial_variants_done;
787 
788    struct ir3_compiler *compiler;
789 
790    unsigned num_reserved_user_consts;
791 
792    /* What API-visible wavesizes are allowed. Even if only double wavesize is
793     * allowed, we may still use the smaller wavesize "under the hood" and the
794     * application simply sees the upper half as always disabled.
795     */
796    enum ir3_wavesize_option api_wavesize;
797 
798    /* What wavesizes we're allowed to actually use. If the API wavesize is
799     * single-only, then this must be single-only too.
800     */
801    enum ir3_wavesize_option real_wavesize;
802 
803    bool nir_finalized;
804    struct nir_shader *nir;
805    struct ir3_stream_output_info stream_output;
806 
807    /* per shader stage specific info: */
808    union {
809       /* for compute shaders: */
810       struct {
811          unsigned req_input_mem;    /* in dwords */
812          unsigned req_local_mem;
813       } cs;
814    };
815 
816    struct ir3_shader_variant *variants;
817    mtx_t variants_lock;
818 
819    cache_key cache_key; /* shader disk-cache key */
820 
821    /* Bitmask of bits of the shader key used by this shader.  Used to avoid
822     * recompiles for GL NOS that doesn't actually apply to the shader.
823     */
824    struct ir3_shader_key key_mask;
825 };
826 
827 /**
828  * In order to use the same cmdstream, in particular constlen setup and const
829  * emit, for both binning and draw pass (a6xx+), the binning pass re-uses it's
830  * corresponding draw pass shaders const_state.
831  */
832 static inline struct ir3_const_state *
ir3_const_state(const struct ir3_shader_variant * v)833 ir3_const_state(const struct ir3_shader_variant *v)
834 {
835    if (v->binning_pass)
836       return v->nonbinning->const_state;
837    return v->const_state;
838 }
839 
840 /* Given a variant, calculate the maximum constlen it can have.
841  */
842 
843 static inline unsigned
ir3_max_const(const struct ir3_shader_variant * v)844 ir3_max_const(const struct ir3_shader_variant *v)
845 {
846    const struct ir3_compiler *compiler = v->shader->compiler;
847 
848    if ((v->shader->type == MESA_SHADER_COMPUTE) ||
849        (v->shader->type == MESA_SHADER_KERNEL)) {
850       return compiler->max_const_compute;
851    } else if (v->key.safe_constlen) {
852       return compiler->max_const_safe;
853    } else if (v->shader->type == MESA_SHADER_FRAGMENT) {
854       return compiler->max_const_frag;
855    } else {
856       return compiler->max_const_geom;
857    }
858 }
859 
860 void *ir3_shader_assemble(struct ir3_shader_variant *v);
861 struct ir3_shader_variant *
862 ir3_shader_get_variant(struct ir3_shader *shader,
863                        const struct ir3_shader_key *key, bool binning_pass,
864                        bool keep_ir, bool *created);
865 
866 struct ir3_shader_options {
867    unsigned reserved_user_consts;
868    enum ir3_wavesize_option api_wavesize, real_wavesize;
869 };
870 
871 struct ir3_shader *
872 ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
873                     const struct ir3_shader_options *options,
874                     struct ir3_stream_output_info *stream_output);
875 uint32_t ir3_trim_constlen(struct ir3_shader_variant **variants,
876                            const struct ir3_compiler *compiler);
877 void ir3_shader_destroy(struct ir3_shader *shader);
878 void ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out);
879 uint64_t ir3_shader_outputs(const struct ir3_shader *so);
880 
881 int ir3_glsl_type_size(const struct glsl_type *type, bool bindless);
882 
883 /*
884  * Helper/util:
885  */
886 
887 /* clears shader-key flags which don't apply to the given shader.
888  */
889 static inline void
ir3_key_clear_unused(struct ir3_shader_key * key,struct ir3_shader * shader)890 ir3_key_clear_unused(struct ir3_shader_key *key, struct ir3_shader *shader)
891 {
892    uint32_t *key_bits = (uint32_t *)key;
893    uint32_t *key_mask = (uint32_t *)&shader->key_mask;
894    STATIC_ASSERT(sizeof(*key) % 4 == 0);
895    for (int i = 0; i < sizeof(*key) >> 2; i++)
896       key_bits[i] &= key_mask[i];
897 }
898 
899 static inline int
ir3_find_output(const struct ir3_shader_variant * so,gl_varying_slot slot)900 ir3_find_output(const struct ir3_shader_variant *so, gl_varying_slot slot)
901 {
902    int j;
903 
904    for (j = 0; j < so->outputs_count; j++)
905       if (so->outputs[j].slot == slot)
906          return j;
907 
908    /* it seems optional to have a OUT.BCOLOR[n] for each OUT.COLOR[n]
909     * in the vertex shader.. but the fragment shader doesn't know this
910     * so  it will always have both IN.COLOR[n] and IN.BCOLOR[n].  So
911     * at link time if there is no matching OUT.BCOLOR[n], we must map
912     * OUT.COLOR[n] to IN.BCOLOR[n].  And visa versa if there is only
913     * a OUT.BCOLOR[n] but no matching OUT.COLOR[n]
914     */
915    if (slot == VARYING_SLOT_BFC0) {
916       slot = VARYING_SLOT_COL0;
917    } else if (slot == VARYING_SLOT_BFC1) {
918       slot = VARYING_SLOT_COL1;
919    } else if (slot == VARYING_SLOT_COL0) {
920       slot = VARYING_SLOT_BFC0;
921    } else if (slot == VARYING_SLOT_COL1) {
922       slot = VARYING_SLOT_BFC1;
923    } else {
924       return -1;
925    }
926 
927    for (j = 0; j < so->outputs_count; j++)
928       if (so->outputs[j].slot == slot)
929          return j;
930 
931    return -1;
932 }
933 
934 static inline int
ir3_next_varying(const struct ir3_shader_variant * so,int i)935 ir3_next_varying(const struct ir3_shader_variant *so, int i)
936 {
937    while (++i < so->inputs_count)
938       if (so->inputs[i].compmask && so->inputs[i].bary)
939          break;
940    return i;
941 }
942 
/* Accumulated VS-output -> VPC-location map used when linking the last
 * geometry stage against the FS (built via ir3_link_add()/ir3_link_shaders()).
 */
struct ir3_shader_linkage {
   /* Maximum location either consumed by the fragment shader or produced by
    * the last geometry stage, i.e. the size required for each vertex in the
    * VPC in DWORD's.
    */
   uint8_t max_loc;

   /* Number of entries in var. */
   uint8_t cnt;

   /* Bitset of locations used, including ones which are only used by the FS.
    */
   uint32_t varmask[4];

   /* Map from VS output to location. */
   struct {
      uint8_t slot;     /* VARYING_SLOT_* (or an *_IR3 internal slot) */
      uint8_t regid;    /* VS output register id */
      uint8_t compmask; /* bitmask of components used */
      uint8_t loc;      /* location (in components) within the VPC */
   } var[32];

   /* location for fixed-function gl_PrimitiveID passthrough */
   uint8_t primid_loc;  /* 0xff when unused (see ir3_link_shaders()) */

   /* location for fixed-function gl_ViewIndex passthrough */
   uint8_t viewid_loc;  /* 0xff when unused */

   /* location for combined clip/cull distance arrays */
   uint8_t clip0_loc, clip1_loc;  /* 0xff when unused */
};
974 
975 static inline void
ir3_link_add(struct ir3_shader_linkage * l,uint8_t slot,uint8_t regid_,uint8_t compmask,uint8_t loc)976 ir3_link_add(struct ir3_shader_linkage *l, uint8_t slot, uint8_t regid_,
977              uint8_t compmask, uint8_t loc)
978 {
979    for (int j = 0; j < util_last_bit(compmask); j++) {
980       uint8_t comploc = loc + j;
981       l->varmask[comploc / 32] |= 1 << (comploc % 32);
982    }
983 
984    l->max_loc = MAX2(l->max_loc, loc + util_last_bit(compmask));
985 
986    if (regid_ != regid(63, 0)) {
987       int i = l->cnt++;
988       debug_assert(i < ARRAY_SIZE(l->var));
989 
990       l->var[i].slot = slot;
991       l->var[i].regid = regid_;
992       l->var[i].compmask = compmask;
993       l->var[i].loc = loc;
994    }
995 }
996 
997 static inline void
ir3_link_shaders(struct ir3_shader_linkage * l,const struct ir3_shader_variant * vs,const struct ir3_shader_variant * fs,bool pack_vs_out)998 ir3_link_shaders(struct ir3_shader_linkage *l,
999                  const struct ir3_shader_variant *vs,
1000                  const struct ir3_shader_variant *fs, bool pack_vs_out)
1001 {
1002    /* On older platforms, varmask isn't programmed at all, and it appears
1003     * that the hardware generates a mask of used VPC locations using the VS
1004     * output map, and hangs if a FS bary instruction references a location
1005     * not in the list. This means that we need to have a dummy entry in the
1006     * VS out map for things like gl_PointCoord which aren't written by the
1007     * VS. Furthermore we can't use r63.x, so just pick a random register to
1008     * use if there is no VS output.
1009     */
1010    const unsigned default_regid = pack_vs_out ? regid(63, 0) : regid(0, 0);
1011    int j = -1, k;
1012 
1013    l->primid_loc = 0xff;
1014    l->viewid_loc = 0xff;
1015    l->clip0_loc = 0xff;
1016    l->clip1_loc = 0xff;
1017 
1018    while (l->cnt < ARRAY_SIZE(l->var)) {
1019       j = ir3_next_varying(fs, j);
1020 
1021       if (j >= fs->inputs_count)
1022          break;
1023 
1024       if (fs->inputs[j].inloc >= fs->total_in)
1025          continue;
1026 
1027       k = ir3_find_output(vs, fs->inputs[j].slot);
1028 
1029       if (k < 0 && fs->inputs[j].slot == VARYING_SLOT_PRIMITIVE_ID) {
1030          l->primid_loc = fs->inputs[j].inloc;
1031       }
1032 
1033       if (fs->inputs[j].slot == VARYING_SLOT_VIEW_INDEX) {
1034          assert(k < 0);
1035          l->viewid_loc = fs->inputs[j].inloc;
1036       }
1037 
1038       if (fs->inputs[j].slot == VARYING_SLOT_CLIP_DIST0)
1039          l->clip0_loc = fs->inputs[j].inloc;
1040 
1041       if (fs->inputs[j].slot == VARYING_SLOT_CLIP_DIST1)
1042          l->clip1_loc = fs->inputs[j].inloc;
1043 
1044       ir3_link_add(l, fs->inputs[j].slot,
1045                    k >= 0 ? vs->outputs[k].regid : default_regid,
1046                    fs->inputs[j].compmask, fs->inputs[j].inloc);
1047    }
1048 }
1049 
1050 static inline uint32_t
ir3_find_output_regid(const struct ir3_shader_variant * so,unsigned slot)1051 ir3_find_output_regid(const struct ir3_shader_variant *so, unsigned slot)
1052 {
1053    int j;
1054    for (j = 0; j < so->outputs_count; j++)
1055       if (so->outputs[j].slot == slot) {
1056          uint32_t regid = so->outputs[j].regid;
1057          if (so->outputs[j].half)
1058             regid |= HALF_REG_ID;
1059          return regid;
1060       }
1061    return regid(63, 0);
1062 }
1063 
/* presumably adds stream-out (transform feedback) outputs to the linkage
 * map — NOTE(review): summary inferred from name, confirm at definition.
 */
void ir3_link_stream_out(struct ir3_shader_linkage *l,
                         const struct ir3_shader_variant *v);

/* ir3-internal slots for driver-generated values, allocated just past the
 * last gl_varying_slot so they can ride the normal slot-based plumbing.
 */
#define VARYING_SLOT_GS_HEADER_IR3       (VARYING_SLOT_MAX + 0)
#define VARYING_SLOT_GS_VERTEX_FLAGS_IR3 (VARYING_SLOT_MAX + 1)
#define VARYING_SLOT_TCS_HEADER_IR3      (VARYING_SLOT_MAX + 2)
#define VARYING_SLOT_REL_PATCH_ID_IR3    (VARYING_SLOT_MAX + 3)
1071 
1072 static inline uint32_t
ir3_find_sysval_regid(const struct ir3_shader_variant * so,unsigned slot)1073 ir3_find_sysval_regid(const struct ir3_shader_variant *so, unsigned slot)
1074 {
1075    int j;
1076    for (j = 0; j < so->inputs_count; j++)
1077       if (so->inputs[j].sysval && (so->inputs[j].slot == slot))
1078          return so->inputs[j].regid;
1079    return regid(63, 0);
1080 }
1081 
1082 /* calculate register footprint in terms of half-regs (ie. one full
1083  * reg counts as two half-regs).
1084  */
1085 static inline uint32_t
ir3_shader_halfregs(const struct ir3_shader_variant * v)1086 ir3_shader_halfregs(const struct ir3_shader_variant *v)
1087 {
1088    return (2 * (v->info.max_reg + 1)) + (v->info.max_half_reg + 1);
1089 }
1090 
1091 static inline uint32_t
ir3_shader_nibo(const struct ir3_shader_variant * v)1092 ir3_shader_nibo(const struct ir3_shader_variant *v)
1093 {
1094    /* The dummy variant used in binning mode won't have an actual shader. */
1095    if (!v->shader)
1096       return 0;
1097 
1098    return v->shader->nir->info.num_ssbos + v->shader->nir->info.num_images;
1099 }
1100 
1101 static inline uint32_t
ir3_shader_branchstack_hw(const struct ir3_shader_variant * v)1102 ir3_shader_branchstack_hw(const struct ir3_shader_variant *v)
1103 {
1104    /* Dummy shader */
1105    if (!v->shader)
1106       return 0;
1107 
1108    if (v->shader->compiler->gen < 5)
1109       return v->branchstack;
1110 
1111    if (v->branchstack > 0) {
1112       uint32_t branchstack = v->branchstack / 2 + 1;
1113       return MIN2(branchstack, v->shader->compiler->branchstack_size / 2);
1114    } else {
1115       return 0;
1116    }
1117 }
1118 
1119 #endif /* IR3_SHADER_H_ */
1120