/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "tgsi/tgsi_text.h"
#include "tgsi/tgsi_ureg.h"

void *si_get_blitter_vs(struct si_context *sctx, enum blitter_attrib_type type, unsigned num_layers)
{
   unsigned vs_blit_property;
   void **vs;

   switch (type) {
   case UTIL_BLITTER_ATTRIB_NONE:
      vs = num_layers > 1 ? &sctx->vs_blit_pos_layered : &sctx->vs_blit_pos;
      vs_blit_property = SI_VS_BLIT_SGPRS_POS;
      break;
   case UTIL_BLITTER_ATTRIB_COLOR:
      vs = num_layers > 1 ? &sctx->vs_blit_color_layered : &sctx->vs_blit_color;
      vs_blit_property = SI_VS_BLIT_SGPRS_POS_COLOR;
      break;
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
   case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
      assert(num_layers == 1);
      vs = &sctx->vs_blit_texcoord;
      vs_blit_property = SI_VS_BLIT_SGPRS_POS_TEXCOORD;
      break;
   default:
      assert(0);
      return NULL;
   }
   if (*vs)
      return *vs;

   struct ureg_program *ureg = ureg_create(PIPE_SHADER_VERTEX);
   if (!ureg)
      return NULL;

   /* Tell the shader to load VS inputs from SGPRs: */
   ureg_property(ureg, TGSI_PROPERTY_VS_BLIT_SGPRS_AMD, vs_blit_property);
   ureg_property(ureg, TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION, true);

   /* This is just a pass-through shader with 1-3 MOV instructions. */
   ureg_MOV(ureg, ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0), ureg_DECL_vs_input(ureg, 0));

   if (type != UTIL_BLITTER_ATTRIB_NONE) {
      ureg_MOV(ureg, ureg_DECL_output(ureg, TGSI_SEMANTIC_GENERIC, 0), ureg_DECL_vs_input(ureg, 1));
   }

   if (num_layers > 1) {
      struct ureg_src instance_id = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_INSTANCEID, 0);
      struct ureg_dst layer = ureg_DECL_output(ureg, TGSI_SEMANTIC_LAYER, 0);

      ureg_MOV(ureg, ureg_writemask(layer, TGSI_WRITEMASK_X),
               ureg_scalar(instance_id, TGSI_SWIZZLE_X));
   }
   ureg_END(ureg);

   *vs = ureg_create_shader_and_destroy(ureg, &sctx->b);
   return *vs;
}
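
/* For illustration only: the layered UTIL_BLITTER_ATTRIB_COLOR variant built above
 * corresponds roughly to the following TGSI. Declaration order and the concrete
 * VS_BLIT_SGPRS_AMD value depend on the enums in si_pipe.h, so treat this as a
 * sketch rather than the literal ureg output:
 *
 *    VERT
 *    PROPERTY VS_BLIT_SGPRS_AMD <SI_VS_BLIT_SGPRS_POS_COLOR>
 *    PROPERTY VS_WINDOW_SPACE_POSITION 1
 *    DCL IN[0]
 *    DCL IN[1]
 *    DCL SV[0], INSTANCEID
 *    DCL OUT[0], POSITION
 *    DCL OUT[1], GENERIC[0]
 *    DCL OUT[2], LAYER
 *    MOV OUT[0], IN[0]
 *    MOV OUT[1], IN[1]
 *    MOV OUT[2].x, SV[0].xxxx
 *    END
 */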

/**
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
void *si_create_fixed_func_tcs(struct si_context *sctx)
{
   struct ureg_src outer, inner;
   struct ureg_dst tessouter, tessinner;
   struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

   if (!ureg)
      return NULL;

   outer = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_TESS_DEFAULT_OUTER_LEVEL, 0);
   inner = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL, 0);

   tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
   tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

   ureg_MOV(ureg, tessouter, outer);
   ureg_MOV(ureg, tessinner, inner);
   ureg_END(ureg);

   return ureg_create_shader_and_destroy(ureg, &sctx->b);
}
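
/* Reader's note: the TESS_DEFAULT_OUTER_LEVEL / TESS_DEFAULT_INNER_LEVEL system values
 * are (to the best of our understanding) the application defaults supplied through
 * pipe_context::set_tess_state(), so this shader is effectively "copy the state-set
 * levels into the tess factor outputs".
 */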

/* Create a compute shader implementing clear_buffer or copy_buffer. */
void *si_create_dma_compute_shader(struct pipe_context *ctx, unsigned num_dwords_per_thread,
                                   bool dst_stream_cache_policy, bool is_copy)
{
   struct si_screen *sscreen = (struct si_screen *)ctx->screen;
   assert(util_is_power_of_two_nonzero(num_dwords_per_thread));

   unsigned store_qualifier = TGSI_MEMORY_COHERENT | TGSI_MEMORY_RESTRICT;
   if (dst_stream_cache_policy)
      store_qualifier |= TGSI_MEMORY_STREAM_CACHE_POLICY;

   /* Don't cache loads, because there is no reuse. */
   unsigned load_qualifier = store_qualifier | TGSI_MEMORY_STREAM_CACHE_POLICY;

   unsigned num_mem_ops = MAX2(1, num_dwords_per_thread / 4);
   unsigned *inst_dwords = alloca(num_mem_ops * sizeof(unsigned));

   for (unsigned i = 0; i < num_mem_ops; i++) {
      if (i * 4 < num_dwords_per_thread)
         inst_dwords[i] = MIN2(4, num_dwords_per_thread - i * 4);
   }

   struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
   if (!ureg)
      return NULL;

   unsigned default_wave_size = si_determine_wave_size(sscreen, NULL);

   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, default_wave_size);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 1);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);

   struct ureg_src value;
   if (!is_copy) {
      ureg_property(ureg, TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD, inst_dwords[0]);
      value = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_CS_USER_DATA_AMD, 0);
   }

   struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
   struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
   struct ureg_dst store_addr = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);
   struct ureg_dst load_addr = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_X);
   struct ureg_dst dstbuf = ureg_dst(ureg_DECL_buffer(ureg, 0, false));
   struct ureg_src srcbuf;
   struct ureg_src *values = NULL;

   if (is_copy) {
      srcbuf = ureg_DECL_buffer(ureg, 1, false);
      values = malloc(num_mem_ops * sizeof(struct ureg_src));
   }

   /* If there are multiple stores, the first store writes into 0*wavesize+tid,
    * the 2nd store writes into 1*wavesize+tid, the 3rd store writes into 2*wavesize+tid, etc.
    */
   ureg_UMAD(ureg, store_addr, blk, ureg_imm1u(ureg, default_wave_size * num_mem_ops),
             tid);
   /* Convert from a "store size unit" into bytes. */
   ureg_UMUL(ureg, store_addr, ureg_src(store_addr), ureg_imm1u(ureg, 4 * inst_dwords[0]));
   ureg_MOV(ureg, load_addr, ureg_src(store_addr));
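
   /* Worked example (illustrative numbers): with wave64 and num_dwords_per_thread = 16,
    * num_mem_ops = 4 and every op moves 4 dwords (16 bytes). Thread 5 of block 2 starts
    * at unit 2 * (64 * 4) + 5 = 517, i.e. byte offset 517 * 16 = 8272, and each
    * subsequent op in the loop below advances the address by 64 * 16 = 1024 bytes.
    */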

   /* Distance between a load and a store for latency hiding. */
   unsigned load_store_distance = is_copy ? 8 : 0;

   for (unsigned i = 0; i < num_mem_ops + load_store_distance; i++) {
      int d = i - load_store_distance;

      if (is_copy && i < num_mem_ops) {
         if (i) {
            ureg_UADD(ureg, load_addr, ureg_src(load_addr),
                      ureg_imm1u(ureg, 4 * inst_dwords[i] * default_wave_size));
         }

         values[i] = ureg_src(ureg_DECL_temporary(ureg));
         struct ureg_dst dst =
            ureg_writemask(ureg_dst(values[i]), u_bit_consecutive(0, inst_dwords[i]));
         struct ureg_src srcs[] = {srcbuf, ureg_src(load_addr)};
         ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &dst, 1, srcs, 2, load_qualifier,
                          TGSI_TEXTURE_BUFFER, 0);
      }

      if (d >= 0) {
         if (d) {
            ureg_UADD(ureg, store_addr, ureg_src(store_addr),
                      ureg_imm1u(ureg, 4 * inst_dwords[d] * default_wave_size));
         }

         struct ureg_dst dst = ureg_writemask(dstbuf, u_bit_consecutive(0, inst_dwords[d]));
         struct ureg_src srcs[] = {ureg_src(store_addr), is_copy ? values[d] : value};
         ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dst, 1, srcs, 2, store_qualifier,
                          TGSI_TEXTURE_BUFFER, 0);
      }
   }
   ureg_END(ureg);

   struct pipe_compute_state state = {};
   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = ureg_get_tokens(ureg, NULL);

   void *cs = ctx->create_compute_state(ctx, &state);
   ureg_destroy(ureg);
   ureg_free_tokens(state.prog);

   free(values);
   return cs;
}
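
/* A minimal sketch (not part of this file's API) of how a caller might dispatch the
 * shader created above. The buffer bindings, "num_waves", and the grid sizing are
 * assumptions for illustration only:
 *
 *    void *cs = si_create_dma_compute_shader(ctx, 16, false, true);
 *    struct pipe_grid_info info = {0};
 *    info.block[0] = 64;           // should match the CS_FIXED_BLOCK_WIDTH (wave size) above
 *    info.block[1] = info.block[2] = 1;
 *    info.grid[0] = num_waves;     // one workgroup per wave-sized chunk of the copy
 *    info.grid[1] = info.grid[2] = 1;
 *    ctx->bind_compute_state(ctx, cs);
 *    ctx->launch_grid(ctx, &info); // shader buffers 0 (dst) and 1 (src) must already be bound
 */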

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
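/* Example flag decoding (illustrative combination, not taken from a specific query type):
 * CONST 0.w = 1 | 8 | 64 = 73 means "read the previously accumulated values, convert the
 * final count to a 0/1 boolean, and store it as a full 64-bit value".
 */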
void *si_create_query_result_cs(struct si_context *sctx)
{
   /* TEMP[0].xy = accumulated result so far
    * TEMP[0].z = result not available
    *
    * TEMP[1].x = current result index
    * TEMP[1].y = current pair index
    */
   static const char text_tmpl[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL BUFFER[0]\n"
      "DCL BUFFER[1]\n"
      "DCL BUFFER[2]\n"
      "DCL CONST[0][0..1]\n"
      "DCL TEMP[0..5]\n"
      "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
      "IMM[1] UINT32 {1, 2, 4, 8}\n"
      "IMM[2] UINT32 {16, 32, 64, 128}\n"
      "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
      "IMM[4] UINT32 {256, 0, 0, 0}\n"

      "AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
      "UIF TEMP[5]\n"
      /* Check result availability. */
      "LOAD TEMP[1].x, BUFFER[0], CONST[0][1].xxxx\n"
      "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
      "MOV TEMP[1], TEMP[0].zzzz\n"
      "NOT TEMP[0].z, TEMP[0].zzzz\n"

      /* Load result if available. */
      "UIF TEMP[1]\n"
      "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
      "ENDIF\n"
      "ELSE\n"
      /* Load previously accumulated result if requested. */
      "MOV TEMP[0], IMM[0].xxxx\n"
      "AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
      "UIF TEMP[4]\n"
      "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
      "ENDIF\n"

      "MOV TEMP[1].x, IMM[0].xxxx\n"
      "BGNLOOP\n"
      /* Break if accumulated result so far is not available. */
      "UIF TEMP[0].zzzz\n"
      "BRK\n"
      "ENDIF\n"

      /* Break if result_index >= result_count. */
      "USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
      "UIF TEMP[5]\n"
      "BRK\n"
      "ENDIF\n"

      /* Load fence and check result availability */
      "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
      "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
      "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
      "NOT TEMP[0].z, TEMP[0].zzzz\n"
      "UIF TEMP[0].zzzz\n"
      "BRK\n"
      "ENDIF\n"

      "MOV TEMP[1].y, IMM[0].xxxx\n"
      "BGNLOOP\n"
      /* Load start and end. */
      "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n"
      "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n"
      "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"

      "UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n"
      "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

      "U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n"

      "AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n"
      "UIF TEMP[5].zzzz\n"
      /* Load second start/end half-pair and
       * take the difference
       */
      "UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n"
      "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
      "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n"

      "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
      "U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n"
      "ENDIF\n"

      "U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n"

      /* Increment pair index */
      "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
      "USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n"
      "UIF TEMP[5]\n"
      "BRK\n"
      "ENDIF\n"
      "ENDLOOP\n"

      /* Increment result index */
      "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
      "ENDLOOP\n"
      "ENDIF\n"

      "AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n"
      "UIF TEMP[4]\n"
      /* Store accumulated data for chaining. */
      "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
      "ELSE\n"
      "AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n"
      "UIF TEMP[4]\n"
      /* Store result availability. */
      "NOT TEMP[0].z, TEMP[0]\n"
      "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
      "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"

      "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
      "UIF TEMP[4]\n"
      "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
      "ENDIF\n"
      "ELSE\n"
      /* Store result if it is available. */
      "NOT TEMP[4], TEMP[0].zzzz\n"
      "UIF TEMP[4]\n"
      /* Apply timestamp conversion */
      "AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n"
      "UIF TEMP[4]\n"
      "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
      "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
      "ENDIF\n"

      /* Convert to boolean */
      "AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n"
      "UIF TEMP[4]\n"
      "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n"
      "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
      "MOV TEMP[0].y, IMM[0].xxxx\n"
      "ENDIF\n"

      "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n"
      "UIF TEMP[4]\n"
      "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
      "ELSE\n"
      /* Clamping */
      "UIF TEMP[0].yyyy\n"
      "MOV TEMP[0].x, IMM[0].wwww\n"
      "ENDIF\n"

      "AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n"
      "UIF TEMP[4]\n"
      "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
      "ENDIF\n"

      "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
      "ENDIF\n"
      "ENDIF\n"
      "ENDIF\n"
      "ENDIF\n"

      "END\n";

   char text[sizeof(text_tmpl) + 32];
   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {};

   /* Hard code the frequency into the shader so that the backend can
    * use the full range of optimizations for divide-by-constant.
    */
   snprintf(text, sizeof(text), text_tmpl, sctx->screen->info.clock_crystal_freq);
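
   /* For example (illustrative numbers): with a 25 MHz reference clock,
    * clock_crystal_freq = 25000 (kHz), so the shader computes
    * ticks * 1000000 / 25000, i.e. 40 ns per tick, turning the raw counter
    * delta into nanoseconds.
    */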

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return sctx->b.create_compute_state(&sctx->b, &state);
}

/* Create a compute shader implementing DCC decompression via a blit.
 * This is a trivial copy_image shader except that it has a variable block
 * size and a barrier.
 */
void *si_create_dcc_decompress_cs(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL SV[2], BLOCK_SIZE\n"
      "DCL IMAGE[0], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL IMAGE[1], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL TEMP[0..1]\n"

      "UMAD TEMP[0].xyz, SV[1].xyzz, SV[2].xyzz, SV[0].xyzz\n"
      "LOAD TEMP[1], IMAGE[0], TEMP[0].xyzz, 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      /* Wait for the whole threadgroup (= DCC block) to load texels before
       * overwriting them, because overwriting any pixel within a DCC block
       * can break compression for the whole block.
       */
      "BARRIER\n"
      "STORE IMAGE[1], TEMP[0].xyzz, TEMP[1], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

void *si_clear_render_target_shader(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 8\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 8\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL IMAGE[0], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..3], LOCAL\n"
      "IMM[0] UINT32 {8, 1, 0, 0}\n"
      "MOV TEMP[0].xyz, CONST[0][0].xyzw\n"
      "UMAD TEMP[1].xyz, SV[1].xyzz, IMM[0].xxyy, SV[0].xyzz\n"
      "UADD TEMP[2].xyz, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "MOV TEMP[3].xyzw, CONST[0][1].xyzw\n"
      "STORE IMAGE[0], TEMP[2].xyzz, TEMP[3], 2D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

/* TODO: Didn't really test 1D_ARRAY */
void *si_clear_render_target_shader_1d_array(struct pipe_context *ctx)
{
   static const char text[] =
      "COMP\n"
      "PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
      "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
      "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
      "DCL SV[0], THREAD_ID\n"
      "DCL SV[1], BLOCK_ID\n"
      "DCL IMAGE[0], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT, WR\n"
      "DCL CONST[0][0..1]\n" // 0:xyzw 1:xyzw
      "DCL TEMP[0..3], LOCAL\n"
      "IMM[0] UINT32 {64, 1, 0, 0}\n"
      "MOV TEMP[0].xy, CONST[0][0].xzzw\n"
      "UMAD TEMP[1].xy, SV[1].xyzz, IMM[0].xyyy, SV[0].xyzz\n"
      "UADD TEMP[2].xy, TEMP[1].xyzx, TEMP[0].xyzx\n"
      "MOV TEMP[3].xyzw, CONST[0][1].xyzw\n"
      "STORE IMAGE[0], TEMP[2].xyzz, TEMP[3], 1D_ARRAY, PIPE_FORMAT_R32G32B32A32_FLOAT\n"
      "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

void *si_clear_12bytes_buffer_shader(struct pipe_context *ctx)
{
   static const char text[] = "COMP\n"
                              "PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
                              "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
                              "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
                              "PROPERTY CS_USER_DATA_COMPONENTS_AMD 3\n"
                              "DCL SV[0], THREAD_ID\n"
                              "DCL SV[1], BLOCK_ID\n"
                              "DCL SV[2], CS_USER_DATA_AMD\n"
                              "DCL BUFFER[0]\n"
                              "DCL TEMP[0..0]\n"
                              "IMM[0] UINT32 {64, 1, 12, 0}\n"
                              "UMAD TEMP[0].x, SV[1].xyzz, IMM[0].xyyy, SV[0].xyzz\n"
                              "UMUL TEMP[0].x, TEMP[0].xyzz, IMM[0].zzzz\n" // 12 bytes
                              "STORE BUFFER[0].xyz, TEMP[0].xxxx, SV[2].xyzz%s\n"
                              "END\n";
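
   /* Reading the template above: each thread clears 12 bytes (the 3 dwords carried in
    * the CS_USER_DATA_AMD SGPRs) at byte offset thread_index * 12. The %s optionally
    * appends STREAM_CACHE_POLICY so the store is marked as streaming (no expected reuse)
    * whenever the compute destination cache policy is not L2_LRU.
    */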
   char final_text[2048];
   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   snprintf(final_text, sizeof(final_text), text,
            SI_COMPUTE_DST_CACHE_POLICY != L2_LRU ? ", STREAM_CACHE_POLICY" : "");

   if (!tgsi_text_translate(final_text, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return ctx->create_compute_state(ctx, &state);
}

/* Load samples from the image, and copy them to the same image. This looks like
 * a no-op, but it's not. Loads use FMASK, while stores don't, so samples are
 * reordered to match expanded FMASK.
 *
 * After the shader finishes, FMASK should be cleared to identity.
 */
void *si_create_fmask_expand_cs(struct pipe_context *ctx, unsigned num_samples, bool is_array)
{
   enum tgsi_texture_type target = is_array ? TGSI_TEXTURE_2D_ARRAY_MSAA : TGSI_TEXTURE_2D_MSAA;
   struct ureg_program *ureg = ureg_create(PIPE_SHADER_COMPUTE);
   if (!ureg)
      return NULL;

   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH, 8);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT, 8);
   ureg_property(ureg, TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH, 1);

   /* Compute the image coordinates. */
   struct ureg_src image = ureg_DECL_image(ureg, 0, target, 0, true, false);
   struct ureg_src tid = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_THREAD_ID, 0);
   struct ureg_src blk = ureg_DECL_system_value(ureg, TGSI_SEMANTIC_BLOCK_ID, 0);
   struct ureg_dst coord = ureg_writemask(ureg_DECL_temporary(ureg), TGSI_WRITEMASK_XYZW);
   ureg_UMAD(ureg, ureg_writemask(coord, TGSI_WRITEMASK_XY), ureg_swizzle(blk, 0, 1, 1, 1),
             ureg_imm2u(ureg, 8, 8), ureg_swizzle(tid, 0, 1, 1, 1));
   if (is_array) {
      ureg_MOV(ureg, ureg_writemask(coord, TGSI_WRITEMASK_Z), ureg_scalar(blk, TGSI_SWIZZLE_Z));
   }

   /* Load samples, resolving FMASK. */
   struct ureg_dst sample[8];
   assert(num_samples <= ARRAY_SIZE(sample));

   for (unsigned i = 0; i < num_samples; i++) {
      sample[i] = ureg_DECL_temporary(ureg);

      ureg_MOV(ureg, ureg_writemask(coord, TGSI_WRITEMASK_W), ureg_imm1u(ureg, i));

      struct ureg_src srcs[] = {image, ureg_src(coord)};
      ureg_memory_insn(ureg, TGSI_OPCODE_LOAD, &sample[i], 1, srcs, 2, TGSI_MEMORY_RESTRICT, target,
                       0);
   }

   /* Store samples, ignoring FMASK. */
   for (unsigned i = 0; i < num_samples; i++) {
      ureg_MOV(ureg, ureg_writemask(coord, TGSI_WRITEMASK_W), ureg_imm1u(ureg, i));

      struct ureg_dst dst_image = ureg_dst(image);
      struct ureg_src srcs[] = {ureg_src(coord), ureg_src(sample[i])};
      ureg_memory_insn(ureg, TGSI_OPCODE_STORE, &dst_image, 1, srcs, 2, TGSI_MEMORY_RESTRICT,
                       target, 0);
   }
   ureg_END(ureg);

   struct pipe_compute_state state = {};
   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = ureg_get_tokens(ureg, NULL);

   void *cs = ctx->create_compute_state(ctx, &state);
   ureg_destroy(ureg);
   return cs;
}

/* Create the compute shader that is used to collect the results of gfx10+
 * shader queries.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * BUFFER[0] = query result buffer (layout is defined by gfx10_sh_query_buffer_mem)
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 *
 * CONST
 *  0.x = config; the low 3 bits indicate the mode:
 *          0: sum up counts
 *          1: determine result availability and write it as a boolean
 *          2: SO_OVERFLOW
 *          3: SO_ANY_OVERFLOW
 *        the remaining bits form a bitfield:
 *          8: write result as a 64-bit value
 *  0.y = offset in bytes to counts or stream for SO_OVERFLOW mode
 *  0.z = chain bit field:
 *          1: have previous summary buffer
 *          2: write next summary buffer
 *  0.w = result_count
 */
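/* Example (illustrative): config = 2 | 8 = 10 selects SO_OVERFLOW mode and asks for the
 * result to be written back as a 64-bit value; chain = 1 | 2 = 3 means "read the previous
 * summary buffer and write the next one", i.e. a middle link in a chained query.
 */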
void *gfx10_create_sh_query_result_cs(struct si_context *sctx)
{
   /* TEMP[0].x = accumulated result so far
    * TEMP[0].y = result missing
    * TEMP[0].z = whether we're in overflow mode
    */
   static const char text_tmpl[] = "COMP\n"
                                   "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
                                   "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
                                   "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
                                   "DCL BUFFER[0]\n"
                                   "DCL BUFFER[1]\n"
                                   "DCL BUFFER[2]\n"
                                   "DCL CONST[0][0..0]\n"
                                   "DCL TEMP[0..5]\n"
                                   "IMM[0] UINT32 {0, 7, 256, 4294967295}\n"
                                   "IMM[1] UINT32 {1, 2, 4, 8}\n"
                                   "IMM[2] UINT32 {16, 32, 64, 128}\n"

                                   /*
                                   acc_result = 0;
                                   acc_missing = 0;
                                   if (chain & 1) {
                                           acc_result = buffer[1][0];
                                           acc_missing = buffer[1][1];
                                   }
                                   */
                                   "MOV TEMP[0].xy, IMM[0].xxxx\n"
                                   "AND TEMP[5], CONST[0][0].zzzz, IMM[1].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "LOAD TEMP[0].xy, BUFFER[1], IMM[0].xxxx\n"
                                   "ENDIF\n"

                                   /*
                                   is_overflow (TEMP[0].z) = (config & 7) >= 2;
                                   result_remaining (TEMP[1].x) = (is_overflow && acc_result) ? 0 : result_count;
                                   base_offset (TEMP[1].y) = 0;

                                   for (;;) {
                                           if (!result_remaining)
                                                   break;
                                           result_remaining--;
                                   */
                                   "AND TEMP[5].x, CONST[0][0].xxxx, IMM[0].yyyy\n"
                                   "USGE TEMP[0].z, TEMP[5].xxxx, IMM[1].yyyy\n"

                                   "AND TEMP[5].x, TEMP[0].zzzz, TEMP[0].xxxx\n"
                                   "UCMP TEMP[1].x, TEMP[5].xxxx, IMM[0].xxxx, CONST[0][0].wwww\n"
                                   "MOV TEMP[1].y, IMM[0].xxxx\n"

                                   "BGNLOOP\n"
                                   "USEQ TEMP[5], TEMP[1].xxxx, IMM[0].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "BRK\n"
                                   "ENDIF\n"
                                   "UADD TEMP[1].x, TEMP[1].xxxx, IMM[0].wwww\n"

                                   /*
                                   fence = buffer[0]@(base_offset + sizeof(gfx10_sh_query_buffer_mem.stream));
                                   if (!fence) {
                                           acc_missing = ~0u;
                                           break;
                                   }
                                   */
                                   "UADD TEMP[5].x, TEMP[1].yyyy, IMM[2].wwww\n"
                                   "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
                                   "USEQ TEMP[5], TEMP[5].xxxx, IMM[0].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "MOV TEMP[0].y, TEMP[5].xxxx\n"
                                   "BRK\n"
                                   "ENDIF\n"

                                   /*
                                   stream_offset (TEMP[2].x) = base_offset + offset;

                                   if (!(config & 7)) {
                                           acc_result += buffer[0]@stream_offset;
                                   }
                                   */
                                   "UADD TEMP[2].x, TEMP[1].yyyy, CONST[0][0].yyyy\n"

                                   "AND TEMP[5].x, CONST[0][0].xxxx, IMM[0].yyyy\n"
                                   "USEQ TEMP[5], TEMP[5].xxxx, IMM[0].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "LOAD TEMP[5].x, BUFFER[0], TEMP[2].xxxx\n"
                                   "UADD TEMP[0].x, TEMP[0].xxxx, TEMP[5].xxxx\n"
                                   "ENDIF\n"

                                   /*
                                   if ((config & 7) >= 2) {
                                           count (TEMP[2].y) = (config & 1) ? 4 : 1;
                                   */
                                   "AND TEMP[5].x, CONST[0][0].xxxx, IMM[0].yyyy\n"
                                   "USGE TEMP[5], TEMP[5].xxxx, IMM[1].yyyy\n"
                                   "UIF TEMP[5]\n"
                                   "AND TEMP[5].x, CONST[0][0].xxxx, IMM[1].xxxx\n"
                                   "UCMP TEMP[2].y, TEMP[5].xxxx, IMM[1].zzzz, IMM[1].xxxx\n"

                                   /*
                                   do {
                                           generated = buffer[0]@(stream_offset + 2 * sizeof(uint64_t));
                                           emitted = buffer[0]@(stream_offset + 3 * sizeof(uint64_t));
                                           if (generated != emitted) {
                                                   acc_result = 1;
                                                   result_remaining = 0;
                                                   break;
                                           }

                                           stream_offset += sizeof(gfx10_sh_query_buffer_mem.stream[0]);
                                   } while (--count);
                                   */
                                   "BGNLOOP\n"
                                   "UADD TEMP[5].x, TEMP[2].xxxx, IMM[2].xxxx\n"
                                   "LOAD TEMP[4].xyzw, BUFFER[0], TEMP[5].xxxx\n"
                                   "USNE TEMP[5], TEMP[4].xyxy, TEMP[4].zwzw\n"
                                   "UIF TEMP[5]\n"
                                   "MOV TEMP[0].x, IMM[1].xxxx\n"
                                   "MOV TEMP[1].y, IMM[0].xxxx\n"
                                   "BRK\n"
                                   "ENDIF\n"

                                   "UADD TEMP[2].y, TEMP[2].yyyy, IMM[0].wwww\n"
                                   "USEQ TEMP[5], TEMP[2].yyyy, IMM[0].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "BRK\n"
                                   "ENDIF\n"
                                   "UADD TEMP[2].x, TEMP[2].xxxx, IMM[2].yyyy\n"
                                   "ENDLOOP\n"
                                   "ENDIF\n"

                                   /*
                                           base_offset += sizeof(gfx10_sh_query_buffer_mem);
                                   } // end outer loop
                                   */
                                   "UADD TEMP[1].y, TEMP[1].yyyy, IMM[0].zzzz\n"
                                   "ENDLOOP\n"

                                   /*
                                   if (chain & 2) {
                                           buffer[2][0] = acc_result;
                                           buffer[2][1] = acc_missing;
                                   } else {
                                   */
                                   "AND TEMP[5], CONST[0][0].zzzz, IMM[1].yyyy\n"
                                   "UIF TEMP[5]\n"
                                   "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0]\n"
                                   "ELSE\n"

                                   /*
                                   if ((config & 7) == 1) {
                                           acc_result = acc_missing ? 0 : 1;
                                           acc_missing = 0;
                                   }
                                   */
                                   "AND TEMP[5], CONST[0][0].xxxx, IMM[0].yyyy\n"
                                   "USEQ TEMP[5], TEMP[5].xxxx, IMM[1].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "UCMP TEMP[0].x, TEMP[0].yyyy, IMM[0].xxxx, IMM[1].xxxx\n"
                                   "MOV TEMP[0].y, IMM[0].xxxx\n"
                                   "ENDIF\n"

                                   /*
                                   if (!acc_missing) {
                                           buffer[2][0] = acc_result;
                                           if (config & 8)
                                                   buffer[2][1] = 0;
                                   }
                                   */
                                   "USEQ TEMP[5], TEMP[0].yyyy, IMM[0].xxxx\n"
                                   "UIF TEMP[5]\n"
                                   "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"

                                   "AND TEMP[5], CONST[0][0].xxxx, IMM[1].wwww\n"
                                   "UIF TEMP[5]\n"
                                   "STORE BUFFER[2].x, IMM[1].zzzz, TEMP[0].yyyy\n"
                                   "ENDIF\n"
                                   "ENDIF\n"
                                   "ENDIF\n"

                                   "END\n";

   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {};

   if (!tgsi_text_translate(text_tmpl, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;

   return sctx->b.create_compute_state(&sctx->b, &state);
}