1 /*
2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
4 * Copyright © 2010 Intel Corporation
5 * Copyright © 2011 Bryan Cain
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27 /**
28 * \file glsl_to_tgsi.cpp
29 *
30 * Translate GLSL IR to TGSI.
31 */
32
33 #include "st_glsl_to_tgsi.h"
34 #include "st_program.h"
35
36 #include "compiler/glsl/glsl_parser_extras.h"
37 #include "compiler/glsl/ir_optimization.h"
38 #include "compiler/glsl/linker.h"
39 #include "compiler/glsl/program.h"
40 #include "compiler/glsl/string_to_uint_map.h"
41
42 #include "main/errors.h"
43 #include "main/shaderobj.h"
44 #include "main/uniforms.h"
45 #include "main/shaderapi.h"
46 #include "program/prog_instruction.h"
47
48 #include "pipe/p_context.h"
49 #include "pipe/p_screen.h"
50 #include "tgsi/tgsi_ureg.h"
51 #include "tgsi/tgsi_info.h"
52 #include "util/u_math.h"
53 #include "util/u_memory.h"
54 #include "st_program.h"
55 #include "st_format.h"
56 #include "st_glsl_to_tgsi_temprename.h"
57
58 #include "util/hash_table.h"
59 #include <algorithm>
60
61 #define PROGRAM_ANY_CONST ((1 << PROGRAM_STATE_VAR) | \
62 (1 << PROGRAM_CONSTANT) | \
63 (1 << PROGRAM_UNIFORM))
64
65 #define MAX_GLSL_TEXTURE_OFFSET 4
66
67 #ifndef NDEBUG
68 #include "util/u_atomic.h"
69 #include "util/simple_mtx.h"
70 #include <fstream>
71 #include <ios>
72
73 /* Log file for shader-conversion statistics; opened lazily once a path is specified (see below). */
74 static std::ofstream stats_log;
75
76 /* Helper function to check whether statistics about the shader
77 * conversion should be written.
78 */
79
80 static simple_mtx_t print_stats_mutex = _SIMPLE_MTX_INITIALIZER_NP;
81
82 static inline bool print_stats_enabled()
83 {
84 static int stats_enabled = 0;
85
86 if (!stats_enabled) {
87 simple_mtx_lock(&print_stats_mutex);
88 if (!stats_enabled) {
89 const char *stats_filename = getenv("GLSL_TO_TGSI_PRINT_STATS");
90 if (stats_filename) {
91 bool write_header = std::ifstream(stats_filename).fail();
92 stats_log.open(stats_filename, std::ios_base::out | std::ios_base::app);
93 stats_enabled = stats_log.good() ? 1 : -1;
94 if (write_header)
95 stats_log << "arrays,temps,temps in arrays,total,instructions\n";
96 } else {
97 stats_enabled = -1;
98 }
99 }
100 simple_mtx_unlock(&print_stats_mutex);
101 }
102 return stats_enabled > 0;
103 }
104 #define PRINT_STATS(X) if (print_stats_enabled()) do { X; } while (false);
105 #else
106 #define PRINT_STATS(X)
107 #endif
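/* Usage sketch (an illustration, assuming a build with NDEBUG undefined):
 * setting, e.g.,
 *   GLSL_TO_TGSI_PRINT_STATS=/tmp/glsl_to_tgsi_stats.csv
 * makes print_stats_enabled() open that file for appending, and code wrapped
 * in PRINT_STATS() then runs and can append rows matching the
 * "arrays,temps,temps in arrays,total,instructions" header, which is only
 * written when the file did not already exist. The path above is purely
 * illustrative.
 */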
108
109
110 namespace {
111
112 class add_uniform_to_shader : public program_resource_visitor {
113 public:
114 add_uniform_to_shader(struct gl_context *ctx,
115 struct gl_shader_program *shader_program,
116 struct gl_program_parameter_list *params)
117 : ctx(ctx), shader_program(shader_program), params(params), idx(-1),
118 var(NULL)
119 {
120 /* empty */
121 }
122
123 void process(ir_variable *var)
124 {
125 this->idx = -1;
126 this->var = var;
127 this->program_resource_visitor::process(var,
128 ctx->Const.UseSTD430AsDefaultPacking);
129 var->data.param_index = this->idx;
130 }
131
132 private:
133 virtual void visit_field(const glsl_type *type, const char *name,
134 bool row_major, const glsl_type *record_type,
135 const enum glsl_interface_packing packing,
136 bool last_field);
137
138 struct gl_context *ctx;
139 struct gl_shader_program *shader_program;
140 struct gl_program_parameter_list *params;
141 int idx;
142 ir_variable *var;
143 };
144
145 } /* anonymous namespace */
146
147 void
148 add_uniform_to_shader::visit_field(const glsl_type *type, const char *name,
149 bool /* row_major */,
150 const glsl_type * /* record_type */,
151 const enum glsl_interface_packing,
152 bool /* last_field */)
153 {
154 /* opaque types don't use storage in the param list unless they are
155 * bindless samplers or images.
156 */
157 if (type->contains_opaque() && !var->data.bindless)
158 return;
159
160 /* Add the uniform to the param list */
161 assert(_mesa_lookup_parameter_index(params, name) < 0);
162 int index = _mesa_lookup_parameter_index(params, name);
163
164 unsigned num_params = type->arrays_of_arrays_size();
165 num_params = MAX2(num_params, 1);
166 num_params *= type->without_array()->matrix_columns;
167
168 bool is_dual_slot = type->without_array()->is_dual_slot();
169 if (is_dual_slot)
170 num_params *= 2;
171
172 _mesa_reserve_parameter_storage(params, num_params, num_params);
173 index = params->NumParameters;
174
175 if (ctx->Const.PackedDriverUniformStorage) {
176 for (unsigned i = 0; i < num_params; i++) {
177 unsigned dmul = type->without_array()->is_64bit() ? 2 : 1;
178 unsigned comps = type->without_array()->vector_elements * dmul;
179 if (is_dual_slot) {
180 if (i & 0x1)
181 comps -= 4;
182 else
183 comps = 4;
184 }
185
186 _mesa_add_parameter(params, PROGRAM_UNIFORM, name, comps,
187 type->gl_type, NULL, NULL, false);
188 }
189 } else {
190 for (unsigned i = 0; i < num_params; i++) {
191 _mesa_add_parameter(params, PROGRAM_UNIFORM, name, 4,
192 type->gl_type, NULL, NULL, true);
193 }
194 }
195
196 /* The first part of the uniform that's processed determines the base
197 * location of the whole uniform (for structures).
198 */
199 if (this->idx < 0)
200 this->idx = index;
201
202 /* Each Parameter will hold the index to the backing uniform storage.
203 * This avoids relying on names to match parameters and uniform
204 * storages later when associating uniform storage.
205 */
206 unsigned location = -1;
207 ASSERTED const bool found =
208 shader_program->UniformHash->get(location, params->Parameters[index].Name);
209 assert(found);
210
211 for (unsigned i = 0; i < num_params; i++) {
212 struct gl_program_parameter *param = &params->Parameters[index + i];
213 param->UniformStorageIndex = location;
214 param->MainUniformStorageIndex = params->Parameters[this->idx].UniformStorageIndex;
215 }
216 }
217
218 /**
219 * Generate the program parameters list for the user uniforms in a shader
220 *
221 * \param shader_program Linked shader program. This is only used to
222 * emit possible link errors to the info log.
223 * \param sh Shader whose uniforms are to be processed.
224 * \param params Parameter list to be filled in.
225 */
226 static void
227 generate_parameters_list_for_uniforms(struct gl_context *ctx,
228 struct gl_shader_program *shader_program,
229 struct gl_linked_shader *sh,
230 struct gl_program_parameter_list *params)
231 {
232 add_uniform_to_shader add(ctx, shader_program, params);
233
234 foreach_in_list(ir_instruction, node, sh->ir) {
235 ir_variable *var = node->as_variable();
236
237 if ((var == NULL) || (var->data.mode != ir_var_uniform)
238 || var->is_in_buffer_block() || (strncmp(var->name, "gl_", 3) == 0))
239 continue;
240
241 add.process(var);
242 }
243 }
244
245 static unsigned is_precise(const ir_variable *ir)
246 {
247 if (!ir)
248 return 0;
249 return ir->data.precise || ir->data.invariant;
250 }
251
252 class variable_storage {
253 DECLARE_RZALLOC_CXX_OPERATORS(variable_storage)
254
255 public:
256 variable_storage(ir_variable *var, gl_register_file file, int index,
257 unsigned array_id = 0)
258 : file(file), index(index), component(0), var(var), array_id(array_id)
259 {
260 assert(file != PROGRAM_ARRAY || array_id != 0);
261 }
262
263 gl_register_file file;
264 int index;
265
266 /* Explicit component location. This is given in terms of the GLSL-style
267 * swizzles where each double is a single component, i.e. for 64-bit types
268 * it can only be 0 or 1.
269 */
270 int component;
271 ir_variable *var; /* variable that maps to this, if any */
272 unsigned array_id;
273 };
274
275 class immediate_storage : public exec_node {
276 public:
277 immediate_storage(gl_constant_value *values, int size32, GLenum type)
278 {
279 memcpy(this->values, values, size32 * sizeof(gl_constant_value));
280 this->size32 = size32;
281 this->type = type;
282 }
283
284 /* doubles are stored across 2 gl_constant_values */
285 gl_constant_value values[4];
286 int size32; /**< Number of 32-bit components (1-4) */
287 GLenum type; /**< GL_DOUBLE, GL_FLOAT, GL_INT, GL_BOOL, or GL_UNSIGNED_INT */
288 };
289
290 static const st_src_reg undef_src = st_src_reg(PROGRAM_UNDEFINED, 0, GLSL_TYPE_ERROR);
291 static const st_dst_reg undef_dst = st_dst_reg(PROGRAM_UNDEFINED, SWIZZLE_NOOP, GLSL_TYPE_ERROR);
292
293 struct inout_decl {
294 unsigned mesa_index;
295 unsigned array_id; /* TGSI ArrayID; 1-based: 0 means not an array */
296 unsigned size;
297 unsigned interp_loc;
298 unsigned gs_out_streams;
299 enum glsl_interp_mode interp;
300 enum glsl_base_type base_type;
301 ubyte usage_mask; /* GLSL-style usage-mask, i.e. single bit per double */
302 bool invariant;
303 };
304
305 static struct inout_decl *
306 find_inout_array(struct inout_decl *decls, unsigned count, unsigned array_id)
307 {
308 assert(array_id != 0);
309
310 for (unsigned i = 0; i < count; i++) {
311 struct inout_decl *decl = &decls[i];
312
313 if (array_id == decl->array_id) {
314 return decl;
315 }
316 }
317
318 return NULL;
319 }
320
321 static enum glsl_base_type
322 find_array_type(struct inout_decl *decls, unsigned count, unsigned array_id)
323 {
324 if (!array_id)
325 return GLSL_TYPE_ERROR;
326 struct inout_decl *decl = find_inout_array(decls, count, array_id);
327 if (decl)
328 return decl->base_type;
329 return GLSL_TYPE_ERROR;
330 }
331
332 struct hwatomic_decl {
333 unsigned location;
334 unsigned binding;
335 unsigned size;
336 unsigned array_id;
337 };
338
339 struct glsl_to_tgsi_visitor : public ir_visitor {
340 public:
341 glsl_to_tgsi_visitor();
342 ~glsl_to_tgsi_visitor();
343
344 struct gl_context *ctx;
345 struct gl_program *prog;
346 struct gl_shader_program *shader_program;
347 struct gl_linked_shader *shader;
348 struct gl_shader_compiler_options *options;
349
350 int next_temp;
351
352 unsigned *array_sizes;
353 unsigned max_num_arrays;
354 unsigned next_array;
355
356 struct inout_decl inputs[4 * PIPE_MAX_SHADER_INPUTS];
357 unsigned num_inputs;
358 unsigned num_input_arrays;
359 struct inout_decl outputs[4 * PIPE_MAX_SHADER_OUTPUTS];
360 unsigned num_outputs;
361 unsigned num_output_arrays;
362
363 struct hwatomic_decl atomic_info[PIPE_MAX_HW_ATOMIC_BUFFERS];
364 unsigned num_atomics;
365 unsigned num_atomic_arrays;
366 int num_address_regs;
367 uint32_t samplers_used;
368 glsl_base_type sampler_types[PIPE_MAX_SAMPLERS];
369 enum tgsi_texture_type sampler_targets[PIPE_MAX_SAMPLERS];
370 int images_used;
371 enum tgsi_texture_type image_targets[PIPE_MAX_SHADER_IMAGES];
372 enum pipe_format image_formats[PIPE_MAX_SHADER_IMAGES];
373 bool image_wr[PIPE_MAX_SHADER_IMAGES];
374 bool indirect_addr_consts;
375 int wpos_transform_const;
376
377 bool native_integers;
378 bool have_sqrt;
379 bool have_fma;
380 bool use_shared_memory;
381 bool has_tex_txf_lz;
382 bool precise;
383 bool tg4_component_in_swizzle;
384
385 variable_storage *find_variable_storage(ir_variable *var);
386
387 int add_constant(gl_register_file file, gl_constant_value values[8],
388 int size, GLenum datatype, uint16_t *swizzle_out);
389
390 st_src_reg get_temp(const glsl_type *type);
391 void reladdr_to_temp(ir_instruction *ir, st_src_reg *reg, int *num_reladdr);
392
393 st_src_reg st_src_reg_for_double(double val);
394 st_src_reg st_src_reg_for_float(float val);
395 st_src_reg st_src_reg_for_int(int val);
396 st_src_reg st_src_reg_for_int64(int64_t val);
397 st_src_reg st_src_reg_for_type(enum glsl_base_type type, int val);
398
399 /**
400 * \name Visit methods
401 *
402 * As typical for the visitor pattern, there must be one \c visit method for
403 * each concrete subclass of \c ir_instruction. Virtual base classes within
404 * the hierarchy should not have \c visit methods.
405 */
406 /*@{*/
407 virtual void visit(ir_variable *);
408 virtual void visit(ir_loop *);
409 virtual void visit(ir_loop_jump *);
410 virtual void visit(ir_function_signature *);
411 virtual void visit(ir_function *);
412 virtual void visit(ir_expression *);
413 virtual void visit(ir_swizzle *);
414 virtual void visit(ir_dereference_variable *);
415 virtual void visit(ir_dereference_array *);
416 virtual void visit(ir_dereference_record *);
417 virtual void visit(ir_assignment *);
418 virtual void visit(ir_constant *);
419 virtual void visit(ir_call *);
420 virtual void visit(ir_return *);
421 virtual void visit(ir_discard *);
422 virtual void visit(ir_demote *);
423 virtual void visit(ir_texture *);
424 virtual void visit(ir_if *);
425 virtual void visit(ir_emit_vertex *);
426 virtual void visit(ir_end_primitive *);
427 virtual void visit(ir_barrier *);
428 /*@}*/
429
430 void ATTRIBUTE_NOINLINE visit_expression(ir_expression *, st_src_reg *);
431
432 void visit_atomic_counter_intrinsic(ir_call *);
433 void visit_ssbo_intrinsic(ir_call *);
434 void visit_membar_intrinsic(ir_call *);
435 void visit_shared_intrinsic(ir_call *);
436 void visit_image_intrinsic(ir_call *);
437 void visit_generic_intrinsic(ir_call *, enum tgsi_opcode op);
438
439 st_src_reg result;
440
441 /** Hash table mapping ir_variable to variable_storage */
442 struct hash_table *variables;
443
444 /** List of immediate_storage */
445 exec_list immediates;
446 unsigned num_immediates;
447
448 /** List of glsl_to_tgsi_instruction */
449 exec_list instructions;
450
451 glsl_to_tgsi_instruction *emit_asm(ir_instruction *ir, enum tgsi_opcode op,
452 st_dst_reg dst = undef_dst,
453 st_src_reg src0 = undef_src,
454 st_src_reg src1 = undef_src,
455 st_src_reg src2 = undef_src,
456 st_src_reg src3 = undef_src);
457
458 glsl_to_tgsi_instruction *emit_asm(ir_instruction *ir, enum tgsi_opcode op,
459 st_dst_reg dst, st_dst_reg dst1,
460 st_src_reg src0 = undef_src,
461 st_src_reg src1 = undef_src,
462 st_src_reg src2 = undef_src,
463 st_src_reg src3 = undef_src);
464
465 enum tgsi_opcode get_opcode(enum tgsi_opcode op,
466 st_dst_reg dst,
467 st_src_reg src0, st_src_reg src1);
468
469 /**
470 * Emit the correct dot-product instruction for the type of arguments
471 */
472 glsl_to_tgsi_instruction *emit_dp(ir_instruction *ir,
473 st_dst_reg dst,
474 st_src_reg src0,
475 st_src_reg src1,
476 unsigned elements);
477
478 void emit_scalar(ir_instruction *ir, enum tgsi_opcode op,
479 st_dst_reg dst, st_src_reg src0);
480
481 void emit_scalar(ir_instruction *ir, enum tgsi_opcode op,
482 st_dst_reg dst, st_src_reg src0, st_src_reg src1);
483
484 void emit_arl(ir_instruction *ir, st_dst_reg dst, st_src_reg src0);
485
486 void get_deref_offsets(ir_dereference *ir,
487 unsigned *array_size,
488 unsigned *base,
489 uint16_t *index,
490 st_src_reg *reladdr,
491 bool opaque);
492 void calc_deref_offsets(ir_dereference *tail,
493 unsigned *array_elements,
494 uint16_t *index,
495 st_src_reg *indirect,
496 unsigned *location);
497 st_src_reg canonicalize_gather_offset(st_src_reg offset);
498 bool handle_bound_deref(ir_dereference *ir);
499
500 bool try_emit_mad(ir_expression *ir,
501 int mul_operand);
502 bool try_emit_mad_for_and_not(ir_expression *ir,
503 int mul_operand);
504
505 void emit_swz(ir_expression *ir);
506
507 bool process_move_condition(ir_rvalue *ir);
508
509 void simplify_cmp(void);
510
511 void rename_temp_registers(struct rename_reg_pair *renames);
512 void get_first_temp_read(int *first_reads);
513 void get_first_temp_write(int *first_writes);
514 void get_last_temp_read_first_temp_write(int *last_reads, int *first_writes);
515 void get_last_temp_write(int *last_writes);
516
517 void copy_propagate(void);
518 int eliminate_dead_code(void);
519
520 void split_arrays(void);
521 void merge_two_dsts(void);
522 void merge_registers(void);
523 void renumber_registers(void);
524
525 void emit_block_mov(ir_assignment *ir, const struct glsl_type *type,
526 st_dst_reg *l, st_src_reg *r,
527 st_src_reg *cond, bool cond_swap);
528
529 void print_stats();
530
531 void *mem_ctx;
532 };
533
534 static st_dst_reg address_reg = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X,
535 GLSL_TYPE_FLOAT, 0);
536 static st_dst_reg address_reg2 = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X,
537 GLSL_TYPE_FLOAT, 1);
538 static st_dst_reg sampler_reladdr = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X,
539 GLSL_TYPE_FLOAT, 2);
540
541 static void
542 fail_link(struct gl_shader_program *prog, const char *fmt, ...)
543 PRINTFLIKE(2, 3);
544
545 static void
546 fail_link(struct gl_shader_program *prog, const char *fmt, ...)
547 {
548 va_list args;
549 va_start(args, fmt);
550 ralloc_vasprintf_append(&prog->data->InfoLog, fmt, args);
551 va_end(args);
552
553 prog->data->LinkStatus = LINKING_FAILURE;
554 }
555
556 int
557 swizzle_for_size(int size)
558 {
559 static const int size_swizzles[4] = {
560 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X),
561 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y),
562 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z),
563 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W),
564 };
565
566 assert((size >= 1) && (size <= 4));
567 return size_swizzles[size - 1];
568 }
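/* For example, swizzle_for_size(2) returns the .xyyy swizzle: the unused Z/W
 * positions repeat the last meaningful component instead of reading beyond
 * the value's real size.
 */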
569
570
571 /**
572 * Map mesa texture target to TGSI texture target.
573 */
574 static enum tgsi_texture_type
575 st_translate_texture_target(gl_texture_index textarget, GLboolean shadow)
576 {
577 if (shadow) {
578 switch (textarget) {
579 case TEXTURE_1D_INDEX:
580 return TGSI_TEXTURE_SHADOW1D;
581 case TEXTURE_2D_INDEX:
582 return TGSI_TEXTURE_SHADOW2D;
583 case TEXTURE_RECT_INDEX:
584 return TGSI_TEXTURE_SHADOWRECT;
585 case TEXTURE_1D_ARRAY_INDEX:
586 return TGSI_TEXTURE_SHADOW1D_ARRAY;
587 case TEXTURE_2D_ARRAY_INDEX:
588 return TGSI_TEXTURE_SHADOW2D_ARRAY;
589 case TEXTURE_CUBE_INDEX:
590 return TGSI_TEXTURE_SHADOWCUBE;
591 case TEXTURE_CUBE_ARRAY_INDEX:
592 return TGSI_TEXTURE_SHADOWCUBE_ARRAY;
593 default:
594 break;
595 }
596 }
597
598 switch (textarget) {
599 case TEXTURE_2D_MULTISAMPLE_INDEX:
600 return TGSI_TEXTURE_2D_MSAA;
601 case TEXTURE_2D_MULTISAMPLE_ARRAY_INDEX:
602 return TGSI_TEXTURE_2D_ARRAY_MSAA;
603 case TEXTURE_BUFFER_INDEX:
604 return TGSI_TEXTURE_BUFFER;
605 case TEXTURE_1D_INDEX:
606 return TGSI_TEXTURE_1D;
607 case TEXTURE_2D_INDEX:
608 return TGSI_TEXTURE_2D;
609 case TEXTURE_3D_INDEX:
610 return TGSI_TEXTURE_3D;
611 case TEXTURE_CUBE_INDEX:
612 return TGSI_TEXTURE_CUBE;
613 case TEXTURE_CUBE_ARRAY_INDEX:
614 return TGSI_TEXTURE_CUBE_ARRAY;
615 case TEXTURE_RECT_INDEX:
616 return TGSI_TEXTURE_RECT;
617 case TEXTURE_1D_ARRAY_INDEX:
618 return TGSI_TEXTURE_1D_ARRAY;
619 case TEXTURE_2D_ARRAY_INDEX:
620 return TGSI_TEXTURE_2D_ARRAY;
621 case TEXTURE_EXTERNAL_INDEX:
622 return TGSI_TEXTURE_2D;
623 default:
624 debug_assert(!"unexpected texture target index");
625 return TGSI_TEXTURE_1D;
626 }
627 }
628
629
630 /**
631 * Map GLSL base type to TGSI return type.
632 */
633 static enum tgsi_return_type
634 st_translate_texture_type(enum glsl_base_type type)
635 {
636 switch (type) {
637 case GLSL_TYPE_INT:
638 return TGSI_RETURN_TYPE_SINT;
639 case GLSL_TYPE_UINT:
640 return TGSI_RETURN_TYPE_UINT;
641 case GLSL_TYPE_FLOAT:
642 return TGSI_RETURN_TYPE_FLOAT;
643 default:
644 assert(!"unexpected texture type");
645 return TGSI_RETURN_TYPE_UNKNOWN;
646 }
647 }
648
649
650 glsl_to_tgsi_instruction *
651 glsl_to_tgsi_visitor::emit_asm(ir_instruction *ir, enum tgsi_opcode op,
652 st_dst_reg dst, st_dst_reg dst1,
653 st_src_reg src0, st_src_reg src1,
654 st_src_reg src2, st_src_reg src3)
655 {
656 glsl_to_tgsi_instruction *inst = new(mem_ctx) glsl_to_tgsi_instruction();
657 int num_reladdr = 0, i, j;
658 bool dst_is_64bit[2];
659
660 op = get_opcode(op, dst, src0, src1);
661
662 /* If we have to do relative addressing, we want to load the ARL
663 * reg directly for one of the regs, and preload the other reladdr
664 * sources into temps.
665 */
666 num_reladdr += dst.reladdr != NULL || dst.reladdr2;
667 assert(!dst1.reladdr); /* should be lowered in earlier passes */
668 num_reladdr += src0.reladdr != NULL || src0.reladdr2 != NULL;
669 num_reladdr += src1.reladdr != NULL || src1.reladdr2 != NULL;
670 num_reladdr += src2.reladdr != NULL || src2.reladdr2 != NULL;
671 num_reladdr += src3.reladdr != NULL || src3.reladdr2 != NULL;
672
673 reladdr_to_temp(ir, &src3, &num_reladdr);
674 reladdr_to_temp(ir, &src2, &num_reladdr);
675 reladdr_to_temp(ir, &src1, &num_reladdr);
676 reladdr_to_temp(ir, &src0, &num_reladdr);
677
678 if (dst.reladdr || dst.reladdr2) {
679 if (dst.reladdr)
680 emit_arl(ir, address_reg, *dst.reladdr);
681 if (dst.reladdr2)
682 emit_arl(ir, address_reg2, *dst.reladdr2);
683 num_reladdr--;
684 }
685
686 assert(num_reladdr == 0);
687
688 /* inst->op has only 8 bits. */
689 STATIC_ASSERT(TGSI_OPCODE_LAST <= 255);
690
691 inst->op = op;
692 inst->precise = this->precise;
693 inst->info = tgsi_get_opcode_info(op);
694 inst->dst[0] = dst;
695 inst->dst[1] = dst1;
696 inst->src[0] = src0;
697 inst->src[1] = src1;
698 inst->src[2] = src2;
699 inst->src[3] = src3;
700 inst->is_64bit_expanded = false;
701 inst->ir = ir;
702 inst->dead_mask = 0;
703 inst->tex_offsets = NULL;
704 inst->tex_offset_num_offset = 0;
705 inst->saturate = 0;
706 inst->tex_shadow = 0;
707 /* default to float, for paths where this is not initialized
708 * (since 0==UINT which is likely wrong):
709 */
710 inst->tex_type = GLSL_TYPE_FLOAT;
711
712 /* Update indirect addressing status used by TGSI */
713 if (dst.reladdr || dst.reladdr2) {
714 switch (dst.file) {
715 case PROGRAM_STATE_VAR:
716 case PROGRAM_CONSTANT:
717 case PROGRAM_UNIFORM:
718 this->indirect_addr_consts = true;
719 break;
720 case PROGRAM_IMMEDIATE:
721 assert(!"immediates should not have indirect addressing");
722 break;
723 default:
724 break;
725 }
726 }
727 else {
728 for (i = 0; i < 4; i++) {
729 if (inst->src[i].reladdr) {
730 switch (inst->src[i].file) {
731 case PROGRAM_STATE_VAR:
732 case PROGRAM_CONSTANT:
733 case PROGRAM_UNIFORM:
734 this->indirect_addr_consts = true;
735 break;
736 case PROGRAM_IMMEDIATE:
737 assert(!"immediates should not have indirect addressing");
738 break;
739 default:
740 break;
741 }
742 }
743 }
744 }
745
746 /*
747 * This section contains the double processing.
748 * GLSL just represents doubles as single channel values,
749 * however most HW and TGSI represent doubles as pairs of register channels.
750 *
751 * so we have to fix up the destination writemask/index and src swizzle/indexes.
752 * dest writemasks need to translate from a single-channel write mask
753 * to a dual-channel writemask, but also need to modify the index
754 * if we are touching the Z,W fields in the pre-translated writemask.
755 *
756 * src channels have similar index modifications along with swizzle
757 * changes so we pick the XY, ZW pairs from the correct index.
758 *
759 * GLSL [0].x -> TGSI [0].xy
760 * GLSL [0].y -> TGSI [0].zw
761 * GLSL [0].z -> TGSI [1].xy
762 * GLSL [0].w -> TGSI [1].zw
763 */
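/*
 * As a concrete sketch of the splitting done below: an instruction that
 * writes the .zw channels of a GLSL dvec4 held in temp[0] is expanded into
 * two TGSI instructions, one writing temp[1].xy and one writing temp[1].zw,
 * with each source swizzle rewritten to the matching XY or ZW pair.
 */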
764 for (j = 0; j < 2; j++) {
765 dst_is_64bit[j] = glsl_base_type_is_64bit(inst->dst[j].type);
766 if (!dst_is_64bit[j] && inst->dst[j].file == PROGRAM_OUTPUT &&
767 inst->dst[j].type == GLSL_TYPE_ARRAY) {
768 enum glsl_base_type type = find_array_type(this->outputs,
769 this->num_outputs,
770 inst->dst[j].array_id);
771 if (glsl_base_type_is_64bit(type))
772 dst_is_64bit[j] = true;
773 }
774 }
775
776 if (dst_is_64bit[0] || dst_is_64bit[1] ||
777 glsl_base_type_is_64bit(inst->src[0].type)) {
778 glsl_to_tgsi_instruction *dinst = NULL;
779 int initial_src_swz[4], initial_src_idx[4];
780 int initial_dst_idx[2], initial_dst_writemask[2];
781 /* select the writemask for dst0 or dst1 */
782 unsigned writemask = inst->dst[1].file == PROGRAM_UNDEFINED
783 ? inst->dst[0].writemask : inst->dst[1].writemask;
784
785 /* copy out the writemask, index and swizzles for all src/dsts. */
786 for (j = 0; j < 2; j++) {
787 initial_dst_writemask[j] = inst->dst[j].writemask;
788 initial_dst_idx[j] = inst->dst[j].index;
789 }
790
791 for (j = 0; j < 4; j++) {
792 initial_src_swz[j] = inst->src[j].swizzle;
793 initial_src_idx[j] = inst->src[j].index;
794 }
795
796 /*
797 * scan all the components in the dst writemask
798 * generate an instruction for each of them if required.
799 */
800 st_src_reg addr;
801 while (writemask) {
802
803 int i = u_bit_scan(&writemask);
804
805 /* before emitting the instruction, see if we have to adjust
806 * load / store address */
807 if (i > 1 && (inst->op == TGSI_OPCODE_LOAD ||
808 inst->op == TGSI_OPCODE_STORE) &&
809 addr.file == PROGRAM_UNDEFINED) {
810 /* We have to advance the buffer address by 16 */
811 addr = get_temp(glsl_type::uint_type);
812 emit_asm(ir, TGSI_OPCODE_UADD, st_dst_reg(addr),
813 inst->src[0], st_src_reg_for_int(16));
814 }
815
816 /* on the first iteration, reuse the original instruction */
817 if (dinst == NULL) {
818 dinst = inst;
819 } else {
820 /* create a new instruction for each subsequent channel */
821 dinst = new(mem_ctx) glsl_to_tgsi_instruction();
822 *dinst = *inst;
823 dinst->next = NULL;
824 dinst->prev = NULL;
825 }
826 this->instructions.push_tail(dinst);
827 dinst->is_64bit_expanded = true;
828
829 /* modify the destination if we are splitting */
830 for (j = 0; j < 2; j++) {
831 if (dst_is_64bit[j]) {
832 dinst->dst[j].writemask = (i & 1) ? WRITEMASK_ZW : WRITEMASK_XY;
833 dinst->dst[j].index = initial_dst_idx[j];
834 if (i > 1) {
835 if (dinst->op == TGSI_OPCODE_LOAD ||
836 dinst->op == TGSI_OPCODE_STORE)
837 dinst->src[0] = addr;
838 if (dinst->op != TGSI_OPCODE_STORE)
839 dinst->dst[j].index++;
840 }
841 } else {
842 /* if we aren't writing to a double, just get the bit of the
843 * initial writemask for this channel
844 */
845 dinst->dst[j].writemask = initial_dst_writemask[j] & (1 << i);
846 }
847 }
848
849 /* modify the src registers */
850 for (j = 0; j < 4; j++) {
851 int swz = GET_SWZ(initial_src_swz[j], i);
852
853 if (glsl_base_type_is_64bit(dinst->src[j].type)) {
854 dinst->src[j].index = initial_src_idx[j];
855 if (swz > 1) {
856 dinst->src[j].double_reg2 = true;
857 dinst->src[j].index++;
858 }
859
860 if (swz & 1)
861 dinst->src[j].swizzle = MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_W,
862 SWIZZLE_Z, SWIZZLE_W);
863 else
864 dinst->src[j].swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
865 SWIZZLE_X, SWIZZLE_Y);
866
867 } else {
868 /* some opcodes are special-cased in what they use as sources
869 * - [FUI]2D/[UI]2I64 take a float/[u]int src0, (D)LDEXP takes an
870 * integer src1
871 */
872 if (op == TGSI_OPCODE_F2D || op == TGSI_OPCODE_U2D ||
873 op == TGSI_OPCODE_I2D ||
874 op == TGSI_OPCODE_I2I64 || op == TGSI_OPCODE_U2I64 ||
875 op == TGSI_OPCODE_DLDEXP || op == TGSI_OPCODE_LDEXP ||
876 (op == TGSI_OPCODE_UCMP && dst_is_64bit[0])) {
877 dinst->src[j].swizzle = MAKE_SWIZZLE4(swz, swz, swz, swz);
878 }
879 }
880 }
881 }
882 inst = dinst;
883 } else {
884 this->instructions.push_tail(inst);
885 }
886
887
888 return inst;
889 }
890
891 glsl_to_tgsi_instruction *
892 glsl_to_tgsi_visitor::emit_asm(ir_instruction *ir, enum tgsi_opcode op,
893 st_dst_reg dst,
894 st_src_reg src0, st_src_reg src1,
895 st_src_reg src2, st_src_reg src3)
896 {
897 return emit_asm(ir, op, dst, undef_dst, src0, src1, src2, src3);
898 }
899
900 /**
901 * Determines whether to use an integer, unsigned integer, or float opcode
902 * based on the operands and input opcode, then emits the result.
903 */
904 enum tgsi_opcode
905 glsl_to_tgsi_visitor::get_opcode(enum tgsi_opcode op,
906 st_dst_reg dst,
907 st_src_reg src0, st_src_reg src1)
908 {
909 enum glsl_base_type type = GLSL_TYPE_FLOAT;
910
911 if (op == TGSI_OPCODE_MOV)
912 return op;
913
914 assert(src0.type != GLSL_TYPE_ARRAY);
915 assert(src0.type != GLSL_TYPE_STRUCT);
916 assert(src1.type != GLSL_TYPE_ARRAY);
917 assert(src1.type != GLSL_TYPE_STRUCT);
918
919 if (is_resource_instruction(op))
920 type = src1.type;
921 else if (src0.type == GLSL_TYPE_INT64 || src1.type == GLSL_TYPE_INT64)
922 type = GLSL_TYPE_INT64;
923 else if (src0.type == GLSL_TYPE_UINT64 || src1.type == GLSL_TYPE_UINT64)
924 type = GLSL_TYPE_UINT64;
925 else if (src0.type == GLSL_TYPE_DOUBLE || src1.type == GLSL_TYPE_DOUBLE)
926 type = GLSL_TYPE_DOUBLE;
927 else if (src0.type == GLSL_TYPE_FLOAT || src1.type == GLSL_TYPE_FLOAT)
928 type = GLSL_TYPE_FLOAT;
929 else if (native_integers)
930 type = src0.type == GLSL_TYPE_BOOL ? GLSL_TYPE_INT : src0.type;
931
932 #define case7(c, f, i, u, d, i64, ui64) \
933 case TGSI_OPCODE_##c: \
934 if (type == GLSL_TYPE_UINT64) \
935 op = TGSI_OPCODE_##ui64; \
936 else if (type == GLSL_TYPE_INT64) \
937 op = TGSI_OPCODE_##i64; \
938 else if (type == GLSL_TYPE_DOUBLE) \
939 op = TGSI_OPCODE_##d; \
940 else if (type == GLSL_TYPE_INT) \
941 op = TGSI_OPCODE_##i; \
942 else if (type == GLSL_TYPE_UINT) \
943 op = TGSI_OPCODE_##u; \
944 else \
945 op = TGSI_OPCODE_##f; \
946 break;
947
948 #define casecomp(c, f, i, u, d, i64, ui64) \
949 case TGSI_OPCODE_##c: \
950 if (type == GLSL_TYPE_INT64) \
951 op = TGSI_OPCODE_##i64; \
952 else if (type == GLSL_TYPE_UINT64) \
953 op = TGSI_OPCODE_##ui64; \
954 else if (type == GLSL_TYPE_DOUBLE) \
955 op = TGSI_OPCODE_##d; \
956 else if (type == GLSL_TYPE_INT || type == GLSL_TYPE_SUBROUTINE) \
957 op = TGSI_OPCODE_##i; \
958 else if (type == GLSL_TYPE_UINT) \
959 op = TGSI_OPCODE_##u; \
960 else if (native_integers) \
961 op = TGSI_OPCODE_##f; \
962 else \
963 op = TGSI_OPCODE_##c; \
964 break;
965
966 switch (op) {
967 /* Some instructions are initially selected without considering the type.
968 * This fixes the type:
969 *
970 * INIT FLOAT SINT UINT DOUBLE SINT64 UINT64
971 */
972 case7(ADD, ADD, UADD, UADD, DADD, U64ADD, U64ADD);
973 case7(CEIL, CEIL, LAST, LAST, DCEIL, LAST, LAST);
974 case7(DIV, DIV, IDIV, UDIV, DDIV, I64DIV, U64DIV);
975 case7(FMA, FMA, UMAD, UMAD, DFMA, LAST, LAST);
976 case7(FLR, FLR, LAST, LAST, DFLR, LAST, LAST);
977 case7(FRC, FRC, LAST, LAST, DFRAC, LAST, LAST);
978 case7(MUL, MUL, UMUL, UMUL, DMUL, U64MUL, U64MUL);
979 case7(MAD, MAD, UMAD, UMAD, DMAD, LAST, LAST);
980 case7(MAX, MAX, IMAX, UMAX, DMAX, I64MAX, U64MAX);
981 case7(MIN, MIN, IMIN, UMIN, DMIN, I64MIN, U64MIN);
982 case7(RCP, RCP, LAST, LAST, DRCP, LAST, LAST);
983 case7(ROUND, ROUND,LAST, LAST, DROUND, LAST, LAST);
984 case7(RSQ, RSQ, LAST, LAST, DRSQ, LAST, LAST);
985 case7(SQRT, SQRT, LAST, LAST, DSQRT, LAST, LAST);
986 case7(SSG, SSG, ISSG, ISSG, DSSG, I64SSG, I64SSG);
987 case7(TRUNC, TRUNC,LAST, LAST, DTRUNC, LAST, LAST);
988
989 case7(MOD, LAST, MOD, UMOD, LAST, I64MOD, U64MOD);
990 case7(SHL, LAST, SHL, SHL, LAST, U64SHL, U64SHL);
991 case7(IBFE, LAST, IBFE, UBFE, LAST, LAST, LAST);
992 case7(IMSB, LAST, IMSB, UMSB, LAST, LAST, LAST);
993 case7(IMUL_HI, LAST, IMUL_HI, UMUL_HI, LAST, LAST, LAST);
994 case7(ISHR, LAST, ISHR, USHR, LAST, I64SHR, U64SHR);
995 case7(ATOMIMAX,LAST, ATOMIMAX,ATOMUMAX,LAST, LAST, LAST);
996 case7(ATOMIMIN,LAST, ATOMIMIN,ATOMUMIN,LAST, LAST, LAST);
997 case7(ATOMUADD,ATOMFADD,ATOMUADD,ATOMUADD,LAST, LAST, LAST);
998
999 casecomp(SEQ, FSEQ, USEQ, USEQ, DSEQ, U64SEQ, U64SEQ);
1000 casecomp(SNE, FSNE, USNE, USNE, DSNE, U64SNE, U64SNE);
1001 casecomp(SGE, FSGE, ISGE, USGE, DSGE, I64SGE, U64SGE);
1002 casecomp(SLT, FSLT, ISLT, USLT, DSLT, I64SLT, U64SLT);
1003
1004 default:
1005 break;
1006 }
1007
1008 assert(op != TGSI_OPCODE_LAST);
1009 return op;
1010 }
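/* For instance (per the tables above), an ADD whose sources are GLSL_TYPE_UINT
 * becomes TGSI_OPCODE_UADD, and an SLT comparing GLSL_TYPE_DOUBLE operands
 * becomes TGSI_OPCODE_DSLT.
 */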
1011
1012 glsl_to_tgsi_instruction *
1013 glsl_to_tgsi_visitor::emit_dp(ir_instruction *ir,
1014 st_dst_reg dst, st_src_reg src0, st_src_reg src1,
1015 unsigned elements)
1016 {
1017 static const enum tgsi_opcode dot_opcodes[] = {
1018 TGSI_OPCODE_DP2, TGSI_OPCODE_DP3, TGSI_OPCODE_DP4
1019 };
1020
1021 return emit_asm(ir, dot_opcodes[elements - 2], dst, src0, src1);
1022 }
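/* e.g. emit_dp(ir, dst, a, b, 3) emits TGSI_OPCODE_DP3; elements is expected
 * to be 2, 3 or 4.
 */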
1023
1024 /**
1025 * Emits TGSI scalar opcodes to produce unique answers across channels.
1026 *
1027 * Some TGSI opcodes are scalar-only, like ARB_fp/vp. The src X
1028 * channel determines the result across all channels. So to do a vec4
1029 * of this operation, we want to emit a scalar per source channel used
1030 * to produce dest channels.
1031 */
1032 void
1033 glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, enum tgsi_opcode op,
1034 st_dst_reg dst,
1035 st_src_reg orig_src0, st_src_reg orig_src1)
1036 {
1037 int i, j;
1038 int done_mask = ~dst.writemask;
1039
1040 /* TGSI RCP is a scalar operation splatting results to all channels,
1041 * like ARB_fp/vp. So emit as many RCPs as necessary to cover our
1042 * dst channels.
1043 */
1044 for (i = 0; i < 4; i++) {
1045 GLuint this_mask = (1 << i);
1046 st_src_reg src0 = orig_src0;
1047 st_src_reg src1 = orig_src1;
1048
1049 if (done_mask & this_mask)
1050 continue;
1051
1052 GLuint src0_swiz = GET_SWZ(src0.swizzle, i);
1053 GLuint src1_swiz = GET_SWZ(src1.swizzle, i);
1054 for (j = i + 1; j < 4; j++) {
1055 /* If there is another enabled component in the destination that is
1056 * derived from the same inputs, generate its value on this pass as
1057 * well.
1058 */
1059 if (!(done_mask & (1 << j)) &&
1060 GET_SWZ(src0.swizzle, j) == src0_swiz &&
1061 GET_SWZ(src1.swizzle, j) == src1_swiz) {
1062 this_mask |= (1 << j);
1063 }
1064 }
1065 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz,
1066 src0_swiz, src0_swiz);
1067 src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz,
1068 src1_swiz, src1_swiz);
1069
1070 dst.writemask = this_mask;
1071 emit_asm(ir, op, dst, src0, src1);
1072 done_mask |= this_mask;
1073 }
1074 }
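/* Sketch: for a dst writemask of .xy where both channels read src component X
 * (e.g. a .xx swizzle), a single scalar op covers both channels; if they read
 * different components, one op is emitted per distinct source component.
 */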
1075
1076 void
1077 glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, enum tgsi_opcode op,
1078 st_dst_reg dst, st_src_reg src0)
1079 {
1080 st_src_reg undef = undef_src;
1081
1082 undef.swizzle = SWIZZLE_XXXX;
1083
1084 emit_scalar(ir, op, dst, src0, undef);
1085 }
1086
1087 void
1088 glsl_to_tgsi_visitor::emit_arl(ir_instruction *ir,
1089 st_dst_reg dst, st_src_reg src0)
1090 {
1091 enum tgsi_opcode op = TGSI_OPCODE_ARL;
1092
1093 if (src0.type == GLSL_TYPE_INT || src0.type == GLSL_TYPE_UINT) {
1094 op = TGSI_OPCODE_UARL;
1095 }
1096
1097 assert(dst.file == PROGRAM_ADDRESS);
1098 if (dst.index >= this->num_address_regs)
1099 this->num_address_regs = dst.index + 1;
1100
1101 emit_asm(NULL, op, dst, src0);
1102 }
1103
1104 int
1105 glsl_to_tgsi_visitor::add_constant(gl_register_file file,
1106 gl_constant_value values[8], int size,
1107 GLenum datatype,
1108 uint16_t *swizzle_out)
1109 {
1110 if (file == PROGRAM_CONSTANT) {
1111 GLuint swizzle = swizzle_out ? *swizzle_out : 0;
1112 int result = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
1113 values, size, datatype,
1114 &swizzle);
1115 if (swizzle_out)
1116 *swizzle_out = swizzle;
1117 return result;
1118 }
1119
1120 assert(file == PROGRAM_IMMEDIATE);
1121
1122 int index = 0;
1123 immediate_storage *entry;
1124 int size32 = size * ((datatype == GL_DOUBLE ||
1125 datatype == GL_INT64_ARB ||
1126 datatype == GL_UNSIGNED_INT64_ARB) ? 2 : 1);
1127 int i;
1128
1129 /* Search immediate storage to see if we already have an identical
1130 * immediate that we can use instead of adding a duplicate entry.
1131 */
1132 foreach_in_list(immediate_storage, entry, &this->immediates) {
1133 immediate_storage *tmp = entry;
1134
1135 for (i = 0; i * 4 < size32; i++) {
1136 int slot_size = MIN2(size32 - (i * 4), 4);
1137 if (tmp->type != datatype || tmp->size32 != slot_size)
1138 break;
1139 if (memcmp(tmp->values, &values[i * 4],
1140 slot_size * sizeof(gl_constant_value)))
1141 break;
1142
1143 /* Everything matches, keep going until the full size is matched */
1144 tmp = (immediate_storage *)tmp->next;
1145 }
1146
1147 /* The full value matched */
1148 if (i * 4 >= size32)
1149 return index;
1150
1151 index++;
1152 }
1153
1154 for (i = 0; i * 4 < size32; i++) {
1155 int slot_size = MIN2(size32 - (i * 4), 4);
1156 /* Add this immediate to the list. */
1157 entry = new(mem_ctx) immediate_storage(&values[i * 4],
1158 slot_size, datatype);
1159 this->immediates.push_tail(entry);
1160 this->num_immediates++;
1161 }
1162 return index;
1163 }
1164
1165 st_src_reg
1166 glsl_to_tgsi_visitor::st_src_reg_for_float(float val)
1167 {
1168 st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_FLOAT);
1169 union gl_constant_value uval;
1170
1171 uval.f = val;
1172 src.index = add_constant(src.file, &uval, 1, GL_FLOAT, &src.swizzle);
1173
1174 return src;
1175 }
1176
1177 st_src_reg
1178 glsl_to_tgsi_visitor::st_src_reg_for_double(double val)
1179 {
1180 st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_DOUBLE);
1181 union gl_constant_value uval[2];
1182
1183 memcpy(uval, &val, sizeof(uval));
1184 src.index = add_constant(src.file, uval, 1, GL_DOUBLE, &src.swizzle);
1185 src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_X, SWIZZLE_Y);
1186 return src;
1187 }
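/* Note: the two 32-bit halves of the double land in consecutive
 * gl_constant_values, and the .xyxy swizzle set above replicates that pair
 * across the register, e.g. for st_src_reg_for_double(1.0).
 */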
1188
1189 st_src_reg
1190 glsl_to_tgsi_visitor::st_src_reg_for_int(int val)
1191 {
1192 st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_INT);
1193 union gl_constant_value uval;
1194
1195 assert(native_integers);
1196
1197 uval.i = val;
1198 src.index = add_constant(src.file, &uval, 1, GL_INT, &src.swizzle);
1199
1200 return src;
1201 }
1202
1203 st_src_reg
1204 glsl_to_tgsi_visitor::st_src_reg_for_int64(int64_t val)
1205 {
1206 st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_INT64);
1207 union gl_constant_value uval[2];
1208
1209 memcpy(uval, &val, sizeof(uval));
1210 src.index = add_constant(src.file, uval, 1, GL_DOUBLE, &src.swizzle);
1211 src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_X, SWIZZLE_Y);
1212
1213 return src;
1214 }
1215
1216 st_src_reg
1217 glsl_to_tgsi_visitor::st_src_reg_for_type(enum glsl_base_type type, int val)
1218 {
1219 if (native_integers)
1220 return type == GLSL_TYPE_FLOAT ? st_src_reg_for_float(val) :
1221 st_src_reg_for_int(val);
1222 else
1223 return st_src_reg_for_float(val);
1224 }
1225
1226 static int
1227 attrib_type_size(const struct glsl_type *type, bool is_vs_input)
1228 {
1229 return type->count_attribute_slots(is_vs_input);
1230 }
1231
1232 static int
1233 type_size(const struct glsl_type *type)
1234 {
1235 return type->count_attribute_slots(false);
1236 }
1237
1238 static void
1239 add_buffer_to_load_and_stores(glsl_to_tgsi_instruction *inst, st_src_reg *buf,
1240 exec_list *instructions, ir_constant *access)
1241 {
1242 /**
1243 * emit_asm() might have actually split the op into pieces, e.g. for
1244 * double stores. We have to go back and fix up all the generated ops.
1245 */
1246 enum tgsi_opcode op = inst->op;
1247 do {
1248 inst->resource = *buf;
1249 if (access)
1250 inst->buffer_access = access->value.u[0];
1251
1252 if (inst == instructions->get_head_raw())
1253 break;
1254 inst = (glsl_to_tgsi_instruction *)inst->get_prev();
1255
1256 if (inst->op == TGSI_OPCODE_UADD) {
1257 if (inst == instructions->get_head_raw())
1258 break;
1259 inst = (glsl_to_tgsi_instruction *)inst->get_prev();
1260 }
1261 } while (inst->op == op && inst->resource.file == PROGRAM_UNDEFINED);
1262 }
1263
1264 /**
1265 * If the given GLSL type is an array or matrix or a structure containing
1266 * an array/matrix member, return true. Else return false.
1267 *
1268 * This is used to determine which kind of temp storage (PROGRAM_TEMPORARY
1269 * or PROGRAM_ARRAY) should be used for variables of this type. Anytime
1270 * we have an array that might be indexed with a variable, we need to use
1271 * the later storage type.
1272 */
1273 static bool
1274 type_has_array_or_matrix(const glsl_type *type)
1275 {
1276 if (type->is_array() || type->is_matrix())
1277 return true;
1278
1279 if (type->is_struct()) {
1280 for (unsigned i = 0; i < type->length; i++) {
1281 if (type_has_array_or_matrix(type->fields.structure[i].type)) {
1282 return true;
1283 }
1284 }
1285 }
1286
1287 return false;
1288 }
1289
1290
1291 /**
1292 * In the initial pass of codegen, we assign temporary numbers to
1293 * intermediate results. (not SSA -- variable assignments will reuse
1294 * storage).
1295 */
1296 st_src_reg
1297 glsl_to_tgsi_visitor::get_temp(const glsl_type *type)
1298 {
1299 st_src_reg src;
1300
1301 src.type = native_integers ? type->base_type : GLSL_TYPE_FLOAT;
1302 src.reladdr = NULL;
1303 src.negate = 0;
1304 src.abs = 0;
1305
1306 if (!options->EmitNoIndirectTemp && type_has_array_or_matrix(type)) {
1307 if (next_array >= max_num_arrays) {
1308 max_num_arrays += 32;
1309 array_sizes = (unsigned*)
1310 realloc(array_sizes, sizeof(array_sizes[0]) * max_num_arrays);
1311 }
1312
1313 src.file = PROGRAM_ARRAY;
1314 src.index = 0;
1315 src.array_id = next_array + 1;
1316 array_sizes[next_array] = type_size(type);
1317 ++next_array;
1318
1319 } else {
1320 src.file = PROGRAM_TEMPORARY;
1321 src.index = next_temp;
1322 next_temp += type_size(type);
1323 }
1324
1325 if (type->is_array() || type->is_struct()) {
1326 src.swizzle = SWIZZLE_NOOP;
1327 } else {
1328 src.swizzle = swizzle_for_size(type->vector_elements);
1329 }
1330
1331 return src;
1332 }
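/* Illustrative behaviour: a mat4 (or any array/struct containing one) lands in
 * PROGRAM_ARRAY with type_size() == 4 slots so it can be indirectly addressed,
 * while a plain vec3 gets a PROGRAM_TEMPORARY with a .xyzz swizzle from
 * swizzle_for_size(3). This assumes EmitNoIndirectTemp is not set.
 */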
1333
1334 variable_storage *
1335 glsl_to_tgsi_visitor::find_variable_storage(ir_variable *var)
1336 {
1337 struct hash_entry *entry;
1338
1339 entry = _mesa_hash_table_search(this->variables, var);
1340 if (!entry)
1341 return NULL;
1342
1343 return (variable_storage *)entry->data;
1344 }
1345
1346 void
1347 glsl_to_tgsi_visitor::visit(ir_variable *ir)
1348 {
1349 if (ir->data.mode == ir_var_uniform && strncmp(ir->name, "gl_", 3) == 0) {
1350 unsigned int i;
1351 const ir_state_slot *const slots = ir->get_state_slots();
1352 assert(slots != NULL);
1353
1354 /* Check if this statevar's setup in the STATE file exactly
1355 * matches how we'll want to reference it as a
1356 * struct/array/whatever. If not, then we need to move it into
1357 * temporary storage and hope that it'll get copy-propagated
1358 * out.
1359 */
1360 for (i = 0; i < ir->get_num_state_slots(); i++) {
1361 if (slots[i].swizzle != SWIZZLE_XYZW) {
1362 break;
1363 }
1364 }
1365
1366 variable_storage *storage;
1367 st_dst_reg dst;
1368 if (i == ir->get_num_state_slots()) {
1369 /* We'll set the index later. */
1370 storage = new(mem_ctx) variable_storage(ir, PROGRAM_STATE_VAR, -1);
1371
1372 _mesa_hash_table_insert(this->variables, ir, storage);
1373
1374 dst = undef_dst;
1375 } else {
1376 /* The variable_storage constructor allocates slots based on the size
1377 * of the type. However, this had better match the number of state
1378 * elements that we're going to copy into the new temporary.
1379 */
1380 assert((int) ir->get_num_state_slots() == type_size(ir->type));
1381
1382 dst = st_dst_reg(get_temp(ir->type));
1383
1384 storage = new(mem_ctx) variable_storage(ir, dst.file, dst.index,
1385 dst.array_id);
1386
1387 _mesa_hash_table_insert(this->variables, ir, storage);
1388 }
1389
1390
1391 for (unsigned int i = 0; i < ir->get_num_state_slots(); i++) {
1392 int index = _mesa_add_state_reference(this->prog->Parameters,
1393 slots[i].tokens);
1394
1395 if (storage->file == PROGRAM_STATE_VAR) {
1396 if (storage->index == -1) {
1397 storage->index = index;
1398 } else {
1399 assert(index == storage->index + (int)i);
1400 }
1401 } else {
1402 /* We use GLSL_TYPE_FLOAT here regardless of the actual type of
1403 * the data being moved since MOV does not care about the type of
1404 * data it is moving, and we don't want to declare registers with
1405 * array or struct types.
1406 */
1407 st_src_reg src(PROGRAM_STATE_VAR, index, GLSL_TYPE_FLOAT);
1408 src.swizzle = slots[i].swizzle;
1409 emit_asm(ir, TGSI_OPCODE_MOV, dst, src);
1410 /* even a float takes up a whole vec4 reg in a struct/array. */
1411 dst.index++;
1412 }
1413 }
1414
1415 if (storage->file == PROGRAM_TEMPORARY &&
1416 dst.index != storage->index + (int) ir->get_num_state_slots()) {
1417 fail_link(this->shader_program,
1418 "failed to load builtin uniform `%s' (%d/%d regs loaded)\n",
1419 ir->name, dst.index - storage->index,
1420 type_size(ir->type));
1421 }
1422 }
1423 }
1424
1425 void
1426 glsl_to_tgsi_visitor::visit(ir_loop *ir)
1427 {
1428 emit_asm(NULL, TGSI_OPCODE_BGNLOOP);
1429
1430 visit_exec_list(&ir->body_instructions, this);
1431
1432 emit_asm(NULL, TGSI_OPCODE_ENDLOOP);
1433 }
1434
1435 void
1436 glsl_to_tgsi_visitor::visit(ir_loop_jump *ir)
1437 {
1438 switch (ir->mode) {
1439 case ir_loop_jump::jump_break:
1440 emit_asm(NULL, TGSI_OPCODE_BRK);
1441 break;
1442 case ir_loop_jump::jump_continue:
1443 emit_asm(NULL, TGSI_OPCODE_CONT);
1444 break;
1445 }
1446 }
1447
1448
1449 void
1450 glsl_to_tgsi_visitor::visit(ir_function_signature *ir)
1451 {
1452 assert(0);
1453 (void)ir;
1454 }
1455
1456 void
1457 glsl_to_tgsi_visitor::visit(ir_function *ir)
1458 {
1459 /* Ignore function bodies other than main() -- we shouldn't see calls to
1460 * them since they should all be inlined before we get to glsl_to_tgsi.
1461 */
1462 if (strcmp(ir->name, "main") == 0) {
1463 const ir_function_signature *sig;
1464 exec_list empty;
1465
1466 sig = ir->matching_signature(NULL, &empty, false);
1467
1468 assert(sig);
1469
1470 foreach_in_list(ir_instruction, ir, &sig->body) {
1471 ir->accept(this);
1472 }
1473 }
1474 }
1475
1476 bool
1477 glsl_to_tgsi_visitor::try_emit_mad(ir_expression *ir, int mul_operand)
1478 {
1479 int nonmul_operand = 1 - mul_operand;
1480 st_src_reg a, b, c;
1481 st_dst_reg result_dst;
1482
1483 // there is no TGSI opcode for this
1484 if (ir->type->is_integer_64())
1485 return false;
1486
1487 ir_expression *expr = ir->operands[mul_operand]->as_expression();
1488 if (!expr || expr->operation != ir_binop_mul)
1489 return false;
1490
1491 expr->operands[0]->accept(this);
1492 a = this->result;
1493 expr->operands[1]->accept(this);
1494 b = this->result;
1495 ir->operands[nonmul_operand]->accept(this);
1496 c = this->result;
1497
1498 this->result = get_temp(ir->type);
1499 result_dst = st_dst_reg(this->result);
1500 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1501 emit_asm(ir, TGSI_OPCODE_MAD, result_dst, a, b, c);
1502
1503 return true;
1504 }
1505
1506 /**
1507 * Emit MAD(a, -b, a) instead of AND(a, NOT(b))
1508 *
1509 * The logic values are 1.0 for true and 0.0 for false. Logical-and is
1510 * implemented using multiplication, and logical-or is implemented using
1511 * addition. Logical-not can be implemented as (true - x), or (1.0 - x).
1512 * As a result, the logical expression (a & !b) can be rewritten as:
1513 *
1514 * - a * !b
1515 * - a * (1 - b)
1516 * - (a * 1) - (a * b)
1517 * - a + -(a * b)
1518 * - a + (a * -b)
1519 *
1520 * This final expression can be implemented as a single MAD(a, -b, a)
1521 * instruction.
1522 */
1523 bool
1524 glsl_to_tgsi_visitor::try_emit_mad_for_and_not(ir_expression *ir,
1525 int try_operand)
1526 {
1527 const int other_operand = 1 - try_operand;
1528 st_src_reg a, b;
1529
1530 ir_expression *expr = ir->operands[try_operand]->as_expression();
1531 if (!expr || expr->operation != ir_unop_logic_not)
1532 return false;
1533
1534 ir->operands[other_operand]->accept(this);
1535 a = this->result;
1536 expr->operands[0]->accept(this);
1537 b = this->result;
1538
1539 b.negate = ~b.negate;
1540
1541 this->result = get_temp(ir->type);
1542 emit_asm(ir, TGSI_OPCODE_MAD, st_dst_reg(this->result), a, b, a);
1543
1544 return true;
1545 }
1546
1547 void
1548 glsl_to_tgsi_visitor::reladdr_to_temp(ir_instruction *ir,
1549 st_src_reg *reg, int *num_reladdr)
1550 {
1551 if (!reg->reladdr && !reg->reladdr2)
1552 return;
1553
1554 if (reg->reladdr)
1555 emit_arl(ir, address_reg, *reg->reladdr);
1556 if (reg->reladdr2)
1557 emit_arl(ir, address_reg2, *reg->reladdr2);
1558
1559 if (*num_reladdr != 1) {
1560 st_src_reg temp = get_temp(glsl_type::get_instance(reg->type, 4, 1));
1561
1562 emit_asm(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), *reg);
1563 *reg = temp;
1564 }
1565
1566 (*num_reladdr)--;
1567 }
1568
1569 void
1570 glsl_to_tgsi_visitor::visit(ir_expression *ir)
1571 {
1572 st_src_reg op[ARRAY_SIZE(ir->operands)];
1573
1574 /* Quick peephole: Emit MAD(a, b, c) instead of ADD(MUL(a, b), c)
1575 */
1576 if (!this->precise && ir->operation == ir_binop_add) {
1577 if (try_emit_mad(ir, 1))
1578 return;
1579 if (try_emit_mad(ir, 0))
1580 return;
1581 }
1582
1583 /* Quick peephole: Emit OPCODE_MAD(a, -b, a) instead of AND(a, NOT(b))
1584 */
1585 if (!native_integers && ir->operation == ir_binop_logic_and) {
1586 if (try_emit_mad_for_and_not(ir, 1))
1587 return;
1588 if (try_emit_mad_for_and_not(ir, 0))
1589 return;
1590 }
1591
1592 if (ir->operation == ir_quadop_vector)
1593 assert(!"ir_quadop_vector should have been lowered");
1594
1595 for (unsigned int operand = 0; operand < ir->num_operands; operand++) {
1596 this->result.file = PROGRAM_UNDEFINED;
1597 ir->operands[operand]->accept(this);
1598 if (this->result.file == PROGRAM_UNDEFINED) {
1599 printf("Failed to get tree for expression operand:\n");
1600 ir->operands[operand]->print();
1601 printf("\n");
1602 exit(1);
1603 }
1604 op[operand] = this->result;
1605
1606 /* Matrix expression operands should have been broken down to vector
1607 * operations already.
1608 */
1609 assert(!ir->operands[operand]->type->is_matrix());
1610 }
1611
1612 visit_expression(ir, op);
1613 }
1614
1615 /* The non-recursive part of the expression visitor lives in a separate
1616 * function and should be prevented from being inlined, to avoid a stack
1617 * explosion when deeply nested expressions are visited.
1618 */
1619 void
1620 glsl_to_tgsi_visitor::visit_expression(ir_expression *ir, st_src_reg *op)
1621 {
1622 st_src_reg result_src;
1623 st_dst_reg result_dst;
1624
1625 int vector_elements = ir->operands[0]->type->vector_elements;
1626 if (ir->operands[1] &&
1627 ir->operation != ir_binop_interpolate_at_offset &&
1628 ir->operation != ir_binop_interpolate_at_sample) {
1629 st_src_reg *swz_op = NULL;
1630 if (vector_elements > ir->operands[1]->type->vector_elements) {
1631 assert(ir->operands[1]->type->vector_elements == 1);
1632 swz_op = &op[1];
1633 } else if (vector_elements < ir->operands[1]->type->vector_elements) {
1634 assert(ir->operands[0]->type->vector_elements == 1);
1635 swz_op = &op[0];
1636 }
1637 if (swz_op) {
1638 uint16_t swizzle_x = GET_SWZ(swz_op->swizzle, 0);
1639 swz_op->swizzle = MAKE_SWIZZLE4(swizzle_x, swizzle_x,
1640 swizzle_x, swizzle_x);
1641 }
1642 vector_elements = MAX2(vector_elements,
1643 ir->operands[1]->type->vector_elements);
1644 }
1645 if (ir->operands[2] &&
1646 ir->operands[2]->type->vector_elements != vector_elements) {
1647 /* This can happen with ir_triop_lrp, i.e. glsl mix */
1648 assert(ir->operands[2]->type->vector_elements == 1);
1649 uint16_t swizzle_x = GET_SWZ(op[2].swizzle, 0);
1650 op[2].swizzle = MAKE_SWIZZLE4(swizzle_x, swizzle_x,
1651 swizzle_x, swizzle_x);
1652 }
1653
1654 this->result.file = PROGRAM_UNDEFINED;
1655
1656 /* Storage for our result. Ideally for an assignment we'd be using
1657 * the actual storage for the result here, instead.
1658 */
1659 result_src = get_temp(ir->type);
1660 /* convenience for the emit functions below. */
1661 result_dst = st_dst_reg(result_src);
1662 /* Limit writes to the channels that will be used by result_src later.
1663 * This does limit this temp's use as a temporary for multi-instruction
1664 * sequences.
1665 */
1666 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1667
1668 switch (ir->operation) {
1669 case ir_unop_logic_not:
1670 if (result_dst.type != GLSL_TYPE_FLOAT)
1671 emit_asm(ir, TGSI_OPCODE_NOT, result_dst, op[0]);
1672 else {
1673 /* Previously 'SEQ dst, src, 0.0' was used for this. However, many
1674 * older GPUs implement SEQ using multiple instructions (i915 uses two
1675 * SGE instructions and a MUL instruction). Since our logic values are
1676 * 0.0 and 1.0, 1-x also implements !x.
1677 */
1678 op[0].negate = ~op[0].negate;
1679 emit_asm(ir, TGSI_OPCODE_ADD, result_dst, op[0],
1680 st_src_reg_for_float(1.0));
1681 }
1682 break;
1683 case ir_unop_neg:
1684 if (result_dst.type == GLSL_TYPE_INT64 ||
1685 result_dst.type == GLSL_TYPE_UINT64)
1686 emit_asm(ir, TGSI_OPCODE_I64NEG, result_dst, op[0]);
1687 else if (result_dst.type == GLSL_TYPE_INT ||
1688 result_dst.type == GLSL_TYPE_UINT)
1689 emit_asm(ir, TGSI_OPCODE_INEG, result_dst, op[0]);
1690 else if (result_dst.type == GLSL_TYPE_DOUBLE)
1691 emit_asm(ir, TGSI_OPCODE_DNEG, result_dst, op[0]);
1692 else {
1693 op[0].negate = ~op[0].negate;
1694 result_src = op[0];
1695 }
1696 break;
1697 case ir_unop_subroutine_to_int:
1698 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
1699 break;
1700 case ir_unop_abs:
1701 if (result_dst.type == GLSL_TYPE_FLOAT)
1702 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0].get_abs());
1703 else if (result_dst.type == GLSL_TYPE_DOUBLE)
1704 emit_asm(ir, TGSI_OPCODE_DABS, result_dst, op[0]);
1705 else if (result_dst.type == GLSL_TYPE_INT64 ||
1706 result_dst.type == GLSL_TYPE_UINT64)
1707 emit_asm(ir, TGSI_OPCODE_I64ABS, result_dst, op[0]);
1708 else
1709 emit_asm(ir, TGSI_OPCODE_IABS, result_dst, op[0]);
1710 break;
1711 case ir_unop_sign:
1712 emit_asm(ir, TGSI_OPCODE_SSG, result_dst, op[0]);
1713 break;
1714 case ir_unop_rcp:
1715 emit_scalar(ir, TGSI_OPCODE_RCP, result_dst, op[0]);
1716 break;
1717
1718 case ir_unop_exp2:
1719 emit_scalar(ir, TGSI_OPCODE_EX2, result_dst, op[0]);
1720 break;
1721 case ir_unop_exp:
1722 assert(!"not reached: should be handled by exp_to_exp2");
1723 break;
1724 case ir_unop_log:
1725 assert(!"not reached: should be handled by log_to_log2");
1726 break;
1727 case ir_unop_log2:
1728 emit_scalar(ir, TGSI_OPCODE_LG2, result_dst, op[0]);
1729 break;
1730 case ir_unop_sin:
1731 emit_scalar(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
1732 break;
1733 case ir_unop_cos:
1734 emit_scalar(ir, TGSI_OPCODE_COS, result_dst, op[0]);
1735 break;
1736 case ir_unop_saturate: {
1737 glsl_to_tgsi_instruction *inst;
1738 inst = emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
1739 inst->saturate = true;
1740 break;
1741 }
1742
1743 case ir_unop_dFdx:
1744 case ir_unop_dFdx_coarse:
1745 emit_asm(ir, TGSI_OPCODE_DDX, result_dst, op[0]);
1746 break;
1747 case ir_unop_dFdx_fine:
1748 emit_asm(ir, TGSI_OPCODE_DDX_FINE, result_dst, op[0]);
1749 break;
1750 case ir_unop_dFdy:
1751 case ir_unop_dFdy_coarse:
1752 case ir_unop_dFdy_fine:
1753 {
1754 /* The X component contains 1 or -1 depending on whether the framebuffer
1755 * is a FBO or the window system buffer, respectively.
1756 * It is then multiplied with the source operand of DDY.
1757 */
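/* The emitted sequence is effectively:
 *    MUL temp, transform_y.xxxx, src
 *    DDY(_FINE) dst, temp
 * so the Y derivative is taken on a sign-corrected source.
 */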
1758 static const gl_state_index16 transform_y_state[STATE_LENGTH]
1759 = { STATE_FB_WPOS_Y_TRANSFORM };
1760
1761 unsigned transform_y_index =
1762 _mesa_add_state_reference(this->prog->Parameters,
1763 transform_y_state);
1764
1765 st_src_reg transform_y = st_src_reg(PROGRAM_STATE_VAR,
1766 transform_y_index,
1767 glsl_type::vec4_type);
1768 transform_y.swizzle = SWIZZLE_XXXX;
1769
1770 st_src_reg temp = get_temp(glsl_type::vec4_type);
1771
1772 emit_asm(ir, TGSI_OPCODE_MUL, st_dst_reg(temp), transform_y, op[0]);
1773 emit_asm(ir, ir->operation == ir_unop_dFdy_fine ?
1774 TGSI_OPCODE_DDY_FINE : TGSI_OPCODE_DDY, result_dst, temp);
1775 break;
1776 }
1777
1778 case ir_unop_frexp_sig:
1779 emit_asm(ir, TGSI_OPCODE_DFRACEXP, result_dst, undef_dst, op[0]);
1780 break;
1781
1782 case ir_unop_frexp_exp:
1783 emit_asm(ir, TGSI_OPCODE_DFRACEXP, undef_dst, result_dst, op[0]);
1784 break;
1785
1786 case ir_binop_add:
1787 emit_asm(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
1788 break;
1789 case ir_binop_sub:
1790 op[1].negate = ~op[1].negate;
1791 emit_asm(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
1792 break;
1793
1794 case ir_binop_mul:
1795 emit_asm(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
1796 break;
1797 case ir_binop_div:
1798 emit_asm(ir, TGSI_OPCODE_DIV, result_dst, op[0], op[1]);
1799 break;
1800 case ir_binop_mod:
1801 if (result_dst.type == GLSL_TYPE_FLOAT)
1802 assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
1803 else
1804 emit_asm(ir, TGSI_OPCODE_MOD, result_dst, op[0], op[1]);
1805 break;
1806
1807 case ir_binop_less:
1808 emit_asm(ir, TGSI_OPCODE_SLT, result_dst, op[0], op[1]);
1809 break;
1810 case ir_binop_gequal:
1811 emit_asm(ir, TGSI_OPCODE_SGE, result_dst, op[0], op[1]);
1812 break;
1813 case ir_binop_equal:
1814 emit_asm(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
1815 break;
1816 case ir_binop_nequal:
1817 emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1818 break;
1819 case ir_binop_all_equal:
1820 /* "==" operator producing a scalar boolean. */
1821 if (ir->operands[0]->type->is_vector() ||
1822 ir->operands[1]->type->is_vector()) {
1823 st_src_reg temp = get_temp(native_integers ?
1824 glsl_type::uvec4_type :
1825 glsl_type::vec4_type);
1826
1827 if (native_integers) {
1828 st_dst_reg temp_dst = st_dst_reg(temp);
1829 st_src_reg temp1 = st_src_reg(temp), temp2 = st_src_reg(temp);
1830
1831 if (ir->operands[0]->type->is_boolean() &&
1832 ir->operands[1]->as_constant() &&
1833 ir->operands[1]->as_constant()->is_one()) {
1834 emit_asm(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), op[0]);
1835 } else {
1836 emit_asm(ir, TGSI_OPCODE_SEQ, st_dst_reg(temp), op[0], op[1]);
1837 }
1838
1839 /* Emit 1-3 AND operations to combine the SEQ results. */
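/* For a vec4 compare this reduces pairwise: x&y -> temp.x and z&w -> temp.y,
 * and the final AND below combines temp.x and temp.y into the scalar result;
 * the vec2 case skips straight to that final AND.
 */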
1840 switch (ir->operands[0]->type->vector_elements) {
1841 case 2:
1842 break;
1843 case 3:
1844 temp_dst.writemask = WRITEMASK_Y;
1845 temp1.swizzle = SWIZZLE_YYYY;
1846 temp2.swizzle = SWIZZLE_ZZZZ;
1847 emit_asm(ir, TGSI_OPCODE_AND, temp_dst, temp1, temp2);
1848 break;
1849 case 4:
1850 temp_dst.writemask = WRITEMASK_X;
1851 temp1.swizzle = SWIZZLE_XXXX;
1852 temp2.swizzle = SWIZZLE_YYYY;
1853 emit_asm(ir, TGSI_OPCODE_AND, temp_dst, temp1, temp2);
1854 temp_dst.writemask = WRITEMASK_Y;
1855 temp1.swizzle = SWIZZLE_ZZZZ;
1856 temp2.swizzle = SWIZZLE_WWWW;
1857 emit_asm(ir, TGSI_OPCODE_AND, temp_dst, temp1, temp2);
1858 }
1859
1860 temp1.swizzle = SWIZZLE_XXXX;
1861 temp2.swizzle = SWIZZLE_YYYY;
1862 emit_asm(ir, TGSI_OPCODE_AND, result_dst, temp1, temp2);
1863 } else {
1864 emit_asm(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
1865
1866 /* After the dot-product, the value will be an integer on the
1867 * range [0,4]: zero when all components match, positive otherwise.
1868 */
1869 emit_dp(ir, result_dst, temp, temp, vector_elements);
1870
1871 /* Negating the result of the dot-product gives values on the range
1872 * [-4, 0]. Zero becomes 1.0, and negative values become zero.
1873 * This is achieved using SGE.
1874 */
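/* For example, vec3 operands that differ in one channel: SNE yields
 * (0, 1, 0), the dot product yields 1.0, and SGE(-1.0, 0.0) produces 0.0;
 * if all channels match, the dot product is 0.0 and SGE(-0.0, 0.0)
 * produces 1.0.
 */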
1875 st_src_reg sge_src = result_src;
1876 sge_src.negate = ~sge_src.negate;
1877 emit_asm(ir, TGSI_OPCODE_SGE, result_dst, sge_src,
1878 st_src_reg_for_float(0.0));
1879 }
1880 } else {
1881 emit_asm(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
1882 }
1883 break;
1884 case ir_binop_any_nequal:
1885 /* "!=" operator producing a scalar boolean. */
1886 if (ir->operands[0]->type->is_vector() ||
1887 ir->operands[1]->type->is_vector()) {
1888 st_src_reg temp = get_temp(native_integers ?
1889 glsl_type::uvec4_type :
1890 glsl_type::vec4_type);
1891 if (ir->operands[0]->type->is_boolean() &&
1892 ir->operands[1]->as_constant() &&
1893 ir->operands[1]->as_constant()->is_zero()) {
1894 emit_asm(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), op[0]);
1895 } else {
1896 emit_asm(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
1897 }
1898
1899 if (native_integers) {
1900 st_dst_reg temp_dst = st_dst_reg(temp);
1901 st_src_reg temp1 = st_src_reg(temp), temp2 = st_src_reg(temp);
1902
1903 /* Emit 1-3 OR operations to combine the SNE results. */
1904 switch (ir->operands[0]->type->vector_elements) {
1905 case 2:
1906 break;
1907 case 3:
1908 temp_dst.writemask = WRITEMASK_Y;
1909 temp1.swizzle = SWIZZLE_YYYY;
1910 temp2.swizzle = SWIZZLE_ZZZZ;
1911 emit_asm(ir, TGSI_OPCODE_OR, temp_dst, temp1, temp2);
1912 break;
1913 case 4:
1914 temp_dst.writemask = WRITEMASK_X;
1915 temp1.swizzle = SWIZZLE_XXXX;
1916 temp2.swizzle = SWIZZLE_YYYY;
1917 emit_asm(ir, TGSI_OPCODE_OR, temp_dst, temp1, temp2);
1918 temp_dst.writemask = WRITEMASK_Y;
1919 temp1.swizzle = SWIZZLE_ZZZZ;
1920 temp2.swizzle = SWIZZLE_WWWW;
1921 emit_asm(ir, TGSI_OPCODE_OR, temp_dst, temp1, temp2);
1922 }
1923
1924 temp1.swizzle = SWIZZLE_XXXX;
1925 temp2.swizzle = SWIZZLE_YYYY;
1926 emit_asm(ir, TGSI_OPCODE_OR, result_dst, temp1, temp2);
1927 } else {
1928 /* After the dot-product, the value will be an integer on the
1929 * range [0,4]. Zero stays zero, and positive values become 1.0.
1930 */
1931 glsl_to_tgsi_instruction *const dp =
1932 emit_dp(ir, result_dst, temp, temp, vector_elements);
1933 if (this->prog->Target == GL_FRAGMENT_PROGRAM_ARB) {
1934 /* The clamping to [0,1] can be done for free in the fragment
1935 * shader with a saturate.
1936 */
1937 dp->saturate = true;
1938 } else {
1939 /* Negating the result of the dot-product gives values on the
1940 * range [-4, 0]. Zero stays zero, and negative values become
1941 * 1.0. This is achieved using SLT.
1942 */
1943 st_src_reg slt_src = result_src;
1944 slt_src.negate = ~slt_src.negate;
1945 emit_asm(ir, TGSI_OPCODE_SLT, result_dst, slt_src,
1946 st_src_reg_for_float(0.0));
1947 }
1948 }
1949 } else {
1950 emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1951 }
1952 break;
1953
1954 case ir_binop_logic_xor:
1955 if (native_integers)
1956 emit_asm(ir, TGSI_OPCODE_XOR, result_dst, op[0], op[1]);
1957 else
1958 emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
1959 break;
1960
1961 case ir_binop_logic_or: {
1962 if (native_integers) {
1963 /* If integers are used as booleans, we can use an actual "or"
1964 * instruction.
1965 */
1966 assert(native_integers);
1967 emit_asm(ir, TGSI_OPCODE_OR, result_dst, op[0], op[1]);
1968 } else {
1969 /* After the addition, the value will be an integer on the
1970 * range [0,2]. Zero stays zero, and positive values become 1.0.
1971 */
1972 glsl_to_tgsi_instruction *add =
1973 emit_asm(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
1974 if (this->prog->Target == GL_FRAGMENT_PROGRAM_ARB) {
1975 /* The clamping to [0,1] can be done for free in the fragment
1976 * shader with a saturate if floats are being used as boolean
1977 * values.
1978 */
1979 add->saturate = true;
1980 } else {
1981 /* Negating the result of the addition gives values on the range
1982 * [-2, 0]. Zero stays zero, and negative values become 1.0.
1983 * This is achieved using SLT.
1984 */
1985 st_src_reg slt_src = result_src;
1986 slt_src.negate = ~slt_src.negate;
1987 emit_asm(ir, TGSI_OPCODE_SLT, result_dst, slt_src,
1988 st_src_reg_for_float(0.0));
1989 }
1990 }
1991 break;
1992 }
1993
1994 case ir_binop_logic_and:
1995 /* If native integers are disabled, the bool args are stored as float 0.0
1996 * or 1.0, so "mul" gives us "and". If they're enabled, just use the
1997 * actual AND opcode.
1998 */
1999 if (native_integers)
2000 emit_asm(ir, TGSI_OPCODE_AND, result_dst, op[0], op[1]);
2001 else
2002 emit_asm(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
2003 break;
2004
2005 case ir_binop_dot:
2006 assert(ir->operands[0]->type->is_vector());
2007 assert(ir->operands[0]->type == ir->operands[1]->type);
2008 emit_dp(ir, result_dst, op[0], op[1],
2009 ir->operands[0]->type->vector_elements);
2010 break;
2011
2012 case ir_unop_sqrt:
2013 if (have_sqrt) {
2014 emit_scalar(ir, TGSI_OPCODE_SQRT, result_dst, op[0]);
2015 } else {
2016 /* This is the only instruction sequence that makes the game "Risen"
2017 * render correctly. ABS is not required for the game, but since GLSL
2018 * declares negative values as "undefined", allowing us to do whatever
2019 * we want, I choose to use ABS to match DX9 and pre-GLSL RSQ
2020 * behavior.
2021 */
2022 emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0].get_abs());
2023 emit_scalar(ir, TGSI_OPCODE_RCP, result_dst, result_src);
2024 }
2025 break;
2026 case ir_unop_rsq:
2027 emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]);
2028 break;
2029 case ir_unop_i2f:
2030 if (native_integers) {
2031 emit_asm(ir, TGSI_OPCODE_I2F, result_dst, op[0]);
2032 break;
2033 }
2034 FALLTHROUGH;
2035 case ir_unop_b2f:
2036 if (native_integers) {
2037 emit_asm(ir, TGSI_OPCODE_AND, result_dst, op[0],
2038 st_src_reg_for_float(1.0));
2039 break;
2040 }
2041 FALLTHROUGH;
2042 case ir_unop_i2u:
2043 case ir_unop_u2i:
2044 case ir_unop_i642u64:
2045 case ir_unop_u642i64:
2046 /* Converting between signed and unsigned integers is a no-op. */
2047 result_src = op[0];
2048 result_src.type = result_dst.type;
2049 break;
2050 case ir_unop_b2i:
2051 if (native_integers) {
2052 /* Booleans are stored as integers using ~0 for true and 0 for false.
2053 * GLSL requires that int(bool) return 1 for true and 0 for false.
2054 * This conversion is done with AND, but it could be done with NEG.
2055 */
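/* For example, true (~0) & 1 = 1 and false (0) & 1 = 0. */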
2056 emit_asm(ir, TGSI_OPCODE_AND, result_dst, op[0],
2057 st_src_reg_for_int(1));
2058 } else {
2059 /* Booleans and integers are both stored as floats when native
2060 * integers are disabled.
2061 */
2062 result_src = op[0];
2063 }
2064 break;
2065 case ir_unop_f2i:
2066 if (native_integers)
2067 emit_asm(ir, TGSI_OPCODE_F2I, result_dst, op[0]);
2068 else
2069 emit_asm(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
2070 break;
2071 case ir_unop_f2u:
2072 if (native_integers)
2073 emit_asm(ir, TGSI_OPCODE_F2U, result_dst, op[0]);
2074 else
2075 emit_asm(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
2076 break;
2077 case ir_unop_bitcast_f2i:
2078 case ir_unop_bitcast_f2u:
2079 /* Make sure we don't propagate the negate modifier to integer opcodes. */
2080 if (op[0].negate || op[0].abs)
2081 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
2082 else
2083 result_src = op[0];
2084 result_src.type = ir->operation == ir_unop_bitcast_f2i ? GLSL_TYPE_INT :
2085 GLSL_TYPE_UINT;
2086 break;
2087 case ir_unop_bitcast_i2f:
2088 case ir_unop_bitcast_u2f:
2089 result_src = op[0];
2090 result_src.type = GLSL_TYPE_FLOAT;
2091 break;
2092 case ir_unop_f2b:
2093 emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0],
2094 st_src_reg_for_float(0.0));
2095 break;
2096 case ir_unop_d2b:
2097 emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0],
2098 st_src_reg_for_double(0.0));
2099 break;
2100 case ir_unop_i2b:
2101 if (native_integers)
2102 emit_asm(ir, TGSI_OPCODE_USNE, result_dst, op[0],
2103 st_src_reg_for_int(0));
2104 else
2105 emit_asm(ir, TGSI_OPCODE_SNE, result_dst, op[0],
2106 st_src_reg_for_float(0.0));
2107 break;
2108 case ir_unop_bitcast_u642d:
2109 case ir_unop_bitcast_i642d:
2110 result_src = op[0];
2111 result_src.type = GLSL_TYPE_DOUBLE;
2112 break;
2113 case ir_unop_bitcast_d2i64:
2114 result_src = op[0];
2115 result_src.type = GLSL_TYPE_INT64;
2116 break;
2117 case ir_unop_bitcast_d2u64:
2118 result_src = op[0];
2119 result_src.type = GLSL_TYPE_UINT64;
2120 break;
2121 case ir_unop_trunc:
2122 emit_asm(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
2123 break;
2124 case ir_unop_ceil:
2125 emit_asm(ir, TGSI_OPCODE_CEIL, result_dst, op[0]);
2126 break;
2127 case ir_unop_floor:
2128 emit_asm(ir, TGSI_OPCODE_FLR, result_dst, op[0]);
2129 break;
2130 case ir_unop_round_even:
2131 emit_asm(ir, TGSI_OPCODE_ROUND, result_dst, op[0]);
2132 break;
2133 case ir_unop_fract:
2134 emit_asm(ir, TGSI_OPCODE_FRC, result_dst, op[0]);
2135 break;
2136
2137 case ir_binop_min:
2138 emit_asm(ir, TGSI_OPCODE_MIN, result_dst, op[0], op[1]);
2139 break;
2140 case ir_binop_max:
2141 emit_asm(ir, TGSI_OPCODE_MAX, result_dst, op[0], op[1]);
2142 break;
2143 case ir_binop_pow:
2144 emit_scalar(ir, TGSI_OPCODE_POW, result_dst, op[0], op[1]);
2145 break;
2146
2147 case ir_unop_bit_not:
2148 if (native_integers) {
2149 emit_asm(ir, TGSI_OPCODE_NOT, result_dst, op[0]);
2150 break;
2151 }
2152 FALLTHROUGH;
2153 case ir_unop_u2f:
2154 if (native_integers) {
2155 emit_asm(ir, TGSI_OPCODE_U2F, result_dst, op[0]);
2156 break;
2157 }
2158 FALLTHROUGH;
2159 case ir_binop_lshift:
2160 case ir_binop_rshift:
2161 if (native_integers) {
2162 enum tgsi_opcode opcode = ir->operation == ir_binop_lshift
2163 ? TGSI_OPCODE_SHL : TGSI_OPCODE_ISHR;
2164 st_src_reg count;
2165
2166 if (glsl_base_type_is_64bit(op[0].type)) {
2167 /* GLSL shift operations have 32-bit shift counts, but TGSI uses
2168 * 64 bits.
2169 */
2170 count = get_temp(glsl_type::u64vec(ir->operands[1]
2171 ->type->components()));
2172 emit_asm(ir, TGSI_OPCODE_U2I64, st_dst_reg(count), op[1]);
2173 } else {
2174 count = op[1];
2175 }
2176
2177 emit_asm(ir, opcode, result_dst, op[0], count);
2178 break;
2179 }
2180 FALLTHROUGH;
2181 case ir_binop_bit_and:
2182 if (native_integers) {
2183 emit_asm(ir, TGSI_OPCODE_AND, result_dst, op[0], op[1]);
2184 break;
2185 }
2186 FALLTHROUGH;
2187 case ir_binop_bit_xor:
2188 if (native_integers) {
2189 emit_asm(ir, TGSI_OPCODE_XOR, result_dst, op[0], op[1]);
2190 break;
2191 }
2192 FALLTHROUGH;
2193 case ir_binop_bit_or:
2194 if (native_integers) {
2195 emit_asm(ir, TGSI_OPCODE_OR, result_dst, op[0], op[1]);
2196 break;
2197 }
2198
2199 assert(!"GLSL 1.30 features unsupported");
2200 break;
2201
2202 case ir_binop_ubo_load: {
2203 if (ctx->Const.UseSTD430AsDefaultPacking) {
2204 ir_rvalue *block = ir->operands[0];
2205 ir_rvalue *offset = ir->operands[1];
2206 ir_constant *const_block = block->as_constant();
2207
2208 st_src_reg cbuf(PROGRAM_CONSTANT,
2209 (const_block ? const_block->value.u[0] + 1 : 1),
2210 ir->type->base_type);
2211
2212 cbuf.has_index2 = true;
2213
2214 if (!const_block) {
2215 block->accept(this);
2216 cbuf.reladdr = ralloc(mem_ctx, st_src_reg);
2217 *cbuf.reladdr = this->result;
2218 emit_arl(ir, sampler_reladdr, this->result);
2219 }
2220
2221 /* Calculate the surface offset */
2222 offset->accept(this);
2223 st_src_reg off = this->result;
2224
2225 glsl_to_tgsi_instruction *inst =
2226 emit_asm(ir, TGSI_OPCODE_LOAD, result_dst, off);
2227
2228 if (result_dst.type == GLSL_TYPE_BOOL)
2229 emit_asm(ir, TGSI_OPCODE_USNE, result_dst, st_src_reg(result_dst),
2230 st_src_reg_for_int(0));
2231
2232 add_buffer_to_load_and_stores(inst, &cbuf, &this->instructions,
2233 NULL);
2234 } else {
2235 ir_constant *const_uniform_block = ir->operands[0]->as_constant();
2236 ir_constant *const_offset_ir = ir->operands[1]->as_constant();
2237 unsigned const_offset = const_offset_ir ?
2238 const_offset_ir->value.u[0] : 0;
2239 unsigned const_block = const_uniform_block ?
2240 const_uniform_block->value.u[0] + 1 : 1;
2241 st_src_reg index_reg = get_temp(glsl_type::uint_type);
2242 st_src_reg cbuf;
2243
2244 cbuf.type = ir->type->base_type;
2245 cbuf.file = PROGRAM_CONSTANT;
2246 cbuf.index = 0;
2247 cbuf.reladdr = NULL;
2248 cbuf.negate = 0;
2249 cbuf.abs = 0;
2250 cbuf.index2D = const_block;
2251
2252 assert(ir->type->is_vector() || ir->type->is_scalar());
2253
2254 if (const_offset_ir) {
2255 /* Constant index into constant buffer */
2256 cbuf.reladdr = NULL;
2257 cbuf.index = const_offset / 16;
2258 } else {
2259 ir_expression *offset_expr = ir->operands[1]->as_expression();
2260 st_src_reg offset = op[1];
2261
2262 /* The OpenGL spec is written in such a way that accesses with
2263 * non-constant offset are almost always vec4-aligned. The only
2264 * exception to this are members of structs in arrays of structs:
2265 * each struct in an array of structs is at least vec4-aligned,
2266 * but single-element and [ui]vec2 members of the struct may be at
2267 * an offset that is not a multiple of 16 bytes.
2268 *
2269 * Here, we extract that offset, relying on previous passes to
2270 * always generate offset expressions of the form
2271 * (+ expr constant_offset).
2272 *
2273 * Note that the std430 layout, which allows more cases of
2274 * alignment less than vec4 in arrays, is not supported for
2275 * uniform blocks, so we do not have to deal with it here.
2276 */
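/* Illustrative std140 example (assumed layout): for
 *    struct { vec4 v; float f; } s[N];
 * the array stride is 32 bytes and s[i].f sits at offset i*32 + 16.
 * The constant "+ 16" part is peeled off here (cbuf.index advances by
 * 16/16 and the channel comes from 16%16/4 = x), while only the dynamic
 * i*32 part is shifted right by 4 below to form the vec4-granular
 * relative address.
 */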
2277 if (offset_expr && offset_expr->operation == ir_binop_add) {
2278 const_offset_ir = offset_expr->operands[1]->as_constant();
2279 if (const_offset_ir) {
2280 const_offset = const_offset_ir->value.u[0];
2281 cbuf.index = const_offset / 16;
2282 offset_expr->operands[0]->accept(this);
2283 offset = this->result;
2284 }
2285 }
2286
2287 /* Relative/variable index into constant buffer */
2288 emit_asm(ir, TGSI_OPCODE_USHR, st_dst_reg(index_reg), offset,
2289 st_src_reg_for_int(4));
2290 cbuf.reladdr = ralloc(mem_ctx, st_src_reg);
2291 *cbuf.reladdr = index_reg;
2292 }
2293
2294 if (const_uniform_block) {
2295 /* Constant constant buffer */
2296 cbuf.reladdr2 = NULL;
2297 } else {
2298 /* Relative/variable constant buffer */
2299 cbuf.reladdr2 = ralloc(mem_ctx, st_src_reg);
2300 *cbuf.reladdr2 = op[0];
2301 }
2302 cbuf.has_index2 = true;
2303
2304 cbuf.swizzle = swizzle_for_size(ir->type->vector_elements);
2305 if (glsl_base_type_is_64bit(cbuf.type))
2306 cbuf.swizzle += MAKE_SWIZZLE4(const_offset % 16 / 8,
2307 const_offset % 16 / 8,
2308 const_offset % 16 / 8,
2309 const_offset % 16 / 8);
2310 else
2311 cbuf.swizzle += MAKE_SWIZZLE4(const_offset % 16 / 4,
2312 const_offset % 16 / 4,
2313 const_offset % 16 / 4,
2314 const_offset % 16 / 4);
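/* const_offset % 16 / 4 selects which 32-bit channel of the vec4 slot
 * holds the value (0..3 -> x..w); 64-bit types divide by 8 instead, since
 * each element spans two channels.
 */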
2315
2316 if (ir->type->is_boolean()) {
2317 emit_asm(ir, TGSI_OPCODE_USNE, result_dst, cbuf,
2318 st_src_reg_for_int(0));
2319 } else {
2320 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, cbuf);
2321 }
2322 }
2323 break;
2324 }
2325 case ir_triop_lrp:
2326 /* note: we have to reorder the three args here */
2327 emit_asm(ir, TGSI_OPCODE_LRP, result_dst, op[2], op[1], op[0]);
2328 break;
2329 case ir_triop_csel:
2330 if (this->ctx->Const.NativeIntegers)
2331 emit_asm(ir, TGSI_OPCODE_UCMP, result_dst, op[0], op[1], op[2]);
2332 else {
2333 op[0].negate = ~op[0].negate;
2334 emit_asm(ir, TGSI_OPCODE_CMP, result_dst, op[0], op[1], op[2]);
2335 }
2336 break;
2337 case ir_triop_bitfield_extract:
2338 emit_asm(ir, TGSI_OPCODE_IBFE, result_dst, op[0], op[1], op[2]);
2339 break;
2340 case ir_quadop_bitfield_insert:
2341 emit_asm(ir, TGSI_OPCODE_BFI, result_dst, op[0], op[1], op[2], op[3]);
2342 break;
2343 case ir_unop_bitfield_reverse:
2344 emit_asm(ir, TGSI_OPCODE_BREV, result_dst, op[0]);
2345 break;
2346 case ir_unop_bit_count:
2347 emit_asm(ir, TGSI_OPCODE_POPC, result_dst, op[0]);
2348 break;
2349 case ir_unop_find_msb:
2350 emit_asm(ir, TGSI_OPCODE_IMSB, result_dst, op[0]);
2351 break;
2352 case ir_unop_find_lsb:
2353 emit_asm(ir, TGSI_OPCODE_LSB, result_dst, op[0]);
2354 break;
2355 case ir_binop_imul_high:
2356 emit_asm(ir, TGSI_OPCODE_IMUL_HI, result_dst, op[0], op[1]);
2357 break;
2358 case ir_triop_fma:
2359 /* In theory, MAD is incorrect here: it may round the intermediate
 * product, unlike a true fused multiply-add, so it is only used as a
 * fallback when FMA is unavailable.
 */
2360 if (have_fma)
2361 emit_asm(ir, TGSI_OPCODE_FMA, result_dst, op[0], op[1], op[2]);
2362 else
2363 emit_asm(ir, TGSI_OPCODE_MAD, result_dst, op[0], op[1], op[2]);
2364 break;
2365 case ir_unop_interpolate_at_centroid:
2366 emit_asm(ir, TGSI_OPCODE_INTERP_CENTROID, result_dst, op[0]);
2367 break;
2368 case ir_binop_interpolate_at_offset: {
2369 /* The y coordinate needs to be flipped for the default fb */
2370 static const gl_state_index16 transform_y_state[STATE_LENGTH]
2371 = { STATE_FB_WPOS_Y_TRANSFORM };
2372
2373 unsigned transform_y_index =
2374 _mesa_add_state_reference(this->prog->Parameters,
2375 transform_y_state);
2376
2377 st_src_reg transform_y = st_src_reg(PROGRAM_STATE_VAR,
2378 transform_y_index,
2379 glsl_type::vec4_type);
2380 transform_y.swizzle = SWIZZLE_XXXX;
2381
2382 st_src_reg temp = get_temp(glsl_type::vec2_type);
2383 st_dst_reg temp_dst = st_dst_reg(temp);
2384
2385 emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[1]);
2386 temp_dst.writemask = WRITEMASK_Y;
2387 emit_asm(ir, TGSI_OPCODE_MUL, temp_dst, transform_y, op[1]);
2388 emit_asm(ir, TGSI_OPCODE_INTERP_OFFSET, result_dst, op[0], temp);
2389 break;
2390 }
2391 case ir_binop_interpolate_at_sample:
2392 emit_asm(ir, TGSI_OPCODE_INTERP_SAMPLE, result_dst, op[0], op[1]);
2393 break;
2394
2395 case ir_unop_d2f:
2396 emit_asm(ir, TGSI_OPCODE_D2F, result_dst, op[0]);
2397 break;
2398 case ir_unop_f2d:
2399 emit_asm(ir, TGSI_OPCODE_F2D, result_dst, op[0]);
2400 break;
2401 case ir_unop_d2i:
2402 emit_asm(ir, TGSI_OPCODE_D2I, result_dst, op[0]);
2403 break;
2404 case ir_unop_i2d:
2405 emit_asm(ir, TGSI_OPCODE_I2D, result_dst, op[0]);
2406 break;
2407 case ir_unop_d2u:
2408 emit_asm(ir, TGSI_OPCODE_D2U, result_dst, op[0]);
2409 break;
2410 case ir_unop_u2d:
2411 emit_asm(ir, TGSI_OPCODE_U2D, result_dst, op[0]);
2412 break;
2413 case ir_unop_unpack_double_2x32:
2414 case ir_unop_pack_double_2x32:
2415 case ir_unop_unpack_int_2x32:
2416 case ir_unop_pack_int_2x32:
2417 case ir_unop_unpack_uint_2x32:
2418 case ir_unop_pack_uint_2x32:
2419 case ir_unop_unpack_sampler_2x32:
2420 case ir_unop_pack_sampler_2x32:
2421 case ir_unop_unpack_image_2x32:
2422 case ir_unop_pack_image_2x32:
2423 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, op[0]);
2424 break;
2425
2426 case ir_binop_ldexp:
2427 if (ir->operands[0]->type->is_double()) {
2428 emit_asm(ir, TGSI_OPCODE_DLDEXP, result_dst, op[0], op[1]);
2429 } else if (ir->operands[0]->type->is_float()) {
2430 emit_asm(ir, TGSI_OPCODE_LDEXP, result_dst, op[0], op[1]);
2431 } else {
2432 assert(!"Invalid ldexp for non-double opcode in glsl_to_tgsi_visitor::visit()");
2433 }
2434 break;
2435
2436 case ir_unop_pack_half_2x16:
2437 emit_asm(ir, TGSI_OPCODE_PK2H, result_dst, op[0]);
2438 break;
2439 case ir_unop_unpack_half_2x16:
2440 emit_asm(ir, TGSI_OPCODE_UP2H, result_dst, op[0]);
2441 break;
2442
2443 case ir_unop_get_buffer_size: {
2444 ir_constant *const_offset = ir->operands[0]->as_constant();
2445 st_src_reg buffer(
2446 PROGRAM_BUFFER,
2447 const_offset ? const_offset->value.u[0] : 0,
2448 GLSL_TYPE_UINT);
2449 if (!const_offset) {
2450 buffer.reladdr = ralloc(mem_ctx, st_src_reg);
2451 *buffer.reladdr = op[0];
2452 emit_arl(ir, sampler_reladdr, op[0]);
2453 }
2454 emit_asm(ir, TGSI_OPCODE_RESQ, result_dst)->resource = buffer;
2455 break;
2456 }
2457
2458 case ir_unop_u2i64:
2459 case ir_unop_u2u64:
2460 case ir_unop_b2i64: {
2461 st_src_reg temp = get_temp(glsl_type::uvec4_type);
2462 st_dst_reg temp_dst = st_dst_reg(temp);
2463 unsigned orig_swz = op[0].swizzle;
2464 /*
2465 * To convert unsigned to 64-bit:
2466 * zero Y channel, copy X channel.
2467 */
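/* Each 64-bit result element occupies two 32-bit channels (low dword in
 * x/z, high dword in y/w), so zeroing y/w and copying the 32-bit source
 * into x/z yields the zero-extended value; for b2i64 the AND with 1 first
 * maps ~0/0 to 1/0.
 */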
2468 temp_dst.writemask = WRITEMASK_Y;
2469 if (vector_elements > 1)
2470 temp_dst.writemask |= WRITEMASK_W;
2471 emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, st_src_reg_for_int(0));
2472 temp_dst.writemask = WRITEMASK_X;
2473 if (vector_elements > 1)
2474 temp_dst.writemask |= WRITEMASK_Z;
2475 op[0].swizzle = MAKE_SWIZZLE4(GET_SWZ(orig_swz, 0), GET_SWZ(orig_swz, 0),
2476 GET_SWZ(orig_swz, 1), GET_SWZ(orig_swz, 1));
2477 if (ir->operation == ir_unop_u2i64 || ir->operation == ir_unop_u2u64)
2478 emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[0]);
2479 else
2480 emit_asm(ir, TGSI_OPCODE_AND, temp_dst, op[0], st_src_reg_for_int(1));
2481 result_src = temp;
2482 result_src.type = GLSL_TYPE_UINT64;
2483 if (vector_elements > 2) {
2484 /* Subtle: We rely on the fact that get_temp here returns the next
2485 * TGSI temporary register directly after the temp register used for
2486 * the first two components, so that the result gets picked up
2487 * automatically.
2488 */
2489 st_src_reg temp = get_temp(glsl_type::uvec4_type);
2490 st_dst_reg temp_dst = st_dst_reg(temp);
2491 temp_dst.writemask = WRITEMASK_Y;
2492 if (vector_elements > 3)
2493 temp_dst.writemask |= WRITEMASK_W;
2494 emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, st_src_reg_for_int(0));
2495
2496 temp_dst.writemask = WRITEMASK_X;
2497 if (vector_elements > 3)
2498 temp_dst.writemask |= WRITEMASK_Z;
2499 op[0].swizzle = MAKE_SWIZZLE4(GET_SWZ(orig_swz, 2),
2500 GET_SWZ(orig_swz, 2),
2501 GET_SWZ(orig_swz, 3),
2502 GET_SWZ(orig_swz, 3));
2503 if (ir->operation == ir_unop_u2i64 || ir->operation == ir_unop_u2u64)
2504 emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[0]);
2505 else
2506 emit_asm(ir, TGSI_OPCODE_AND, temp_dst, op[0],
2507 st_src_reg_for_int(1));
2508 }
2509 break;
2510 }
2511 case ir_unop_i642i:
2512 case ir_unop_u642i:
2513 case ir_unop_u642u:
2514 case ir_unop_i642u: {
2515 st_src_reg temp = get_temp(glsl_type::uvec4_type);
2516 st_dst_reg temp_dst = st_dst_reg(temp);
2517 unsigned orig_swz = op[0].swizzle;
2518 unsigned orig_idx = op[0].index;
2519 int el;
2520 temp_dst.writemask = WRITEMASK_X;
2521
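/* Read the 64-bit source through its 32-bit view: for each element the
 * loop below selects the channel holding its low dword (x or z, bumping
 * the register index when the swizzle points past the first register)
 * and packs it into one 32-bit channel of temp.
 */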
2522 for (el = 0; el < vector_elements; el++) {
2523 unsigned swz = GET_SWZ(orig_swz, el);
2524 if (swz & 1)
2525 op[0].swizzle = MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_Z,
2526 SWIZZLE_Z, SWIZZLE_Z);
2527 else
2528 op[0].swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X,
2529 SWIZZLE_X, SWIZZLE_X);
2530 if (swz > 2)
2531 op[0].index = orig_idx + 1;
2532 op[0].type = GLSL_TYPE_UINT;
2533 temp_dst.writemask = WRITEMASK_X << el;
2534 emit_asm(ir, TGSI_OPCODE_MOV, temp_dst, op[0]);
2535 }
2536 result_src = temp;
2537 if (ir->operation == ir_unop_u642u || ir->operation == ir_unop_i642u)
2538 result_src.type = GLSL_TYPE_UINT;
2539 else
2540 result_src.type = GLSL_TYPE_INT;
2541 break;
2542 }
2543 case ir_unop_i642b:
2544 emit_asm(ir, TGSI_OPCODE_U64SNE, result_dst, op[0],
2545 st_src_reg_for_int64(0));
2546 break;
2547 case ir_unop_i642f:
2548 emit_asm(ir, TGSI_OPCODE_I642F, result_dst, op[0]);
2549 break;
2550 case ir_unop_u642f:
2551 emit_asm(ir, TGSI_OPCODE_U642F, result_dst, op[0]);
2552 break;
2553 case ir_unop_i642d:
2554 emit_asm(ir, TGSI_OPCODE_I642D, result_dst, op[0]);
2555 break;
2556 case ir_unop_u642d:
2557 emit_asm(ir, TGSI_OPCODE_U642D, result_dst, op[0]);
2558 break;
2559 case ir_unop_i2i64:
2560 emit_asm(ir, TGSI_OPCODE_I2I64, result_dst, op[0]);
2561 break;
2562 case ir_unop_f2i64:
2563 emit_asm(ir, TGSI_OPCODE_F2I64, result_dst, op[0]);
2564 break;
2565 case ir_unop_d2i64:
2566 emit_asm(ir, TGSI_OPCODE_D2I64, result_dst, op[0]);
2567 break;
2568 case ir_unop_i2u64:
2569 emit_asm(ir, TGSI_OPCODE_I2I64, result_dst, op[0]);
2570 break;
2571 case ir_unop_f2u64:
2572 emit_asm(ir, TGSI_OPCODE_F2U64, result_dst, op[0]);
2573 break;
2574 case ir_unop_d2u64:
2575 emit_asm(ir, TGSI_OPCODE_D2U64, result_dst, op[0]);
2576 break;
2577 /* These operations might need to be handled here in the future. */
2578 case ir_unop_pack_snorm_2x16:
2579 case ir_unop_pack_unorm_2x16:
2580 case ir_unop_pack_snorm_4x8:
2581 case ir_unop_pack_unorm_4x8:
2582
2583 case ir_unop_unpack_snorm_2x16:
2584 case ir_unop_unpack_unorm_2x16:
2585 case ir_unop_unpack_snorm_4x8:
2586 case ir_unop_unpack_unorm_4x8:
2587
2588 case ir_quadop_vector:
2589 case ir_binop_vector_extract:
2590 case ir_triop_vector_insert:
2591 case ir_binop_carry:
2592 case ir_binop_borrow:
2593 case ir_unop_ssbo_unsized_array_length:
2594 case ir_unop_implicitly_sized_array_length:
2595 case ir_unop_atan:
2596 case ir_binop_atan2:
2597 case ir_unop_clz:
2598 case ir_binop_add_sat:
2599 case ir_binop_sub_sat:
2600 case ir_binop_abs_sub:
2601 case ir_binop_avg:
2602 case ir_binop_avg_round:
2603 case ir_binop_mul_32x16:
2604 case ir_unop_f162f:
2605 case ir_unop_f2f16:
2606 case ir_unop_f2fmp:
2607 case ir_unop_f162b:
2608 case ir_unop_b2f16:
2609 case ir_unop_i2i:
2610 case ir_unop_i2imp:
2611 case ir_unop_u2u:
2612 case ir_unop_u2ump:
2613 /* This operation is not supported, or should have already been handled.
2614 */
2615 assert(!"Invalid ir opcode in glsl_to_tgsi_visitor::visit()");
2616 break;
2617 }
2618
2619 this->result = result_src;
2620 }
2621
2622
2623 void
2624 glsl_to_tgsi_visitor::visit(ir_swizzle *ir)
2625 {
2626 st_src_reg src;
2627 int i;
2628 int swizzle[4] = {0};
2629
2630 /* Note that this is only swizzles in expressions, not those on the left
2631 * hand side of an assignment, which do write masking. See ir_assignment
2632 * for that.
2633 */
2634
2635 ir->val->accept(this);
2636 src = this->result;
2637 assert(src.file != PROGRAM_UNDEFINED);
2638 assert(ir->type->vector_elements > 0);
2639
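/* Compose the new swizzle on top of the existing one: channel i of the
 * result looks up mask component i in src.swizzle. For example, with
 * src.swizzle = yzwx and a .zy swizzle, the result reads (w, z), with z
 * replicated into the unused channels.
 */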
2640 for (i = 0; i < 4; i++) {
2641 if (i < ir->type->vector_elements) {
2642 switch (i) {
2643 case 0:
2644 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.x);
2645 break;
2646 case 1:
2647 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.y);
2648 break;
2649 case 2:
2650 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.z);
2651 break;
2652 case 3:
2653 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.w);
2654 break;
2655 }
2656 } else {
2657 /* If the type is smaller than a vec4, replicate the last
2658 * channel out.
2659 */
2660 swizzle[i] = swizzle[ir->type->vector_elements - 1];
2661 }
2662 }
2663
2664 src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
2665
2666 this->result = src;
2667 }
2668
2669 /* Test if the variable is an array. Note that geometry and
2670 * tessellation shader inputs and outputs are always arrays (except
2671 * for patch inputs), so only the array element type is considered.
2672 */
2673 static bool
2674 is_inout_array(unsigned stage, ir_variable *var, bool *remove_array)
2675 {
2676 const glsl_type *type = var->type;
2677
2678 *remove_array = false;
2679
2680 if ((stage == MESA_SHADER_VERTEX && var->data.mode == ir_var_shader_in) ||
2681 (stage == MESA_SHADER_FRAGMENT && var->data.mode == ir_var_shader_out))
2682 return false;
2683
2684 if (((stage == MESA_SHADER_GEOMETRY && var->data.mode == ir_var_shader_in) ||
2685 (stage == MESA_SHADER_TESS_EVAL && var->data.mode == ir_var_shader_in) ||
2686 stage == MESA_SHADER_TESS_CTRL) &&
2687 !var->data.patch) {
2688 if (!var->type->is_array())
2689 return false; /* a system value probably */
2690
2691 type = var->type->fields.array;
2692 *remove_array = true;
2693 }
2694
2695 return type->is_array() || type->is_matrix();
2696 }
2697
2698 static unsigned
2699 st_translate_interp_loc(ir_variable *var)
2700 {
2701 if (var->data.centroid)
2702 return TGSI_INTERPOLATE_LOC_CENTROID;
2703 else if (var->data.sample)
2704 return TGSI_INTERPOLATE_LOC_SAMPLE;
2705 else
2706 return TGSI_INTERPOLATE_LOC_CENTER;
2707 }
2708
2709 void
2710 glsl_to_tgsi_visitor::visit(ir_dereference_variable *ir)
2711 {
2712 variable_storage *entry;
2713 ir_variable *var = ir->var;
2714 bool remove_array;
2715
2716 if (handle_bound_deref(ir->as_dereference()))
2717 return;
2718
2719 entry = find_variable_storage(ir->var);
2720
2721 if (!entry) {
2722 switch (var->data.mode) {
2723 case ir_var_uniform:
2724 entry = new(mem_ctx) variable_storage(var, PROGRAM_UNIFORM,
2725 var->data.param_index);
2726 _mesa_hash_table_insert(this->variables, var, entry);
2727 break;
2728 case ir_var_shader_in: {
2729 /* The linker assigns locations for varyings and attributes,
2730 * including deprecated builtins (like gl_Color), user-assigned
2731 * generic attributes (glBindAttribLocation), and
2732 * user-defined varyings.
2733 */
2734 assert(var->data.location != -1);
2735
2736 const glsl_type *type_without_array = var->type->without_array();
2737 struct inout_decl *decl = &inputs[num_inputs];
2738 unsigned component = var->data.location_frac;
2739 unsigned num_components;
2740 num_inputs++;
2741
2742 if (type_without_array->is_64bit())
2743 component = component / 2;
2744 if (type_without_array->vector_elements)
2745 num_components = type_without_array->vector_elements;
2746 else
2747 num_components = 4;
2748
2749 decl->mesa_index = var->data.location;
2750 decl->interp = (glsl_interp_mode) var->data.interpolation;
2751 decl->interp_loc = st_translate_interp_loc(var);
2752 decl->base_type = type_without_array->base_type;
2753 decl->usage_mask = u_bit_consecutive(component, num_components);
2754
2755 if (is_inout_array(shader->Stage, var, &remove_array)) {
2756 decl->array_id = num_input_arrays + 1;
2757 num_input_arrays++;
2758 } else {
2759 decl->array_id = 0;
2760 }
2761
2762 if (remove_array)
2763 decl->size = type_size(var->type->fields.array);
2764 else
2765 decl->size = type_size(var->type);
2766
2767 entry = new(mem_ctx) variable_storage(var,
2768 PROGRAM_INPUT,
2769 decl->mesa_index,
2770 decl->array_id);
2771 entry->component = component;
2772
2773 _mesa_hash_table_insert(this->variables, var, entry);
2774
2775 break;
2776 }
2777 case ir_var_shader_out: {
2778 assert(var->data.location != -1);
2779
2780 const glsl_type *type_without_array = var->type->without_array();
2781 struct inout_decl *decl = &outputs[num_outputs];
2782 unsigned component = var->data.location_frac;
2783 unsigned num_components;
2784 num_outputs++;
2785
2786 decl->invariant = var->data.invariant;
2787
2788 if (type_without_array->is_64bit())
2789 component = component / 2;
2790 if (type_without_array->vector_elements)
2791 num_components = type_without_array->vector_elements;
2792 else
2793 num_components = 4;
2794
2795 decl->mesa_index = var->data.location + FRAG_RESULT_MAX * var->data.index;
2796 decl->base_type = type_without_array->base_type;
2797 decl->usage_mask = u_bit_consecutive(component, num_components);
2798 if (var->data.stream & (1u << 31)) {
2799 decl->gs_out_streams = var->data.stream & ~(1u << 31);
2800 } else {
2801 assert(var->data.stream < 4);
2802 decl->gs_out_streams = 0;
2803 for (unsigned i = 0; i < num_components; ++i)
2804 decl->gs_out_streams |= var->data.stream << (2 * (component + i));
2805 }
2806
2807 if (is_inout_array(shader->Stage, var, &remove_array)) {
2808 decl->array_id = num_output_arrays + 1;
2809 num_output_arrays++;
2810 } else {
2811 decl->array_id = 0;
2812 }
2813
2814 if (remove_array)
2815 decl->size = type_size(var->type->fields.array);
2816 else
2817 decl->size = type_size(var->type);
2818
2819 if (var->data.fb_fetch_output) {
2820 st_dst_reg dst = st_dst_reg(get_temp(var->type));
2821 st_src_reg src = st_src_reg(PROGRAM_OUTPUT, decl->mesa_index,
2822 var->type, component, decl->array_id);
2823 emit_asm(NULL, TGSI_OPCODE_FBFETCH, dst, src);
2824 entry = new(mem_ctx) variable_storage(var, dst.file, dst.index,
2825 dst.array_id);
2826 } else {
2827 entry = new(mem_ctx) variable_storage(var,
2828 PROGRAM_OUTPUT,
2829 decl->mesa_index,
2830 decl->array_id);
2831 }
2832 entry->component = component;
2833
2834 _mesa_hash_table_insert(this->variables, var, entry);
2835
2836 break;
2837 }
2838 case ir_var_system_value:
2839 entry = new(mem_ctx) variable_storage(var,
2840 PROGRAM_SYSTEM_VALUE,
2841 var->data.location);
2842 break;
2843 case ir_var_auto:
2844 case ir_var_temporary:
2845 st_src_reg src = get_temp(var->type);
2846
2847 entry = new(mem_ctx) variable_storage(var, src.file, src.index,
2848 src.array_id);
2849 _mesa_hash_table_insert(this->variables, var, entry);
2850
2851 break;
2852 }
2853
2854 if (!entry) {
2855 printf("Failed to make storage for %s\n", var->name);
2856 exit(1);
2857 }
2858 }
2859
2860 this->result = st_src_reg(entry->file, entry->index, var->type,
2861 entry->component, entry->array_id);
2862 if (this->shader->Stage == MESA_SHADER_VERTEX &&
2863 var->data.mode == ir_var_shader_in &&
2864 var->type->without_array()->is_double())
2865 this->result.is_double_vertex_input = true;
2866 if (!native_integers)
2867 this->result.type = GLSL_TYPE_FLOAT;
2868 }
2869
2870 static void
2871 shrink_array_declarations(struct inout_decl *decls, unsigned count,
2872 GLbitfield64* usage_mask,
2873 GLbitfield64 double_usage_mask,
2874 GLbitfield* patch_usage_mask)
2875 {
2876 unsigned i;
2877 int j;
2878
2879 /* Fix array declarations by removing unused array elements at both ends
2880 * of the arrays. For example, mat4[3] where only mat4[1] is used.
2881 */
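/* For example, an input declared as an array of 6 slots where only
 * slots 2..4 are read ends up with mesa_index advanced by 2 and
 * size reduced to 3.
 */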
2882 for (i = 0; i < count; i++) {
2883 struct inout_decl *decl = &decls[i];
2884 if (!decl->array_id)
2885 continue;
2886
2887 /* Shrink the beginning. */
2888 for (j = 0; j < (int)decl->size; j++) {
2889 if (decl->mesa_index >= VARYING_SLOT_PATCH0) {
2890 if (*patch_usage_mask &
2891 BITFIELD64_BIT(decl->mesa_index - VARYING_SLOT_PATCH0 + j))
2892 break;
2893 }
2894 else {
2895 if (*usage_mask & BITFIELD64_BIT(decl->mesa_index+j))
2896 break;
2897 if (double_usage_mask & BITFIELD64_BIT(decl->mesa_index+j-1))
2898 break;
2899 }
2900
2901 decl->mesa_index++;
2902 decl->size--;
2903 j--;
2904 }
2905
2906 /* Shrink the end. */
2907 for (j = decl->size-1; j >= 0; j--) {
2908 if (decl->mesa_index >= VARYING_SLOT_PATCH0) {
2909 if (*patch_usage_mask &
2910 BITFIELD64_BIT(decl->mesa_index - VARYING_SLOT_PATCH0 + j))
2911 break;
2912 }
2913 else {
2914 if (*usage_mask & BITFIELD64_BIT(decl->mesa_index+j))
2915 break;
2916 if (double_usage_mask & BITFIELD64_BIT(decl->mesa_index+j-1))
2917 break;
2918 }
2919
2920 decl->size--;
2921 }
2922
2923 /* When not all entries of an array are accessed, we mark them as used
2924 * here anyway, to ensure that the input/output mapping logic doesn't get
2925 * confused.
2926 *
2927 * TODO This happens when an array isn't used via indirect access, which
2928 * some game ports do (at least eON-based). There is an optimization
2929 * opportunity here by replacing the array declaration with non-array
2930 * declarations of those slots that are actually used.
2931 */
2932 for (j = 1; j < (int)decl->size; ++j) {
2933 if (decl->mesa_index >= VARYING_SLOT_PATCH0)
2934 *patch_usage_mask |= BITFIELD64_BIT(decl->mesa_index - VARYING_SLOT_PATCH0 + j);
2935 else
2936 *usage_mask |= BITFIELD64_BIT(decl->mesa_index + j);
2937 }
2938 }
2939 }
2940
2941
2942 static void
2943 mark_array_io(struct inout_decl *decls, unsigned count,
2944 GLbitfield64* usage_mask,
2945 GLbitfield64 double_usage_mask,
2946 GLbitfield* patch_usage_mask)
2947 {
2948 unsigned i;
2949 int j;
2950
2951 /* Mark all elements of the declared arrays as used, so the
2952 * input/output mapping logic stays consistent when only some are accessed.
2953 */
2954 for (i = 0; i < count; i++) {
2955 struct inout_decl *decl = &decls[i];
2956 if (!decl->array_id)
2957 continue;
2958
2959 /* When not all entries of an array are accessed, we mark them as used
2960 * here anyway, to ensure that the input/output mapping logic doesn't get
2961 * confused.
2962 *
2963 * TODO This happens when an array isn't used via indirect access, which
2964 * some game ports do (at least eON-based). There is an optimization
2965 * opportunity here by replacing the array declaration with non-array
2966 * declarations of those slots that are actually used.
2967 */
2968 for (j = 0; j < (int)decl->size; ++j) {
2969 if (decl->mesa_index >= VARYING_SLOT_PATCH0)
2970 *patch_usage_mask |= BITFIELD64_BIT(decl->mesa_index - VARYING_SLOT_PATCH0 + j);
2971 else
2972 *usage_mask |= BITFIELD64_BIT(decl->mesa_index + j);
2973 }
2974 }
2975 }
2976
2977 void
2978 glsl_to_tgsi_visitor::visit(ir_dereference_array *ir)
2979 {
2980 ir_constant *index;
2981 st_src_reg src;
2982 bool is_2D = false;
2983 ir_variable *var = ir->variable_referenced();
2984
2985 if (handle_bound_deref(ir->as_dereference()))
2986 return;
2987
2988 /* We only need the logic provided by count_vec4_slots()
2989 * for arrays of structs. Indirect sampler and image indexing is handled
2990 * elsewhere.
2991 */
2992 int element_size = ir->type->without_array()->is_struct() ?
2993 ir->type->count_vec4_slots(false, var->data.bindless) :
2994 type_size(ir->type);
2995
2996 index = ir->array_index->constant_expression_value(ralloc_parent(ir));
2997
2998 ir->array->accept(this);
2999 src = this->result;
3000
3001 if (!src.has_index2) {
3002 switch (this->prog->Target) {
3003 case GL_TESS_CONTROL_PROGRAM_NV:
3004 is_2D = (src.file == PROGRAM_INPUT || src.file == PROGRAM_OUTPUT) &&
3005 !ir->variable_referenced()->data.patch;
3006 break;
3007 case GL_TESS_EVALUATION_PROGRAM_NV:
3008 is_2D = src.file == PROGRAM_INPUT &&
3009 !ir->variable_referenced()->data.patch;
3010 break;
3011 case GL_GEOMETRY_PROGRAM_NV:
3012 is_2D = src.file == PROGRAM_INPUT;
3013 break;
3014 }
3015 }
3016
3017 if (is_2D)
3018 element_size = 1;
3019
3020 if (index) {
3021
3022 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB &&
3023 src.file == PROGRAM_INPUT)
3024 element_size = attrib_type_size(ir->type, true);
3025 if (is_2D) {
3026 src.index2D = index->value.i[0];
3027 src.has_index2 = true;
3028 } else
3029 src.index += index->value.i[0] * element_size;
3030 } else {
3031 /* Variable index array dereference. It eats the "vec4" of the
3032 * base of the array and an index that offsets the TGSI register
3033 * index.
3034 */
3035 ir->array_index->accept(this);
3036
3037 st_src_reg index_reg;
3038
3039 if (element_size == 1) {
3040 index_reg = this->result;
3041 } else {
3042 index_reg = get_temp(native_integers ?
3043 glsl_type::int_type : glsl_type::float_type);
3044
3045 emit_asm(ir, TGSI_OPCODE_MUL, st_dst_reg(index_reg),
3046 this->result, st_src_reg_for_type(index_reg.type, element_size));
3047 }
3048
3049 /* If there was already a relative address register involved, add the
3050 * new and the old together to get the new offset.
3051 */
3052 if (!is_2D && src.reladdr != NULL) {
3053 st_src_reg accum_reg = get_temp(native_integers ?
3054 glsl_type::int_type : glsl_type::float_type);
3055
3056 emit_asm(ir, TGSI_OPCODE_ADD, st_dst_reg(accum_reg),
3057 index_reg, *src.reladdr);
3058
3059 index_reg = accum_reg;
3060 }
3061
3062 if (is_2D) {
3063 src.reladdr2 = ralloc(mem_ctx, st_src_reg);
3064 *src.reladdr2 = index_reg;
3065 src.index2D = 0;
3066 src.has_index2 = true;
3067 } else {
3068 src.reladdr = ralloc(mem_ctx, st_src_reg);
3069 *src.reladdr = index_reg;
3070 }
3071 }
3072
3073 /* Change the register type to the element type of the array. */
3074 src.type = ir->type->base_type;
3075
3076 this->result = src;
3077 }
3078
3079 void
3080 glsl_to_tgsi_visitor::visit(ir_dereference_record *ir)
3081 {
3082 unsigned int i;
3083 const glsl_type *struct_type = ir->record->type;
3084 ir_variable *var = ir->record->variable_referenced();
3085 int offset = 0;
3086
3087 if (handle_bound_deref(ir->as_dereference()))
3088 return;
3089
3090 ir->record->accept(this);
3091
3092 assert(ir->field_idx >= 0);
3093 assert(var);
3094 for (i = 0; i < struct_type->length; i++) {
3095 if (i == (unsigned) ir->field_idx)
3096 break;
3097 const glsl_type *member_type = struct_type->fields.structure[i].type;
3098 offset += member_type->count_vec4_slots(false, var->data.bindless);
3099 }
3100
3101 /* If the type is smaller than a vec4, replicate the last channel out. */
3102 if (ir->type->is_scalar() || ir->type->is_vector())
3103 this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
3104 else
3105 this->result.swizzle = SWIZZLE_NOOP;
3106
3107 this->result.index += offset;
3108 this->result.type = ir->type->base_type;
3109 }
3110
3111 /**
3112 * We want to be careful in assignment setup to hit the actual storage
3113 * instead of potentially using a temporary like we might with the
3114 * ir_dereference handler.
3115 */
3116 static st_dst_reg
3117 get_assignment_lhs(ir_dereference *ir, glsl_to_tgsi_visitor *v, int *component)
3118 {
3119 /* The LHS must be a dereference. If the LHS is a variable indexed array
3120 * access of a vector, it must be separated into a series of conditional moves
3121 * before reaching this point (see ir_vec_index_to_cond_assign).
3122 */
3123 assert(ir->as_dereference());
3124 ir_dereference_array *deref_array = ir->as_dereference_array();
3125 if (deref_array) {
3126 assert(!deref_array->array->type->is_vector());
3127 }
3128
3129 /* Use the rvalue deref handler for the most part. We write swizzles using
3130 * the writemask, but we do extract the base component for enhanced layouts
3131 * from the source swizzle.
3132 */
3133 ir->accept(v);
3134 *component = GET_SWZ(v->result.swizzle, 0);
3135 return st_dst_reg(v->result);
3136 }
3137
3138 /**
3139 * Process the condition of a conditional assignment
3140 *
3141 * Examines the condition of a conditional assignment to generate the optimal
3142 * first operand of a \c CMP instruction. If the condition is a relational
3143 * operator with 0 (e.g., \c ir_binop_less), the value being compared will be
3144 * used as the source for the \c CMP instruction. Otherwise the comparison
3145 * is processed to a boolean result, and the boolean result is used as the
3146 * operand to the CMP instruction.
3147 */
3148 bool
3149 glsl_to_tgsi_visitor::process_move_condition(ir_rvalue *ir)
3150 {
3151 ir_rvalue *src_ir = ir;
3152 bool negate = true;
3153 bool switch_order = false;
3154
3155 ir_expression *const expr = ir->as_expression();
3156
3157 if (native_integers) {
3158 if ((expr != NULL) && (expr->num_operands == 2)) {
3159 enum glsl_base_type type = expr->operands[0]->type->base_type;
3160 if (type == GLSL_TYPE_INT || type == GLSL_TYPE_UINT ||
3161 type == GLSL_TYPE_BOOL) {
3162 if (expr->operation == ir_binop_equal) {
3163 if (expr->operands[0]->is_zero()) {
3164 src_ir = expr->operands[1];
3165 switch_order = true;
3166 }
3167 else if (expr->operands[1]->is_zero()) {
3168 src_ir = expr->operands[0];
3169 switch_order = true;
3170 }
3171 }
3172 else if (expr->operation == ir_binop_nequal) {
3173 if (expr->operands[0]->is_zero()) {
3174 src_ir = expr->operands[1];
3175 }
3176 else if (expr->operands[1]->is_zero()) {
3177 src_ir = expr->operands[0];
3178 }
3179 }
3180 }
3181 }
3182
3183 src_ir->accept(this);
3184 return switch_order;
3185 }
3186
3187 if ((expr != NULL) && (expr->num_operands == 2)) {
3188 bool zero_on_left = false;
3189
3190 if (expr->operands[0]->is_zero()) {
3191 src_ir = expr->operands[1];
3192 zero_on_left = true;
3193 } else if (expr->operands[1]->is_zero()) {
3194 src_ir = expr->operands[0];
3195 zero_on_left = false;
3196 }
3197
3198 /* a is - 0 + - 0 +
3199 * (a < 0) T F F ( a < 0) T F F
3200 * (0 < a) F F T (-a < 0) F F T
3201 * (a >= 0) F T T ( a < 0) T F F (swap order of other operands)
3202 * (0 >= a) T T F (-a < 0) F F T (swap order of other operands)
3203 *
3204 * Note that exchanging the order of 0 and 'a' in the comparison simply
3205 * means that the value of 'a' should be negated.
3206 */
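/* For example, a condition of the form (a >= 0.0): 'a' itself becomes the
 * CMP condition (no negation) and switch_order is set, so the CMP operands
 * are swapped, since CMP selects its second source when the condition
 * is < 0.
 */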
3207 if (src_ir != ir) {
3208 switch (expr->operation) {
3209 case ir_binop_less:
3210 switch_order = false;
3211 negate = zero_on_left;
3212 break;
3213
3214 case ir_binop_gequal:
3215 switch_order = true;
3216 negate = zero_on_left;
3217 break;
3218
3219 default:
3220 /* This isn't the right kind of comparison after all, so make sure
3221 * the whole condition is visited.
3222 */
3223 src_ir = ir;
3224 break;
3225 }
3226 }
3227 }
3228
3229 src_ir->accept(this);
3230
3231 /* We use the TGSI_OPCODE_CMP (a < 0 ? b : c) for conditional moves, and the
3232 * condition we produced is 0.0 or 1.0. By flipping the sign, we can
3233 * choose which value TGSI_OPCODE_CMP produces without an extra instruction
3234 * computing the condition.
3235 */
3236 if (negate)
3237 this->result.negate = ~this->result.negate;
3238
3239 return switch_order;
3240 }
3241
3242 void
3243 glsl_to_tgsi_visitor::emit_block_mov(ir_assignment *ir, const struct glsl_type *type,
3244 st_dst_reg *l, st_src_reg *r,
3245 st_src_reg *cond, bool cond_swap)
3246 {
3247 if (type->is_struct()) {
3248 for (unsigned int i = 0; i < type->length; i++) {
3249 emit_block_mov(ir, type->fields.structure[i].type, l, r,
3250 cond, cond_swap);
3251 }
3252 return;
3253 }
3254
3255 if (type->is_array()) {
3256 for (unsigned int i = 0; i < type->length; i++) {
3257 emit_block_mov(ir, type->fields.array, l, r, cond, cond_swap);
3258 }
3259 return;
3260 }
3261
3262 if (type->is_matrix()) {
3263 const struct glsl_type *vec_type;
3264
3265 vec_type = glsl_type::get_instance(type->is_double()
3266 ? GLSL_TYPE_DOUBLE : GLSL_TYPE_FLOAT,
3267 type->vector_elements, 1);
3268
3269 for (int i = 0; i < type->matrix_columns; i++) {
3270 emit_block_mov(ir, vec_type, l, r, cond, cond_swap);
3271 }
3272 return;
3273 }
3274
3275 assert(type->is_scalar() || type->is_vector());
3276
3277 l->type = type->base_type;
3278 r->type = type->base_type;
3279 if (cond) {
3280 st_src_reg l_src = st_src_reg(*l);
3281
3282 if (l_src.file == PROGRAM_OUTPUT &&
3283 this->prog->Target == GL_FRAGMENT_PROGRAM_ARB &&
3284 (l_src.index == FRAG_RESULT_DEPTH ||
3285 l_src.index == FRAG_RESULT_STENCIL)) {
3286 /* This is a special case because the source swizzles will be shifted
3287 * later to account for the difference between GLSL (where they're
3288 * plain floats) and TGSI (where they're Z and Y components). */
3289 l_src.swizzle = SWIZZLE_XXXX;
3290 }
3291
3292 if (native_integers) {
3293 emit_asm(ir, TGSI_OPCODE_UCMP, *l, *cond,
3294 cond_swap ? l_src : *r,
3295 cond_swap ? *r : l_src);
3296 } else {
3297 emit_asm(ir, TGSI_OPCODE_CMP, *l, *cond,
3298 cond_swap ? l_src : *r,
3299 cond_swap ? *r : l_src);
3300 }
3301 } else {
3302 emit_asm(ir, TGSI_OPCODE_MOV, *l, *r);
3303 }
3304 l->index++;
3305 r->index++;
3306 if (type->is_dual_slot()) {
3307 l->index++;
3308 if (r->is_double_vertex_input == false)
3309 r->index++;
3310 }
3311 }
3312
3313 void
3314 glsl_to_tgsi_visitor::visit(ir_assignment *ir)
3315 {
3316 int dst_component;
3317 st_dst_reg l;
3318 st_src_reg r;
3319
3320 /* all generated instructions need to be flagged as precise */
3321 this->precise = is_precise(ir->lhs->variable_referenced());
3322 ir->rhs->accept(this);
3323 r = this->result;
3324
3325 l = get_assignment_lhs(ir->lhs, this, &dst_component);
3326
3327 {
3328 int swizzles[4];
3329 int first_enabled_chan = 0;
3330 int rhs_chan = 0;
3331 ir_variable *variable = ir->lhs->variable_referenced();
3332
3333 if (shader->Stage == MESA_SHADER_FRAGMENT &&
3334 variable->data.mode == ir_var_shader_out &&
3335 (variable->data.location == FRAG_RESULT_DEPTH ||
3336 variable->data.location == FRAG_RESULT_STENCIL)) {
3337 assert(ir->lhs->type->is_scalar());
3338 assert(ir->write_mask == WRITEMASK_X);
3339
3340 if (variable->data.location == FRAG_RESULT_DEPTH)
3341 l.writemask = WRITEMASK_Z;
3342 else {
3343 assert(variable->data.location == FRAG_RESULT_STENCIL);
3344 l.writemask = WRITEMASK_Y;
3345 }
3346 } else if (ir->write_mask == 0) {
3347 assert(!ir->lhs->type->is_scalar() && !ir->lhs->type->is_vector());
3348
3349 unsigned num_elements =
3350 ir->lhs->type->without_array()->vector_elements;
3351
3352 if (num_elements) {
3353 l.writemask = u_bit_consecutive(0, num_elements);
3354 } else {
3355 /* The type is a struct or an array of (array of) structs. */
3356 l.writemask = WRITEMASK_XYZW;
3357 }
3358 } else {
3359 l.writemask = ir->write_mask;
3360 }
3361
3362 for (int i = 0; i < 4; i++) {
3363 if (l.writemask & (1 << i)) {
3364 first_enabled_chan = GET_SWZ(r.swizzle, i);
3365 break;
3366 }
3367 }
3368
3369 l.writemask = l.writemask << dst_component;
3370
3371 /* Swizzle a small RHS vector into the channels being written.
3372 *
3373 * glsl ir treats write_mask as dictating how many channels are
3374 * present on the RHS while TGSI treats write_mask as just
3375 * showing which channels of the vec4 RHS get written.
3376 */
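/* For example, assigning a vec2 to the .yz channels of a vec4: the
 * writemask is YZ and the RHS swizzle is re-spread so its first component
 * feeds channel y and its second feeds channel z, with unwritten channels
 * padded from an enabled channel.
 */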
3377 for (int i = 0; i < 4; i++) {
3378 if (l.writemask & (1 << i))
3379 swizzles[i] = GET_SWZ(r.swizzle, rhs_chan++);
3380 else
3381 swizzles[i] = first_enabled_chan;
3382 }
3383 r.swizzle = MAKE_SWIZZLE4(swizzles[0], swizzles[1],
3384 swizzles[2], swizzles[3]);
3385 }
3386
3387 assert(l.file != PROGRAM_UNDEFINED);
3388 assert(r.file != PROGRAM_UNDEFINED);
3389
3390 if (ir->rhs->as_expression() &&
3391 this->instructions.get_tail() &&
3392 ir->rhs == ((glsl_to_tgsi_instruction *)this->instructions.get_tail())->ir &&
3393 !((glsl_to_tgsi_instruction *)this->instructions.get_tail())->is_64bit_expanded &&
3394 type_size(ir->lhs->type) == 1 &&
3395 !ir->lhs->type->is_64bit() &&
3396 l.writemask == ((glsl_to_tgsi_instruction *)this->instructions.get_tail())->dst[0].writemask) {
3397 /* To avoid emitting an extra MOV when assigning an expression to a
3398 * variable, emit the last instruction of the expression again, but
3399 * replace the destination register with the target of the assignment.
3400 * Dead code elimination will remove the original instruction.
3401 */
3402 glsl_to_tgsi_instruction *inst, *new_inst;
3403 inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
3404 new_inst = emit_asm(ir, inst->op, l, inst->src[0], inst->src[1], inst->src[2], inst->src[3]);
3405 new_inst->saturate = inst->saturate;
3406 new_inst->resource = inst->resource;
3407 inst->dead_mask = inst->dst[0].writemask;
3408 } else {
3409 emit_block_mov(ir, ir->rhs->type, &l, &r, NULL, false);
3410 }
3411 this->precise = 0;
3412 }
3413
3414
3415 void
3416 glsl_to_tgsi_visitor::visit(ir_constant *ir)
3417 {
3418 st_src_reg src;
3419 GLdouble stack_vals[4] = { 0 };
3420 gl_constant_value *values = (gl_constant_value *) stack_vals;
3421 GLenum gl_type = GL_NONE;
3422 unsigned int i, elements;
3423 static int in_array = 0;
3424 gl_register_file file = in_array ? PROGRAM_CONSTANT : PROGRAM_IMMEDIATE;
3425
3426 /* Unfortunately, 4 floats is all we can get into
3427 * _mesa_add_typed_unnamed_constant. So, make a temp to store an
3428 * aggregate constant and move each constant value into it. If we
3429 * get lucky, copy propagation will eliminate the extra moves.
3430 */
3431 if (ir->type->is_struct()) {
3432 st_src_reg temp_base = get_temp(ir->type);
3433 st_dst_reg temp = st_dst_reg(temp_base);
3434
3435 for (i = 0; i < ir->type->length; i++) {
3436 ir_constant *const field_value = ir->get_record_field(i);
3437 int size = type_size(field_value->type);
3438
3439 assert(size > 0);
3440
3441 field_value->accept(this);
3442 src = this->result;
3443
3444 for (unsigned j = 0; j < (unsigned int)size; j++) {
3445 emit_asm(ir, TGSI_OPCODE_MOV, temp, src);
3446
3447 src.index++;
3448 temp.index++;
3449 }
3450 }
3451 this->result = temp_base;
3452 return;
3453 }
3454
3455 if (ir->type->is_array()) {
3456 st_src_reg temp_base = get_temp(ir->type);
3457 st_dst_reg temp = st_dst_reg(temp_base);
3458 int size = type_size(ir->type->fields.array);
3459
3460 assert(size > 0);
3461 in_array++;
3462
3463 for (i = 0; i < ir->type->length; i++) {
3464 ir->const_elements[i]->accept(this);
3465 src = this->result;
3466 for (int j = 0; j < size; j++) {
3467 emit_asm(ir, TGSI_OPCODE_MOV, temp, src);
3468
3469 src.index++;
3470 temp.index++;
3471 }
3472 }
3473 this->result = temp_base;
3474 in_array--;
3475 return;
3476 }
3477
3478 if (ir->type->is_matrix()) {
3479 st_src_reg mat = get_temp(ir->type);
3480 st_dst_reg mat_column = st_dst_reg(mat);
3481
3482 for (i = 0; i < ir->type->matrix_columns; i++) {
3483 switch (ir->type->base_type) {
3484 case GLSL_TYPE_FLOAT:
3485 values = (gl_constant_value *)
3486 &ir->value.f[i * ir->type->vector_elements];
3487
3488 src = st_src_reg(file, -1, ir->type->base_type);
3489 src.index = add_constant(file,
3490 values,
3491 ir->type->vector_elements,
3492 GL_FLOAT,
3493 &src.swizzle);
3494 emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
3495 break;
3496 case GLSL_TYPE_DOUBLE:
3497 values = (gl_constant_value *)
3498 &ir->value.d[i * ir->type->vector_elements];
3499 src = st_src_reg(file, -1, ir->type->base_type);
3500 src.index = add_constant(file,
3501 values,
3502 ir->type->vector_elements,
3503 GL_DOUBLE,
3504 &src.swizzle);
3505 if (ir->type->vector_elements >= 2) {
3506 mat_column.writemask = WRITEMASK_XY;
3507 src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
3508 SWIZZLE_X, SWIZZLE_Y);
3509 emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
3510 } else {
3511 mat_column.writemask = WRITEMASK_X;
3512 src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X,
3513 SWIZZLE_X, SWIZZLE_X);
3514 emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
3515 }
3516 src.index++;
3517 if (ir->type->vector_elements > 2) {
3518 if (ir->type->vector_elements == 4) {
3519 mat_column.writemask = WRITEMASK_ZW;
3520 src.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
3521 SWIZZLE_X, SWIZZLE_Y);
3522 emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
3523 } else {
3524 mat_column.writemask = WRITEMASK_Z;
3525 src.swizzle = MAKE_SWIZZLE4(SWIZZLE_Y, SWIZZLE_Y,
3526 SWIZZLE_Y, SWIZZLE_Y);
3527 emit_asm(ir, TGSI_OPCODE_MOV, mat_column, src);
3528 mat_column.writemask = WRITEMASK_XYZW;
3529 src.swizzle = SWIZZLE_XYZW;
3530 }
3531 mat_column.index++;
3532 }
3533 break;
3534 default:
3535 unreachable("Illegal matrix constant type.\n");
3536 break;
3537 }
3538 mat_column.index++;
3539 }
3540 this->result = mat;
3541 return;
3542 }
3543
3544 elements = ir->type->vector_elements;
3545 switch (ir->type->base_type) {
3546 case GLSL_TYPE_FLOAT:
3547 gl_type = GL_FLOAT;
3548 for (i = 0; i < ir->type->vector_elements; i++) {
3549 values[i].f = ir->value.f[i];
3550 }
3551 break;
3552 case GLSL_TYPE_DOUBLE:
3553 gl_type = GL_DOUBLE;
3554 for (i = 0; i < ir->type->vector_elements; i++) {
3555 memcpy(&values[i * 2], &ir->value.d[i], sizeof(double));
3556 }
3557 break;
3558 case GLSL_TYPE_INT64:
3559 gl_type = GL_INT64_ARB;
3560 for (i = 0; i < ir->type->vector_elements; i++) {
3561 memcpy(&values[i * 2], &ir->value.i64[i], sizeof(int64_t));
3562 }
3563 break;
3564 case GLSL_TYPE_UINT64:
3565 gl_type = GL_UNSIGNED_INT64_ARB;
3566 for (i = 0; i < ir->type->vector_elements; i++) {
3567 memcpy(&values[i * 2], &ir->value.u64[i], sizeof(uint64_t));
3568 }
3569 break;
3570 case GLSL_TYPE_UINT:
3571 gl_type = native_integers ? GL_UNSIGNED_INT : GL_FLOAT;
3572 for (i = 0; i < ir->type->vector_elements; i++) {
3573 if (native_integers)
3574 values[i].u = ir->value.u[i];
3575 else
3576 values[i].f = ir->value.u[i];
3577 }
3578 break;
3579 case GLSL_TYPE_INT:
3580 gl_type = native_integers ? GL_INT : GL_FLOAT;
3581 for (i = 0; i < ir->type->vector_elements; i++) {
3582 if (native_integers)
3583 values[i].i = ir->value.i[i];
3584 else
3585 values[i].f = ir->value.i[i];
3586 }
3587 break;
3588 case GLSL_TYPE_BOOL:
3589 gl_type = native_integers ? GL_BOOL : GL_FLOAT;
3590 for (i = 0; i < ir->type->vector_elements; i++) {
3591 values[i].u = ir->value.b[i] ? ctx->Const.UniformBooleanTrue : 0;
3592 }
3593 break;
3594 case GLSL_TYPE_SAMPLER:
3595 case GLSL_TYPE_IMAGE:
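      /* Sampler and image constants hold 64-bit (bindless) handles; split
       * the handle into two 32-bit words.
       */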
3596 gl_type = GL_UNSIGNED_INT;
3597 elements = 2;
3598 values[0].u = ir->value.u64[0] & 0xffffffff;
3599 values[1].u = ir->value.u64[0] >> 32;
3600 break;
3601 default:
3602 assert(!"Non-float/uint/int/bool/sampler/image constant");
3603 }
3604
3605 this->result = st_src_reg(file, -1, ir->type);
3606 this->result.index = add_constant(file,
3607 values,
3608 elements,
3609 gl_type,
3610 &this->result.swizzle);
3611 }
3612
3613 void
3614 glsl_to_tgsi_visitor::visit_atomic_counter_intrinsic(ir_call *ir)
3615 {
3616 exec_node *param = ir->actual_parameters.get_head();
3617 ir_dereference *deref = static_cast<ir_dereference *>(param);
3618 ir_variable *location = deref->variable_referenced();
3619 bool has_hw_atomics = st_context(ctx)->has_hw_atomics;
3620 /* Calculate the surface offset */
3621 st_src_reg offset;
3622 unsigned array_size = 0, base = 0;
3623 uint16_t index = 0;
3624 st_src_reg resource;
3625
3626 get_deref_offsets(deref, &array_size, &base, &index, &offset, false);
3627
3628 if (has_hw_atomics) {
3629 variable_storage *entry = find_variable_storage(location);
3630 st_src_reg buffer(PROGRAM_HW_ATOMIC, 0, GLSL_TYPE_ATOMIC_UINT,
3631 location->data.binding);
3632
3633 if (!entry) {
3634 entry = new(mem_ctx) variable_storage(location, PROGRAM_HW_ATOMIC,
3635 num_atomics);
3636 _mesa_hash_table_insert(this->variables, location, entry);
3637
3638 atomic_info[num_atomics].location = location->data.location;
3639 atomic_info[num_atomics].binding = location->data.binding;
3640 atomic_info[num_atomics].size = location->type->arrays_of_arrays_size();
3641 if (atomic_info[num_atomics].size == 0)
3642 atomic_info[num_atomics].size = 1;
3643 atomic_info[num_atomics].array_id = 0;
3644 num_atomics++;
3645 }
3646
3647 if (offset.file != PROGRAM_UNDEFINED) {
3648 if (atomic_info[entry->index].array_id == 0) {
3649 num_atomic_arrays++;
3650 atomic_info[entry->index].array_id = num_atomic_arrays;
3651 }
3652 buffer.array_id = atomic_info[entry->index].array_id;
3653 }
3654
3655 buffer.index = index;
3656 buffer.index += location->data.offset / ATOMIC_COUNTER_SIZE;
3657 buffer.has_index2 = true;
3658
3659 if (offset.file != PROGRAM_UNDEFINED) {
3660 buffer.reladdr = ralloc(mem_ctx, st_src_reg);
3661 *buffer.reladdr = offset;
3662 emit_arl(ir, sampler_reladdr, offset);
3663 }
3664 offset = st_src_reg_for_int(0);
3665
3666 resource = buffer;
3667 } else {
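      /* Without hardware atomic counters, counters are lowered to plain
       * buffer operations; the counter buffers are placed after the
       * shader's SSBOs, hence the num_ssbos offset in the index.
       */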
3668 st_src_reg buffer(PROGRAM_BUFFER,
3669 prog->info.num_ssbos +
3670 location->data.binding,
3671 GLSL_TYPE_ATOMIC_UINT);
3672
3673 if (offset.file != PROGRAM_UNDEFINED) {
3674 emit_asm(ir, TGSI_OPCODE_MUL, st_dst_reg(offset),
3675 offset, st_src_reg_for_int(ATOMIC_COUNTER_SIZE));
3676 emit_asm(ir, TGSI_OPCODE_ADD, st_dst_reg(offset),
3677 offset, st_src_reg_for_int(location->data.offset + index * ATOMIC_COUNTER_SIZE));
3678 } else {
3679 offset = st_src_reg_for_int(location->data.offset + index * ATOMIC_COUNTER_SIZE);
3680 }
3681 resource = buffer;
3682 }
3683
3684 ir->return_deref->accept(this);
3685 st_dst_reg dst(this->result);
3686 dst.writemask = WRITEMASK_X;
3687
3688 glsl_to_tgsi_instruction *inst;
3689
3690 if (ir->callee->intrinsic_id == ir_intrinsic_atomic_counter_read) {
3691 inst = emit_asm(ir, TGSI_OPCODE_LOAD, dst, offset);
3692 } else if (ir->callee->intrinsic_id == ir_intrinsic_atomic_counter_increment) {
3693 inst = emit_asm(ir, TGSI_OPCODE_ATOMUADD, dst, offset,
3694 st_src_reg_for_int(1));
3695 } else if (ir->callee->intrinsic_id == ir_intrinsic_atomic_counter_predecrement) {
3696 inst = emit_asm(ir, TGSI_OPCODE_ATOMUADD, dst, offset,
3697 st_src_reg_for_int(-1));
3698 emit_asm(ir, TGSI_OPCODE_ADD, dst, this->result, st_src_reg_for_int(-1));
3699 } else {
3700 param = param->get_next();
3701 ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
3702 val->accept(this);
3703
3704 st_src_reg data = this->result, data2 = undef_src;
3705 enum tgsi_opcode opcode;
3706 switch (ir->callee->intrinsic_id) {
3707 case ir_intrinsic_atomic_counter_add:
3708 opcode = TGSI_OPCODE_ATOMUADD;
3709 break;
3710 case ir_intrinsic_atomic_counter_min:
3711 opcode = TGSI_OPCODE_ATOMIMIN;
3712 break;
3713 case ir_intrinsic_atomic_counter_max:
3714 opcode = TGSI_OPCODE_ATOMIMAX;
3715 break;
3716 case ir_intrinsic_atomic_counter_and:
3717 opcode = TGSI_OPCODE_ATOMAND;
3718 break;
3719 case ir_intrinsic_atomic_counter_or:
3720 opcode = TGSI_OPCODE_ATOMOR;
3721 break;
3722 case ir_intrinsic_atomic_counter_xor:
3723 opcode = TGSI_OPCODE_ATOMXOR;
3724 break;
3725 case ir_intrinsic_atomic_counter_exchange:
3726 opcode = TGSI_OPCODE_ATOMXCHG;
3727 break;
3728 case ir_intrinsic_atomic_counter_comp_swap: {
3729 opcode = TGSI_OPCODE_ATOMCAS;
3730 param = param->get_next();
3731 val = ((ir_instruction *)param)->as_rvalue();
3732 val->accept(this);
3733 data2 = this->result;
3734 break;
3735 }
3736 default:
3737 assert(!"Unexpected intrinsic");
3738 return;
3739 }
3740
3741 inst = emit_asm(ir, opcode, dst, offset, data, data2);
3742 }
3743
3744 inst->resource = resource;
3745 }
3746
3747 void
3748 glsl_to_tgsi_visitor::visit_ssbo_intrinsic(ir_call *ir)
3749 {
3750 exec_node *param = ir->actual_parameters.get_head();
3751
3752 ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();
3753
3754 param = param->get_next();
3755 ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();
3756
3757 ir_constant *const_block = block->as_constant();
3758 st_src_reg buffer(
3759 PROGRAM_BUFFER,
3760 const_block ? const_block->value.u[0] : 0,
3761 GLSL_TYPE_UINT);
3762
3763 if (!const_block) {
3764 block->accept(this);
3765 buffer.reladdr = ralloc(mem_ctx, st_src_reg);
3766 *buffer.reladdr = this->result;
3767 emit_arl(ir, sampler_reladdr, this->result);
3768 }
3769
3770 /* Calculate the surface offset */
3771 offset->accept(this);
3772 st_src_reg off = this->result;
3773
3774 st_dst_reg dst = undef_dst;
3775 if (ir->return_deref) {
3776 ir->return_deref->accept(this);
3777 dst = st_dst_reg(this->result);
3778 dst.writemask = (1 << ir->return_deref->type->vector_elements) - 1;
3779 }
3780
3781 glsl_to_tgsi_instruction *inst;
3782
3783 if (ir->callee->intrinsic_id == ir_intrinsic_ssbo_load) {
3784 inst = emit_asm(ir, TGSI_OPCODE_LOAD, dst, off);
3785 if (dst.type == GLSL_TYPE_BOOL)
3786 emit_asm(ir, TGSI_OPCODE_USNE, dst, st_src_reg(dst),
3787 st_src_reg_for_int(0));
3788 } else if (ir->callee->intrinsic_id == ir_intrinsic_ssbo_store) {
3789 param = param->get_next();
3790 ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
3791 val->accept(this);
3792
3793 param = param->get_next();
3794 ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
3795 assert(write_mask);
3796 dst.writemask = write_mask->value.u[0];
3797
3798 dst.type = this->result.type;
3799 inst = emit_asm(ir, TGSI_OPCODE_STORE, dst, off, this->result);
3800 } else {
3801 param = param->get_next();
3802 ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
3803 val->accept(this);
3804
3805 st_src_reg data = this->result, data2 = undef_src;
3806 enum tgsi_opcode opcode;
3807 switch (ir->callee->intrinsic_id) {
3808 case ir_intrinsic_ssbo_atomic_add:
3809 opcode = TGSI_OPCODE_ATOMUADD;
3810 break;
3811 case ir_intrinsic_ssbo_atomic_min:
3812 opcode = TGSI_OPCODE_ATOMIMIN;
3813 break;
3814 case ir_intrinsic_ssbo_atomic_max:
3815 opcode = TGSI_OPCODE_ATOMIMAX;
3816 break;
3817 case ir_intrinsic_ssbo_atomic_and:
3818 opcode = TGSI_OPCODE_ATOMAND;
3819 break;
3820 case ir_intrinsic_ssbo_atomic_or:
3821 opcode = TGSI_OPCODE_ATOMOR;
3822 break;
3823 case ir_intrinsic_ssbo_atomic_xor:
3824 opcode = TGSI_OPCODE_ATOMXOR;
3825 break;
3826 case ir_intrinsic_ssbo_atomic_exchange:
3827 opcode = TGSI_OPCODE_ATOMXCHG;
3828 break;
3829 case ir_intrinsic_ssbo_atomic_comp_swap:
3830 opcode = TGSI_OPCODE_ATOMCAS;
3831 param = param->get_next();
3832 val = ((ir_instruction *)param)->as_rvalue();
3833 val->accept(this);
3834 data2 = this->result;
3835 break;
3836 default:
3837 assert(!"Unexpected intrinsic");
3838 return;
3839 }
3840
3841 inst = emit_asm(ir, opcode, dst, off, data, data2);
3842 }
3843
3844 param = param->get_next();
3845 ir_constant *access = NULL;
3846 if (!param->is_tail_sentinel()) {
3847 access = ((ir_instruction *)param)->as_constant();
3848 assert(access);
3849 }
3850
3851 add_buffer_to_load_and_stores(inst, &buffer, &this->instructions, access);
3852 }
3853
3854 void
3855 glsl_to_tgsi_visitor::visit_membar_intrinsic(ir_call *ir)
3856 {
3857 switch (ir->callee->intrinsic_id) {
3858 case ir_intrinsic_memory_barrier:
3859 emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
3860 st_src_reg_for_int(TGSI_MEMBAR_SHADER_BUFFER |
3861 TGSI_MEMBAR_ATOMIC_BUFFER |
3862 TGSI_MEMBAR_SHADER_IMAGE |
3863 TGSI_MEMBAR_SHARED));
3864 break;
3865 case ir_intrinsic_memory_barrier_atomic_counter:
3866 emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
3867 st_src_reg_for_int(TGSI_MEMBAR_ATOMIC_BUFFER));
3868 break;
3869 case ir_intrinsic_memory_barrier_buffer:
3870 emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
3871 st_src_reg_for_int(TGSI_MEMBAR_SHADER_BUFFER));
3872 break;
3873 case ir_intrinsic_memory_barrier_image:
3874 emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
3875 st_src_reg_for_int(TGSI_MEMBAR_SHADER_IMAGE));
3876 break;
3877 case ir_intrinsic_memory_barrier_shared:
3878 emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
3879 st_src_reg_for_int(TGSI_MEMBAR_SHARED));
3880 break;
3881 case ir_intrinsic_group_memory_barrier:
3882 emit_asm(ir, TGSI_OPCODE_MEMBAR, undef_dst,
3883 st_src_reg_for_int(TGSI_MEMBAR_SHADER_BUFFER |
3884 TGSI_MEMBAR_ATOMIC_BUFFER |
3885 TGSI_MEMBAR_SHADER_IMAGE |
3886 TGSI_MEMBAR_SHARED |
3887 TGSI_MEMBAR_THREAD_GROUP));
3888 break;
3889 default:
3890 assert(!"Unexpected memory barrier intrinsic");
3891 }
3892 }
3893
3894 void
3895 glsl_to_tgsi_visitor::visit_shared_intrinsic(ir_call *ir)
3896 {
3897 exec_node *param = ir->actual_parameters.get_head();
3898
3899 ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();
3900
3901 st_src_reg buffer(PROGRAM_MEMORY, 0, GLSL_TYPE_UINT);
3902
3903 /* Calculate the surface offset */
3904 offset->accept(this);
3905 st_src_reg off = this->result;
3906
3907 st_dst_reg dst = undef_dst;
3908 if (ir->return_deref) {
3909 ir->return_deref->accept(this);
3910 dst = st_dst_reg(this->result);
3911 dst.writemask = (1 << ir->return_deref->type->vector_elements) - 1;
3912 }
3913
3914 glsl_to_tgsi_instruction *inst;
3915
3916 if (ir->callee->intrinsic_id == ir_intrinsic_shared_load) {
3917 inst = emit_asm(ir, TGSI_OPCODE_LOAD, dst, off);
3918 inst->resource = buffer;
3919 } else if (ir->callee->intrinsic_id == ir_intrinsic_shared_store) {
3920 param = param->get_next();
3921 ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
3922 val->accept(this);
3923
3924 param = param->get_next();
3925 ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
3926 assert(write_mask);
3927 dst.writemask = write_mask->value.u[0];
3928
3929 dst.type = this->result.type;
3930 inst = emit_asm(ir, TGSI_OPCODE_STORE, dst, off, this->result);
3931 inst->resource = buffer;
3932 } else {
3933 param = param->get_next();
3934 ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();
3935 val->accept(this);
3936
3937 st_src_reg data = this->result, data2 = undef_src;
3938 enum tgsi_opcode opcode;
3939 switch (ir->callee->intrinsic_id) {
3940 case ir_intrinsic_shared_atomic_add:
3941 opcode = TGSI_OPCODE_ATOMUADD;
3942 break;
3943 case ir_intrinsic_shared_atomic_min:
3944 opcode = TGSI_OPCODE_ATOMIMIN;
3945 break;
3946 case ir_intrinsic_shared_atomic_max:
3947 opcode = TGSI_OPCODE_ATOMIMAX;
3948 break;
3949 case ir_intrinsic_shared_atomic_and:
3950 opcode = TGSI_OPCODE_ATOMAND;
3951 break;
3952 case ir_intrinsic_shared_atomic_or:
3953 opcode = TGSI_OPCODE_ATOMOR;
3954 break;
3955 case ir_intrinsic_shared_atomic_xor:
3956 opcode = TGSI_OPCODE_ATOMXOR;
3957 break;
3958 case ir_intrinsic_shared_atomic_exchange:
3959 opcode = TGSI_OPCODE_ATOMXCHG;
3960 break;
3961 case ir_intrinsic_shared_atomic_comp_swap:
3962 opcode = TGSI_OPCODE_ATOMCAS;
3963 param = param->get_next();
3964 val = ((ir_instruction *)param)->as_rvalue();
3965 val->accept(this);
3966 data2 = this->result;
3967 break;
3968 default:
3969 assert(!"Unexpected intrinsic");
3970 return;
3971 }
3972
3973 inst = emit_asm(ir, opcode, dst, off, data, data2);
3974 inst->resource = buffer;
3975 }
3976 }
3977
3978 static void
3979 get_image_qualifiers(ir_dereference *ir, const glsl_type **type,
3980 bool *memory_coherent, bool *memory_volatile,
3981 bool *memory_restrict, bool *memory_read_only,
3982 enum pipe_format *image_format)
3983 {
3984
3985 switch (ir->ir_type) {
3986 case ir_type_dereference_record: {
3987 ir_dereference_record *deref_record = ir->as_dereference_record();
3988 const glsl_type *struct_type = deref_record->record->type;
3989 int fild_idx = deref_record->field_idx;
3990
3991 *type = struct_type->fields.structure[fild_idx].type->without_array();
3992 *memory_coherent =
3993 struct_type->fields.structure[fild_idx].memory_coherent;
3994 *memory_volatile =
3995 struct_type->fields.structure[fild_idx].memory_volatile;
3996 *memory_restrict =
3997 struct_type->fields.structure[fild_idx].memory_restrict;
3998 *memory_read_only =
3999 struct_type->fields.structure[fild_idx].memory_read_only;
4000 *image_format =
4001 struct_type->fields.structure[fild_idx].image_format;
4002 break;
4003 }
4004
4005 case ir_type_dereference_array: {
4006 ir_dereference_array *deref_arr = ir->as_dereference_array();
4007 get_image_qualifiers((ir_dereference *)deref_arr->array, type,
4008 memory_coherent, memory_volatile, memory_restrict,
4009 memory_read_only, image_format);
4010 break;
4011 }
4012
4013 case ir_type_dereference_variable: {
4014 ir_variable *var = ir->variable_referenced();
4015
4016 *type = var->type->without_array();
4017 *memory_coherent = var->data.memory_coherent;
4018 *memory_volatile = var->data.memory_volatile;
4019 *memory_restrict = var->data.memory_restrict;
4020 *memory_read_only = var->data.memory_read_only;
4021 *image_format = var->data.image_format;
4022 break;
4023 }
4024
4025 default:
4026 break;
4027 }
4028 }
4029
4030 void
4031 glsl_to_tgsi_visitor::visit_image_intrinsic(ir_call *ir)
4032 {
4033 exec_node *param = ir->actual_parameters.get_head();
4034
4035 ir_dereference *img = (ir_dereference *)param;
4036 const ir_variable *imgvar = img->variable_referenced();
4037 unsigned sampler_array_size = 1, sampler_base = 0;
4038 bool memory_coherent = false, memory_volatile = false,
4039 memory_restrict = false, memory_read_only = false;
4040 enum pipe_format image_format = PIPE_FORMAT_NONE;
4041 const glsl_type *type = NULL;
4042
4043 get_image_qualifiers(img, &type, &memory_coherent, &memory_volatile,
4044 &memory_restrict, &memory_read_only, &image_format);
4045
4046 st_src_reg reladdr;
4047 st_src_reg image(PROGRAM_IMAGE, 0, GLSL_TYPE_UINT);
4048 uint16_t index = 0;
4049 get_deref_offsets(img, &sampler_array_size, &sampler_base,
4050 &index, &reladdr, !imgvar->contains_bindless());
4051
4052 image.index = index;
4053 if (reladdr.file != PROGRAM_UNDEFINED) {
4054 image.reladdr = ralloc(mem_ctx, st_src_reg);
4055 *image.reladdr = reladdr;
4056 emit_arl(ir, sampler_reladdr, reladdr);
4057 }
4058
4059 st_dst_reg dst = undef_dst;
4060 if (ir->return_deref) {
4061 ir->return_deref->accept(this);
4062 dst = st_dst_reg(this->result);
4063 dst.writemask = (1 << ir->return_deref->type->vector_elements) - 1;
4064 }
4065
4066 glsl_to_tgsi_instruction *inst;
4067
4068 st_src_reg bindless;
4069 if (imgvar->contains_bindless()) {
4070 img->accept(this);
4071 bindless = this->result;
4072 }
4073
4074 if (ir->callee->intrinsic_id == ir_intrinsic_image_size) {
4075 dst.writemask = WRITEMASK_XYZ;
4076 inst = emit_asm(ir, TGSI_OPCODE_RESQ, dst);
4077 } else if (ir->callee->intrinsic_id == ir_intrinsic_image_samples) {
4078 st_src_reg res = get_temp(glsl_type::ivec4_type);
4079 st_dst_reg dstres = st_dst_reg(res);
4080 dstres.writemask = WRITEMASK_W;
4081 inst = emit_asm(ir, TGSI_OPCODE_RESQ, dstres);
4082 res.swizzle = SWIZZLE_WWWW;
4083 emit_asm(ir, TGSI_OPCODE_MOV, dst, res);
4084 } else {
4085 st_src_reg arg1 = undef_src, arg2 = undef_src;
4086 st_src_reg coord;
4087 st_dst_reg coord_dst;
4088 coord = get_temp(glsl_type::ivec4_type);
4089 coord_dst = st_dst_reg(coord);
4090 coord_dst.writemask = (1 << type->coordinate_components()) - 1;
4091 param = param->get_next();
4092 ((ir_dereference *)param)->accept(this);
4093 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
4094 coord.swizzle = SWIZZLE_XXXX;
4095 switch (type->coordinate_components()) {
4096 case 4: assert(!"unexpected coord count");
4097 FALLTHROUGH;
4098 case 3: coord.swizzle |= SWIZZLE_Z << 6;
4099 FALLTHROUGH;
4100 case 2: coord.swizzle |= SWIZZLE_Y << 3;
4101 }
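      /* e.g. two coordinate components give the swizzle .xyxx and three
       * give .xyzx; unused channels simply repeat X.
       */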
4102
4103 if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS) {
4104 param = param->get_next();
4105 ((ir_dereference *)param)->accept(this);
4106 st_src_reg sample = this->result;
4107 sample.swizzle = SWIZZLE_XXXX;
4108 coord_dst.writemask = WRITEMASK_W;
4109 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, sample);
4110 coord.swizzle |= SWIZZLE_W << 9;
4111 }
4112
4113 param = param->get_next();
4114 if (!param->is_tail_sentinel()) {
4115 ((ir_dereference *)param)->accept(this);
4116 arg1 = this->result;
4117 param = param->get_next();
4118 }
4119
4120 if (!param->is_tail_sentinel()) {
4121 ((ir_dereference *)param)->accept(this);
4122 arg2 = this->result;
4123 param = param->get_next();
4124 }
4125
4126 assert(param->is_tail_sentinel());
4127
4128 enum tgsi_opcode opcode;
4129 switch (ir->callee->intrinsic_id) {
4130 case ir_intrinsic_image_load:
4131 opcode = TGSI_OPCODE_LOAD;
4132 break;
4133 case ir_intrinsic_image_store:
4134 opcode = TGSI_OPCODE_STORE;
4135 break;
4136 case ir_intrinsic_image_atomic_add:
4137 opcode = TGSI_OPCODE_ATOMUADD;
4138 break;
4139 case ir_intrinsic_image_atomic_min:
4140 opcode = TGSI_OPCODE_ATOMIMIN;
4141 break;
4142 case ir_intrinsic_image_atomic_max:
4143 opcode = TGSI_OPCODE_ATOMIMAX;
4144 break;
4145 case ir_intrinsic_image_atomic_and:
4146 opcode = TGSI_OPCODE_ATOMAND;
4147 break;
4148 case ir_intrinsic_image_atomic_or:
4149 opcode = TGSI_OPCODE_ATOMOR;
4150 break;
4151 case ir_intrinsic_image_atomic_xor:
4152 opcode = TGSI_OPCODE_ATOMXOR;
4153 break;
4154 case ir_intrinsic_image_atomic_exchange:
4155 opcode = TGSI_OPCODE_ATOMXCHG;
4156 break;
4157 case ir_intrinsic_image_atomic_comp_swap:
4158 opcode = TGSI_OPCODE_ATOMCAS;
4159 break;
4160 case ir_intrinsic_image_atomic_inc_wrap: {
4161 /* There's a bit of disagreement between GLSL and the hardware. The
4162 * hardware wants to wrap after the given wrap value, while GLSL
4163 * wants to wrap at the value. Subtract 1 to make up the difference.
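          * For example, a GLSL wrap value of 4 means the counter should
          * cycle through 0,1,2,3, so the opcode is handed 3.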
4164 */
4165 st_src_reg wrap = get_temp(glsl_type::uint_type);
4166 emit_asm(ir, TGSI_OPCODE_ADD, st_dst_reg(wrap),
4167 arg1, st_src_reg_for_int(-1));
4168 arg1 = wrap;
4169 opcode = TGSI_OPCODE_ATOMINC_WRAP;
4170 break;
4171 }
4172 case ir_intrinsic_image_atomic_dec_wrap:
4173 opcode = TGSI_OPCODE_ATOMDEC_WRAP;
4174 break;
4175 default:
4176 assert(!"Unexpected intrinsic");
4177 return;
4178 }
4179
4180 inst = emit_asm(ir, opcode, dst, coord, arg1, arg2);
4181 if (opcode == TGSI_OPCODE_STORE)
4182 inst->dst[0].writemask = WRITEMASK_XYZW;
4183 }
4184
4185 if (imgvar->contains_bindless()) {
4186 inst->resource = bindless;
4187 inst->resource.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
4188 SWIZZLE_X, SWIZZLE_Y);
4189 } else {
4190 inst->resource = image;
4191 inst->sampler_array_size = sampler_array_size;
4192 inst->sampler_base = sampler_base;
4193 }
4194
4195 inst->tex_target = type->sampler_index();
4196 inst->image_format = image_format;
4197 inst->read_only = memory_read_only;
4198
4199 if (memory_coherent)
4200 inst->buffer_access |= TGSI_MEMORY_COHERENT;
4201 if (memory_restrict)
4202 inst->buffer_access |= TGSI_MEMORY_RESTRICT;
4203 if (memory_volatile)
4204 inst->buffer_access |= TGSI_MEMORY_VOLATILE;
4205 }
4206
4207 void
4208 glsl_to_tgsi_visitor::visit_generic_intrinsic(ir_call *ir, enum tgsi_opcode op)
4209 {
4210 ir->return_deref->accept(this);
4211 st_dst_reg dst = st_dst_reg(this->result);
4212
4213 dst.writemask = u_bit_consecutive(0, ir->return_deref->var->type->vector_elements);
4214
4215 st_src_reg src[4] = { undef_src, undef_src, undef_src, undef_src };
4216 unsigned num_src = 0;
4217 foreach_in_list(ir_rvalue, param, &ir->actual_parameters) {
4218 assert(num_src < ARRAY_SIZE(src));
4219
4220 this->result.file = PROGRAM_UNDEFINED;
4221 param->accept(this);
4222 assert(this->result.file != PROGRAM_UNDEFINED);
4223
4224 src[num_src] = this->result;
4225 num_src++;
4226 }
4227
4228 emit_asm(ir, op, dst, src[0], src[1], src[2], src[3]);
4229 }
4230
4231 void
4232 glsl_to_tgsi_visitor::visit(ir_call *ir)
4233 {
4234 ir_function_signature *sig = ir->callee;
4235
4236 /* Filter out intrinsics */
4237 switch (sig->intrinsic_id) {
4238 case ir_intrinsic_atomic_counter_read:
4239 case ir_intrinsic_atomic_counter_increment:
4240 case ir_intrinsic_atomic_counter_predecrement:
4241 case ir_intrinsic_atomic_counter_add:
4242 case ir_intrinsic_atomic_counter_min:
4243 case ir_intrinsic_atomic_counter_max:
4244 case ir_intrinsic_atomic_counter_and:
4245 case ir_intrinsic_atomic_counter_or:
4246 case ir_intrinsic_atomic_counter_xor:
4247 case ir_intrinsic_atomic_counter_exchange:
4248 case ir_intrinsic_atomic_counter_comp_swap:
4249 visit_atomic_counter_intrinsic(ir);
4250 return;
4251
4252 case ir_intrinsic_ssbo_load:
4253 case ir_intrinsic_ssbo_store:
4254 case ir_intrinsic_ssbo_atomic_add:
4255 case ir_intrinsic_ssbo_atomic_min:
4256 case ir_intrinsic_ssbo_atomic_max:
4257 case ir_intrinsic_ssbo_atomic_and:
4258 case ir_intrinsic_ssbo_atomic_or:
4259 case ir_intrinsic_ssbo_atomic_xor:
4260 case ir_intrinsic_ssbo_atomic_exchange:
4261 case ir_intrinsic_ssbo_atomic_comp_swap:
4262 visit_ssbo_intrinsic(ir);
4263 return;
4264
4265 case ir_intrinsic_memory_barrier:
4266 case ir_intrinsic_memory_barrier_atomic_counter:
4267 case ir_intrinsic_memory_barrier_buffer:
4268 case ir_intrinsic_memory_barrier_image:
4269 case ir_intrinsic_memory_barrier_shared:
4270 case ir_intrinsic_group_memory_barrier:
4271 visit_membar_intrinsic(ir);
4272 return;
4273
4274 case ir_intrinsic_shared_load:
4275 case ir_intrinsic_shared_store:
4276 case ir_intrinsic_shared_atomic_add:
4277 case ir_intrinsic_shared_atomic_min:
4278 case ir_intrinsic_shared_atomic_max:
4279 case ir_intrinsic_shared_atomic_and:
4280 case ir_intrinsic_shared_atomic_or:
4281 case ir_intrinsic_shared_atomic_xor:
4282 case ir_intrinsic_shared_atomic_exchange:
4283 case ir_intrinsic_shared_atomic_comp_swap:
4284 visit_shared_intrinsic(ir);
4285 return;
4286
4287 case ir_intrinsic_image_load:
4288 case ir_intrinsic_image_store:
4289 case ir_intrinsic_image_atomic_add:
4290 case ir_intrinsic_image_atomic_min:
4291 case ir_intrinsic_image_atomic_max:
4292 case ir_intrinsic_image_atomic_and:
4293 case ir_intrinsic_image_atomic_or:
4294 case ir_intrinsic_image_atomic_xor:
4295 case ir_intrinsic_image_atomic_exchange:
4296 case ir_intrinsic_image_atomic_comp_swap:
4297 case ir_intrinsic_image_size:
4298 case ir_intrinsic_image_samples:
4299 case ir_intrinsic_image_atomic_inc_wrap:
4300 case ir_intrinsic_image_atomic_dec_wrap:
4301 visit_image_intrinsic(ir);
4302 return;
4303
4304 case ir_intrinsic_shader_clock:
4305 visit_generic_intrinsic(ir, TGSI_OPCODE_CLOCK);
4306 return;
4307
4308 case ir_intrinsic_vote_all:
4309 visit_generic_intrinsic(ir, TGSI_OPCODE_VOTE_ALL);
4310 return;
4311 case ir_intrinsic_vote_any:
4312 visit_generic_intrinsic(ir, TGSI_OPCODE_VOTE_ANY);
4313 return;
4314 case ir_intrinsic_vote_eq:
4315 visit_generic_intrinsic(ir, TGSI_OPCODE_VOTE_EQ);
4316 return;
4317 case ir_intrinsic_ballot:
4318 visit_generic_intrinsic(ir, TGSI_OPCODE_BALLOT);
4319 return;
4320 case ir_intrinsic_read_first_invocation:
4321 visit_generic_intrinsic(ir, TGSI_OPCODE_READ_FIRST);
4322 return;
4323 case ir_intrinsic_read_invocation:
4324 visit_generic_intrinsic(ir, TGSI_OPCODE_READ_INVOC);
4325 return;
4326
4327 case ir_intrinsic_helper_invocation:
4328 visit_generic_intrinsic(ir, TGSI_OPCODE_READ_HELPER);
4329 return;
4330
4331 case ir_intrinsic_invalid:
4332 case ir_intrinsic_generic_load:
4333 case ir_intrinsic_generic_store:
4334 case ir_intrinsic_generic_atomic_add:
4335 case ir_intrinsic_generic_atomic_and:
4336 case ir_intrinsic_generic_atomic_or:
4337 case ir_intrinsic_generic_atomic_xor:
4338 case ir_intrinsic_generic_atomic_min:
4339 case ir_intrinsic_generic_atomic_max:
4340 case ir_intrinsic_generic_atomic_exchange:
4341 case ir_intrinsic_generic_atomic_comp_swap:
4342 case ir_intrinsic_begin_invocation_interlock:
4343 case ir_intrinsic_end_invocation_interlock:
4344 case ir_intrinsic_image_sparse_load:
4345 case ir_intrinsic_is_sparse_texels_resident:
4346 unreachable("Invalid intrinsic");
4347 }
4348 }
4349
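/**
 * Walk a dereference chain (struct and array derefs) and accumulate the
 * constant part of the offset in *index, any run-time part in *indirect,
 * the number of array elements spanned so far in *array_elements, and the
 * uniform location offset in *location.
 */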
4350 void
4351 glsl_to_tgsi_visitor::calc_deref_offsets(ir_dereference *tail,
4352 unsigned *array_elements,
4353 uint16_t *index,
4354 st_src_reg *indirect,
4355 unsigned *location)
4356 {
4357 switch (tail->ir_type) {
4358 case ir_type_dereference_record: {
4359 ir_dereference_record *deref_record = tail->as_dereference_record();
4360 const glsl_type *struct_type = deref_record->record->type;
4361 int field_index = deref_record->field_idx;
4362
4363 calc_deref_offsets(deref_record->record->as_dereference(), array_elements, index, indirect, location);
4364
4365 assert(field_index >= 0);
4366 *location += struct_type->struct_location_offset(field_index);
4367 break;
4368 }
4369
4370 case ir_type_dereference_array: {
4371 ir_dereference_array *deref_arr = tail->as_dereference_array();
4372
4373 void *mem_ctx = ralloc_parent(deref_arr);
4374 ir_constant *array_index =
4375 deref_arr->array_index->constant_expression_value(mem_ctx);
4376
4377 if (!array_index) {
4378 st_src_reg temp_reg;
4379 st_dst_reg temp_dst;
4380
4381 temp_reg = get_temp(glsl_type::uint_type);
4382 temp_dst = st_dst_reg(temp_reg);
4383 temp_dst.writemask = 1;
4384
4385 deref_arr->array_index->accept(this);
4386 if (*array_elements != 1)
4387 emit_asm(NULL, TGSI_OPCODE_MUL, temp_dst, this->result, st_src_reg_for_int(*array_elements));
4388 else
4389 emit_asm(NULL, TGSI_OPCODE_MOV, temp_dst, this->result);
4390
4391 if (indirect->file == PROGRAM_UNDEFINED)
4392 *indirect = temp_reg;
4393 else {
4394 temp_dst = st_dst_reg(*indirect);
4395 temp_dst.writemask = 1;
4396 emit_asm(NULL, TGSI_OPCODE_ADD, temp_dst, *indirect, temp_reg);
4397 }
4398 } else
4399 *index += array_index->value.u[0] * *array_elements;
4400
4401 *array_elements *= deref_arr->array->type->length;
4402
4403 calc_deref_offsets(deref_arr->array->as_dereference(), array_elements, index, indirect, location);
4404 break;
4405 }
4406 default:
4407 break;
4408 }
4409 }
4410
4411 void
4412 glsl_to_tgsi_visitor::get_deref_offsets(ir_dereference *ir,
4413 unsigned *array_size,
4414 unsigned *base,
4415 uint16_t *index,
4416 st_src_reg *reladdr,
4417 bool opaque)
4418 {
4419 GLuint shader = _mesa_program_enum_to_shader_stage(this->prog->Target);
4420 unsigned location = 0;
4421 ir_variable *var = ir->variable_referenced();
4422
4423 reladdr->reset();
4424
4425 *base = 0;
4426 *array_size = 1;
4427
4428 assert(var);
4429 location = var->data.location;
4430 calc_deref_offsets(ir, array_size, index, reladdr, &location);
4431
4432 /*
4433 * If we end up with no indirect then adjust the base to the index,
4434 * and set the array size to 1.
4435 */
4436 if (reladdr->file == PROGRAM_UNDEFINED) {
4437 *base = *index;
4438 *array_size = 1;
4439 }
4440
4441 if (opaque) {
4442 assert(location != 0xffffffff);
4443 *base += this->shader_program->data->UniformStorage[location].opaque[shader].index;
4444 *index += this->shader_program->data->UniformStorage[location].opaque[shader].index;
4445 }
4446 }
4447
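/**
 * Make sure a textureGather() offset lives in a plain temporary: offsets
 * that are constants, uniforms, state vars, or that use indirect
 * addressing are copied into a fresh temp first.
 */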
4448 st_src_reg
4449 glsl_to_tgsi_visitor::canonicalize_gather_offset(st_src_reg offset)
4450 {
4451 if (offset.reladdr || offset.reladdr2 ||
4452 offset.has_index2 ||
4453 offset.file == PROGRAM_UNIFORM ||
4454 offset.file == PROGRAM_CONSTANT ||
4455 offset.file == PROGRAM_STATE_VAR) {
4456 st_src_reg tmp = get_temp(glsl_type::ivec2_type);
4457 st_dst_reg tmp_dst = st_dst_reg(tmp);
4458 tmp_dst.writemask = WRITEMASK_XY;
4459 emit_asm(NULL, TGSI_OPCODE_MOV, tmp_dst, offset);
4460 return tmp;
4461 }
4462
4463 return offset;
4464 }
4465
4466 bool
4467 glsl_to_tgsi_visitor::handle_bound_deref(ir_dereference *ir)
4468 {
4469 ir_variable *var = ir->variable_referenced();
4470
4471 if (!var || var->data.mode != ir_var_uniform || var->data.bindless ||
4472 !(ir->type->is_image() || ir->type->is_sampler()))
4473 return false;
4474
4475 /* Convert from bound sampler/image to bindless handle. */
4476 bool is_image = ir->type->is_image();
4477 st_src_reg resource(is_image ? PROGRAM_IMAGE : PROGRAM_SAMPLER, 0, GLSL_TYPE_UINT);
4478 uint16_t index = 0;
4479 unsigned array_size = 1, base = 0;
4480 st_src_reg reladdr;
4481 get_deref_offsets(ir, &array_size, &base, &index, &reladdr, true);
4482
4483 resource.index = index;
4484 if (reladdr.file != PROGRAM_UNDEFINED) {
4485 resource.reladdr = ralloc(mem_ctx, st_src_reg);
4486 *resource.reladdr = reladdr;
4487 emit_arl(ir, sampler_reladdr, reladdr);
4488 }
4489
4490 this->result = get_temp(glsl_type::uvec2_type);
4491 st_dst_reg dst(this->result);
4492 dst.writemask = WRITEMASK_XY;
4493
4494 glsl_to_tgsi_instruction *inst = emit_asm(
4495 ir, is_image ? TGSI_OPCODE_IMG2HND : TGSI_OPCODE_SAMP2HND, dst);
4496
4497 inst->tex_target = ir->type->sampler_index();
4498 inst->resource = resource;
4499 inst->sampler_array_size = array_size;
4500 inst->sampler_base = base;
4501
4502 return true;
4503 }
4504
4505 void
4506 glsl_to_tgsi_visitor::visit(ir_texture *ir)
4507 {
4508 st_src_reg result_src, coord, cube_sc, lod_info, projector, dx, dy;
4509 st_src_reg offset[MAX_GLSL_TEXTURE_OFFSET], sample_index, component;
4510 st_src_reg levels_src, reladdr;
4511 st_dst_reg result_dst, coord_dst, cube_sc_dst;
4512 glsl_to_tgsi_instruction *inst = NULL;
4513 enum tgsi_opcode opcode = TGSI_OPCODE_NOP;
4514 const glsl_type *sampler_type = ir->sampler->type;
4515 unsigned sampler_array_size = 1, sampler_base = 0;
4516 bool is_cube_array = false;
4517 ir_variable *var = ir->sampler->variable_referenced();
4518 unsigned i;
4519
4520 /* Note whether the sampler is a cube array; several opcode choices below depend on it. */
4521 if (sampler_type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE) {
4522 is_cube_array = sampler_type->sampler_array;
4523 }
4524
4525 if (ir->coordinate) {
4526 ir->coordinate->accept(this);
4527
4528 /* Put our coords in a temp. We'll need to modify them for shadow,
4529 * projection, or LOD, so the only case we'd use it as-is is if
4530 * we're doing plain old texturing. The optimization passes on
4531 * glsl_to_tgsi_visitor should handle cleaning up our mess in that case.
4532 */
4533 coord = get_temp(glsl_type::vec4_type);
4534 coord_dst = st_dst_reg(coord);
4535 coord_dst.writemask = (1 << ir->coordinate->type->vector_elements) - 1;
4536 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
4537 }
4538
4539 if (ir->projector) {
4540 ir->projector->accept(this);
4541 projector = this->result;
4542 }
4543
4544 /* Storage for our result. Ideally for an assignment we'd be using
4545 * the actual storage for the result here, instead.
4546 */
4547 result_src = get_temp(ir->type);
4548 result_dst = st_dst_reg(result_src);
4549 result_dst.writemask = (1 << ir->type->vector_elements) - 1;
4550
4551 switch (ir->op) {
4552 case ir_tex:
4553 opcode = (is_cube_array && ir->shadow_comparator) ? TGSI_OPCODE_TEX2 : TGSI_OPCODE_TEX;
4554 if (ir->offset) {
4555 ir->offset->accept(this);
4556 offset[0] = this->result;
4557 }
4558 break;
4559 case ir_txb:
4560 if (is_cube_array ||
4561 (sampler_type->sampler_shadow && sampler_type->coordinate_components() >= 3)) {
4562 opcode = TGSI_OPCODE_TXB2;
4563 }
4564 else {
4565 opcode = TGSI_OPCODE_TXB;
4566 }
4567 ir->lod_info.bias->accept(this);
4568 lod_info = this->result;
4569 if (ir->offset) {
4570 ir->offset->accept(this);
4571 offset[0] = this->result;
4572 }
4573 break;
4574 case ir_txl:
4575 if (this->has_tex_txf_lz && ir->lod_info.lod->is_zero()) {
4576 opcode = TGSI_OPCODE_TEX_LZ;
4577 } else {
4578 opcode = (is_cube_array || (sampler_type->sampler_shadow && sampler_type->coordinate_components() >= 3)) ? TGSI_OPCODE_TXL2 : TGSI_OPCODE_TXL;
4579 ir->lod_info.lod->accept(this);
4580 lod_info = this->result;
4581 }
4582 if (ir->offset) {
4583 ir->offset->accept(this);
4584 offset[0] = this->result;
4585 }
4586 break;
4587 case ir_txd:
4588 opcode = TGSI_OPCODE_TXD;
4589 ir->lod_info.grad.dPdx->accept(this);
4590 dx = this->result;
4591 ir->lod_info.grad.dPdy->accept(this);
4592 dy = this->result;
4593 if (ir->offset) {
4594 ir->offset->accept(this);
4595 offset[0] = this->result;
4596 }
4597 break;
4598 case ir_txs:
4599 opcode = TGSI_OPCODE_TXQ;
4600 ir->lod_info.lod->accept(this);
4601 lod_info = this->result;
4602 break;
4603 case ir_query_levels:
4604 opcode = TGSI_OPCODE_TXQ;
4605 lod_info = undef_src;
4606 levels_src = get_temp(ir->type);
4607 break;
4608 case ir_txf:
4609 if (this->has_tex_txf_lz && ir->lod_info.lod->is_zero()) {
4610 opcode = TGSI_OPCODE_TXF_LZ;
4611 } else {
4612 opcode = TGSI_OPCODE_TXF;
4613 ir->lod_info.lod->accept(this);
4614 lod_info = this->result;
4615 }
4616 if (ir->offset) {
4617 ir->offset->accept(this);
4618 offset[0] = this->result;
4619 }
4620 break;
4621 case ir_txf_ms:
4622 opcode = TGSI_OPCODE_TXF;
4623 ir->lod_info.sample_index->accept(this);
4624 sample_index = this->result;
4625 break;
4626 case ir_tg4:
4627 opcode = TGSI_OPCODE_TG4;
4628 ir->lod_info.component->accept(this);
4629 component = this->result;
4630 if (ir->offset) {
4631 ir->offset->accept(this);
4632 if (ir->offset->type->is_array()) {
4633 const glsl_type *elt_type = ir->offset->type->fields.array;
4634 for (i = 0; i < ir->offset->type->length; i++) {
4635 offset[i] = this->result;
4636 offset[i].index += i * type_size(elt_type);
4637 offset[i].type = elt_type->base_type;
4638 offset[i].swizzle = swizzle_for_size(elt_type->vector_elements);
4639 offset[i] = canonicalize_gather_offset(offset[i]);
4640 }
4641 } else {
4642 offset[0] = canonicalize_gather_offset(this->result);
4643 }
4644 }
4645 break;
4646 case ir_lod:
4647 opcode = TGSI_OPCODE_LODQ;
4648 break;
4649 case ir_texture_samples:
4650 opcode = TGSI_OPCODE_TXQS;
4651 break;
4652 case ir_samples_identical:
4653 unreachable("Unexpected ir_samples_identical opcode");
4654 }
4655
4656 if (ir->projector) {
4657 if (opcode == TGSI_OPCODE_TEX) {
4658 /* Slot the projector in as the last component of the coord. */
4659 coord_dst.writemask = WRITEMASK_W;
4660 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, projector);
4661 coord_dst.writemask = WRITEMASK_XYZW;
4662 opcode = TGSI_OPCODE_TXP;
4663 } else {
4664 st_src_reg coord_w = coord;
4665 coord_w.swizzle = SWIZZLE_WWWW;
4666
4667 /* For the other TEX opcodes there's no projective version
4668 * since the last slot is taken up by LOD info. Do the
4669 * projective divide now.
4670 */
4671 coord_dst.writemask = WRITEMASK_W;
4672 emit_asm(ir, TGSI_OPCODE_RCP, coord_dst, projector);
4673
4674 /* In the case where we have to project the coordinates "by hand,"
4675 * the shadow comparator value must also be projected.
4676 */
4677 st_src_reg tmp_src = coord;
4678 if (ir->shadow_comparator) {
4679 /* Slot the shadow value in as the second to last component of the
4680 * coord.
4681 */
4682 ir->shadow_comparator->accept(this);
4683
4684 tmp_src = get_temp(glsl_type::vec4_type);
4685 st_dst_reg tmp_dst = st_dst_reg(tmp_src);
4686
4687 /* Projective division not allowed for array samplers. */
4688 assert(!sampler_type->sampler_array);
4689
4690 tmp_dst.writemask = WRITEMASK_Z;
4691 emit_asm(ir, TGSI_OPCODE_MOV, tmp_dst, this->result);
4692
4693 tmp_dst.writemask = WRITEMASK_XY;
4694 emit_asm(ir, TGSI_OPCODE_MOV, tmp_dst, coord);
4695 }
4696
4697 coord_dst.writemask = WRITEMASK_XYZ;
4698 emit_asm(ir, TGSI_OPCODE_MUL, coord_dst, tmp_src, coord_w);
4699
4700 coord_dst.writemask = WRITEMASK_XYZW;
4701 coord.swizzle = SWIZZLE_XYZW;
4702 }
4703 }
4704
4705 /* If projection is done and the opcode is not TGSI_OPCODE_TXP, then the
4706 * shadow comparator was put in the correct place (and projected) by the
4707 * code, above, that handles by-hand projection.
4708 */
4709 if (ir->shadow_comparator && (!ir->projector || opcode == TGSI_OPCODE_TXP)) {
4710 /* Slot the shadow value in as the second to last component of the
4711 * coord.
4712 */
4713 ir->shadow_comparator->accept(this);
4714
4715 if (is_cube_array) {
4716 if (lod_info.file != PROGRAM_UNDEFINED) {
4717 // If we have both a cube array *and* a bias/lod, stick the
4718 // comparator into the .Y of the second argument.
4719 st_src_reg tmp = get_temp(glsl_type::vec2_type);
4720 cube_sc_dst = st_dst_reg(tmp);
4721 cube_sc_dst.writemask = WRITEMASK_X;
4722 emit_asm(ir, TGSI_OPCODE_MOV, cube_sc_dst, lod_info);
4723 lod_info = tmp;
4724 cube_sc_dst.writemask = WRITEMASK_Y;
4725 } else {
4726 cube_sc = get_temp(glsl_type::float_type);
4727 cube_sc_dst = st_dst_reg(cube_sc);
4728 cube_sc_dst.writemask = WRITEMASK_X;
4729 }
4730 emit_asm(ir, TGSI_OPCODE_MOV, cube_sc_dst, this->result);
4731 }
4732 else {
4733 if ((sampler_type->sampler_dimensionality == GLSL_SAMPLER_DIM_2D &&
4734 sampler_type->sampler_array) ||
4735 sampler_type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE) {
4736 coord_dst.writemask = WRITEMASK_W;
4737 } else {
4738 coord_dst.writemask = WRITEMASK_Z;
4739 }
4740 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
4741 coord_dst.writemask = WRITEMASK_XYZW;
4742 }
4743 }
4744
4745 if (ir->op == ir_txf_ms) {
4746 coord_dst.writemask = WRITEMASK_W;
4747 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, sample_index);
4748 coord_dst.writemask = WRITEMASK_XYZW;
4749 } else if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXB ||
4750 opcode == TGSI_OPCODE_TXF) {
4751 /* TGSI stores LOD or LOD bias in the last channel of the coords. */
4752 coord_dst.writemask = WRITEMASK_W;
4753 emit_asm(ir, TGSI_OPCODE_MOV, coord_dst, lod_info);
4754 coord_dst.writemask = WRITEMASK_XYZW;
4755 }
4756
4757 st_src_reg sampler(PROGRAM_SAMPLER, 0, GLSL_TYPE_UINT);
4758
4759 uint16_t index = 0;
4760 get_deref_offsets(ir->sampler, &sampler_array_size, &sampler_base,
4761 &index, &reladdr, !var->contains_bindless());
4762
4763 sampler.index = index;
4764 if (reladdr.file != PROGRAM_UNDEFINED) {
4765 sampler.reladdr = ralloc(mem_ctx, st_src_reg);
4766 *sampler.reladdr = reladdr;
4767 emit_arl(ir, sampler_reladdr, reladdr);
4768 }
4769
4770 st_src_reg bindless;
4771 if (var->contains_bindless()) {
4772 ir->sampler->accept(this);
4773 bindless = this->result;
4774 }
4775
4776 if (opcode == TGSI_OPCODE_TXD)
4777 inst = emit_asm(ir, opcode, result_dst, coord, dx, dy);
4778 else if (opcode == TGSI_OPCODE_TXQ) {
4779 if (ir->op == ir_query_levels) {
4780 /* the number of mipmap levels is returned in W */
4781 inst = emit_asm(ir, opcode, st_dst_reg(levels_src), lod_info);
4782 result_dst.writemask = WRITEMASK_X;
4783 levels_src.swizzle = SWIZZLE_WWWW;
4784 emit_asm(ir, TGSI_OPCODE_MOV, result_dst, levels_src);
4785 } else
4786 inst = emit_asm(ir, opcode, result_dst, lod_info);
4787 } else if (opcode == TGSI_OPCODE_TXQS) {
4788 inst = emit_asm(ir, opcode, result_dst);
4789 } else if (opcode == TGSI_OPCODE_TXL2 || opcode == TGSI_OPCODE_TXB2) {
4790 inst = emit_asm(ir, opcode, result_dst, coord, lod_info);
4791 } else if (opcode == TGSI_OPCODE_TEX2) {
4792 inst = emit_asm(ir, opcode, result_dst, coord, cube_sc);
4793 } else if (opcode == TGSI_OPCODE_TG4) {
4794 if (is_cube_array && ir->shadow_comparator) {
4795 inst = emit_asm(ir, opcode, result_dst, coord, cube_sc);
4796 } else {
4797 if (this->tg4_component_in_swizzle) {
4798 inst = emit_asm(ir, opcode, result_dst, coord);
4799 int idx = 0;
4800 foreach_in_list(immediate_storage, entry, &this->immediates) {
4801 if (component.index == idx) {
4802 gl_constant_value value = entry->values[component.swizzle];
4803 inst->gather_component = value.i;
4804 break;
4805 }
4806 idx++;
4807 }
4808 } else {
4809 inst = emit_asm(ir, opcode, result_dst, coord, component);
4810 }
4811 }
4812 } else
4813 inst = emit_asm(ir, opcode, result_dst, coord);
4814
4815 if (ir->shadow_comparator)
4816 inst->tex_shadow = GL_TRUE;
4817
4818 if (var->contains_bindless()) {
4819 inst->resource = bindless;
4820 inst->resource.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y,
4821 SWIZZLE_X, SWIZZLE_Y);
4822 } else {
4823 inst->resource = sampler;
4824 inst->sampler_array_size = sampler_array_size;
4825 inst->sampler_base = sampler_base;
4826 }
4827
4828 if (ir->offset) {
4829 if (!inst->tex_offsets)
4830 inst->tex_offsets = rzalloc_array(inst, st_src_reg,
4831 MAX_GLSL_TEXTURE_OFFSET);
4832
4833 for (i = 0; i < MAX_GLSL_TEXTURE_OFFSET &&
4834 offset[i].file != PROGRAM_UNDEFINED; i++)
4835 inst->tex_offsets[i] = offset[i];
4836 inst->tex_offset_num_offset = i;
4837 }
4838
4839 inst->tex_target = sampler_type->sampler_index();
4840 inst->tex_type = ir->type->base_type;
4841
4842 this->result = result_src;
4843 }
4844
4845 void
4846 glsl_to_tgsi_visitor::visit(ir_return *ir)
4847 {
4848 assert(!ir->get_value());
4849
4850 emit_asm(ir, TGSI_OPCODE_RET);
4851 }
4852
4853 void
4854 glsl_to_tgsi_visitor::visit(ir_discard *ir)
4855 {
4856 if (ir->condition) {
4857 ir->condition->accept(this);
4858 st_src_reg condition = this->result;
4859
4860 /* Convert the bool condition to a float so we can negate. */
4861 if (native_integers) {
4862 st_src_reg temp = get_temp(ir->condition->type);
4863 emit_asm(ir, TGSI_OPCODE_AND, st_dst_reg(temp),
4864 condition, st_src_reg_for_float(1.0));
4865 condition = temp;
4866 }
4867
4868 condition.negate = ~condition.negate;
4869 emit_asm(ir, TGSI_OPCODE_KILL_IF, undef_dst, condition);
4870 } else {
4871 /* unconditional kill */
4872 emit_asm(ir, TGSI_OPCODE_KILL);
4873 }
4874 }
4875
4876 void
4877 glsl_to_tgsi_visitor::visit(ir_demote *ir)
4878 {
4879 emit_asm(ir, TGSI_OPCODE_DEMOTE);
4880 }
4881
4882 void
4883 glsl_to_tgsi_visitor::visit(ir_if *ir)
4884 {
4885 enum tgsi_opcode if_opcode;
4886 glsl_to_tgsi_instruction *if_inst;
4887
4888 ir->condition->accept(this);
4889 assert(this->result.file != PROGRAM_UNDEFINED);
4890
4891 if_opcode = native_integers ? TGSI_OPCODE_UIF : TGSI_OPCODE_IF;
4892
4893 if_inst = emit_asm(ir->condition, if_opcode, undef_dst, this->result);
4894
4895 this->instructions.push_tail(if_inst);
4896
4897 visit_exec_list(&ir->then_instructions, this);
4898
4899 if (!ir->else_instructions.is_empty()) {
4900 emit_asm(ir->condition, TGSI_OPCODE_ELSE);
4901 visit_exec_list(&ir->else_instructions, this);
4902 }
4903
4904 if_inst = emit_asm(ir->condition, TGSI_OPCODE_ENDIF);
4905 }
4906
4907
4908 void
4909 glsl_to_tgsi_visitor::visit(ir_emit_vertex *ir)
4910 {
4911 assert(this->prog->Target == GL_GEOMETRY_PROGRAM_NV);
4912
4913 ir->stream->accept(this);
4914 emit_asm(ir, TGSI_OPCODE_EMIT, undef_dst, this->result);
4915 }
4916
4917 void
4918 glsl_to_tgsi_visitor::visit(ir_end_primitive *ir)
4919 {
4920 assert(this->prog->Target == GL_GEOMETRY_PROGRAM_NV);
4921
4922 ir->stream->accept(this);
4923 emit_asm(ir, TGSI_OPCODE_ENDPRIM, undef_dst, this->result);
4924 }
4925
4926 void
4927 glsl_to_tgsi_visitor::visit(ir_barrier *ir)
4928 {
4929 assert(this->prog->Target == GL_TESS_CONTROL_PROGRAM_NV ||
4930 this->prog->Target == GL_COMPUTE_PROGRAM_NV);
4931
4932 emit_asm(ir, TGSI_OPCODE_BARRIER);
4933 }
4934
4935 glsl_to_tgsi_visitor::glsl_to_tgsi_visitor()
4936 {
4937 STATIC_ASSERT(sizeof(samplers_used) * 8 >= PIPE_MAX_SAMPLERS);
4938
4939 result.file = PROGRAM_UNDEFINED;
4940 next_temp = 1;
4941 array_sizes = NULL;
4942 max_num_arrays = 0;
4943 next_array = 0;
4944 num_inputs = 0;
4945 num_outputs = 0;
4946 num_input_arrays = 0;
4947 num_output_arrays = 0;
4948 num_atomics = 0;
4949 num_atomic_arrays = 0;
4950 num_immediates = 0;
4951 num_address_regs = 0;
4952 samplers_used = 0;
4953 images_used = 0;
4954 indirect_addr_consts = false;
4955 wpos_transform_const = -1;
4956 native_integers = false;
4957 mem_ctx = ralloc_context(NULL);
4958 ctx = NULL;
4959 prog = NULL;
4960 precise = 0;
4961 tg4_component_in_swizzle = false;
4962 shader_program = NULL;
4963 shader = NULL;
4964 options = NULL;
4965 have_sqrt = false;
4966 have_fma = false;
4967 use_shared_memory = false;
4968 has_tex_txf_lz = false;
4969 variables = NULL;
4970 }
4971
4972 static void var_destroy(struct hash_entry *entry)
4973 {
4974 variable_storage *storage = (variable_storage *)entry->data;
4975
4976 delete storage;
4977 }
4978
4979 glsl_to_tgsi_visitor::~glsl_to_tgsi_visitor()
4980 {
4981 _mesa_hash_table_destroy(variables, var_destroy);
4982 free(array_sizes);
4983 ralloc_free(mem_ctx);
4984 }
4985
4986 extern "C" void free_glsl_to_tgsi_visitor(glsl_to_tgsi_visitor *v)
4987 {
4988 delete v;
4989 }
4990
4991
4992 /**
4993 * Count resources used by the given gpu program (number of texture
4994 * samplers, etc).
4995 */
4996 static void
4997 count_resources(glsl_to_tgsi_visitor *v, gl_program *prog)
4998 {
4999 v->samplers_used = 0;
5000 v->images_used = 0;
5001 BITSET_ZERO(prog->info.textures_used_by_txf);
5002
5003 foreach_in_list(glsl_to_tgsi_instruction, inst, &v->instructions) {
5004 if (inst->info->is_tex) {
5005 for (int i = 0; i < inst->sampler_array_size; i++) {
5006 unsigned idx = inst->sampler_base + i;
5007 v->samplers_used |= 1u << idx;
5008
5009 debug_assert(idx < (int)ARRAY_SIZE(v->sampler_types));
5010 v->sampler_types[idx] = inst->tex_type;
5011 v->sampler_targets[idx] =
5012 st_translate_texture_target(inst->tex_target, inst->tex_shadow);
5013
5014 if (inst->op == TGSI_OPCODE_TXF || inst->op == TGSI_OPCODE_TXF_LZ) {
5015 BITSET_SET(prog->info.textures_used_by_txf, idx);
5016 }
5017 }
5018 }
5019
5020 if (inst->tex_target == TEXTURE_EXTERNAL_INDEX)
5021 prog->ExternalSamplersUsed |= 1 << inst->resource.index;
5022
5023 if (inst->resource.file != PROGRAM_UNDEFINED && (
5024 is_resource_instruction(inst->op) ||
5025 inst->op == TGSI_OPCODE_STORE)) {
5026 if (inst->resource.file == PROGRAM_MEMORY) {
5027 v->use_shared_memory = true;
5028 } else if (inst->resource.file == PROGRAM_IMAGE) {
5029 for (int i = 0; i < inst->sampler_array_size; i++) {
5030 unsigned idx = inst->sampler_base + i;
5031 v->images_used |= 1 << idx;
5032 v->image_targets[idx] =
5033 st_translate_texture_target(inst->tex_target, false);
5034 v->image_formats[idx] = inst->image_format;
5035 v->image_wr[idx] = !inst->read_only;
5036 }
5037 }
5038 }
5039 }
5040 prog->SamplersUsed = v->samplers_used;
5041
5042 if (v->shader_program != NULL)
5043 _mesa_update_shader_textures_used(v->shader_program, prog);
5044 }
5045
5046 /**
5047 * Returns the mask of channels (bitmask of WRITEMASK_X,Y,Z,W) which
5048 * are read from the given src in this instruction
5049 */
5050 static int
5051 get_src_arg_mask(st_dst_reg dst, st_src_reg src)
5052 {
5053 int read_mask = 0, comp;
5054
5055 /* Now, given the src swizzle and the written channels, find which
5056 * components are actually read
5057 */
5058 for (comp = 0; comp < 4; ++comp) {
5059 const unsigned coord = GET_SWZ(src.swizzle, comp);
5060 assert(coord < 4);
5061 if (dst.writemask & (1 << comp) && coord <= SWIZZLE_W)
5062 read_mask |= 1 << coord;
5063 }
5064
5065 return read_mask;
5066 }
5067
5068 /**
5069 * This pass replaces CMP T0, T1 T2 T0 with MOV T0, T2 when the CMP
5070 * instruction is the first instruction to write to register T0. There are
5071 * several lowering passes done in GLSL IR (e.g. branches and
5072 * relative addressing) that create a large number of conditional assignments
5073 * that glsl_to_tgsi converts to CMP instructions like the one mentioned above.
5074 *
5075 * Here is why this conversion is safe:
5076 * CMP T0, T1 T2 T0 can be expanded to:
5077 * if (T1 < 0.0)
5078 * MOV T0, T2;
5079 * else
5080 * MOV T0, T0;
5081 *
5082 * If (T1 < 0.0) evaluates to true then our replacement MOV T0, T2 is the same
5083 * as the original program. If (T1 < 0.0) evaluates to false, executing
5084 * MOV T0, T0 will store a garbage value in T0 since T0 is uninitialized.
5085 * Therefore, it doesn't matter that we are replacing MOV T0, T0 with MOV T0, T2
5086 * because any instruction that was going to read from T0 after this was going
5087 * to read a garbage value anyway.
5088 */
5089 void
5090 glsl_to_tgsi_visitor::simplify_cmp(void)
5091 {
5092 int tempWritesSize = 0;
5093 unsigned *tempWrites = NULL;
5094 unsigned outputWrites[VARYING_SLOT_TESS_MAX];
5095
5096 memset(outputWrites, 0, sizeof(outputWrites));
5097
5098 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5099 unsigned prevWriteMask = 0;
5100
5101 /* Give up if we encounter relative addressing or flow control. */
5102 if (inst->dst[0].reladdr || inst->dst[0].reladdr2 ||
5103 inst->dst[1].reladdr || inst->dst[1].reladdr2 ||
5104 inst->info->is_branch ||
5105 inst->op == TGSI_OPCODE_CONT ||
5106 inst->op == TGSI_OPCODE_END ||
5107 inst->op == TGSI_OPCODE_RET) {
5108 break;
5109 }
5110
5111 if (inst->dst[0].file == PROGRAM_OUTPUT) {
5112 assert(inst->dst[0].index < (signed)ARRAY_SIZE(outputWrites));
5113 prevWriteMask = outputWrites[inst->dst[0].index];
5114 outputWrites[inst->dst[0].index] |= inst->dst[0].writemask;
5115 } else if (inst->dst[0].file == PROGRAM_TEMPORARY) {
5116 if (inst->dst[0].index >= tempWritesSize) {
5117 const int inc = 4096;
5118
5119 tempWrites = (unsigned*)
5120 realloc(tempWrites,
5121 (tempWritesSize + inc) * sizeof(unsigned));
5122 if (!tempWrites)
5123 return;
5124
5125 memset(tempWrites + tempWritesSize, 0, inc * sizeof(unsigned));
5126 tempWritesSize += inc;
5127 }
5128
5129 prevWriteMask = tempWrites[inst->dst[0].index];
5130 tempWrites[inst->dst[0].index] |= inst->dst[0].writemask;
5131 } else
5132 continue;
5133
5134 /* For a CMP to be considered a conditional write, the destination
5135 * register and source register two must be the same. */
5136 if (inst->op == TGSI_OPCODE_CMP
5137 && !(inst->dst[0].writemask & prevWriteMask)
5138 && inst->src[2].file == inst->dst[0].file
5139 && inst->src[2].index == inst->dst[0].index
5140 && inst->dst[0].writemask ==
5141 get_src_arg_mask(inst->dst[0], inst->src[2])) {
5142
5143 inst->op = TGSI_OPCODE_MOV;
5144 inst->info = tgsi_get_opcode_info(inst->op);
5145 inst->src[0] = inst->src[1];
5146 }
5147 }
5148
5149 free(tempWrites);
5150 }
5151
5152 static void
5153 rename_temp_handle_src(struct rename_reg_pair *renames, st_src_reg *src)
5154 {
5155 if (src && src->file == PROGRAM_TEMPORARY) {
5156 int old_idx = src->index;
5157 if (renames[old_idx].valid)
5158 src->index = renames[old_idx].new_reg;
5159 }
5160 }
5161
5162 /* Replaces all references to a temporary register index with another index. */
5163 void
5164 glsl_to_tgsi_visitor::rename_temp_registers(struct rename_reg_pair *renames)
5165 {
5166 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5167 unsigned j;
5168 for (j = 0; j < num_inst_src_regs(inst); j++) {
5169 rename_temp_handle_src(renames, &inst->src[j]);
5170 rename_temp_handle_src(renames, inst->src[j].reladdr);
5171 rename_temp_handle_src(renames, inst->src[j].reladdr2);
5172 }
5173
5174 for (j = 0; j < inst->tex_offset_num_offset; j++) {
5175 rename_temp_handle_src(renames, &inst->tex_offsets[j]);
5176 rename_temp_handle_src(renames, inst->tex_offsets[j].reladdr);
5177 rename_temp_handle_src(renames, inst->tex_offsets[j].reladdr2);
5178 }
5179
5180 rename_temp_handle_src(renames, &inst->resource);
5181 rename_temp_handle_src(renames, inst->resource.reladdr);
5182 rename_temp_handle_src(renames, inst->resource.reladdr2);
5183
5184 for (j = 0; j < num_inst_dst_regs(inst); j++) {
5185 if (inst->dst[j].file == PROGRAM_TEMPORARY) {
5186 int old_idx = inst->dst[j].index;
5187 if (renames[old_idx].valid)
5188 inst->dst[j].index = renames[old_idx].new_reg;
5189 }
5190 rename_temp_handle_src(renames, inst->dst[j].reladdr);
5191 rename_temp_handle_src(renames, inst->dst[j].reladdr2);
5192 }
5193 }
5194 }
5195
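/* The scan helpers below walk the instruction list once and record, per
 * temporary register, the instruction index of the interesting access.
 * Accesses inside loops are attributed conservatively to the loop
 * boundaries: a first read/write inside a loop is recorded at the
 * outermost BGNLOOP, and a last read/write inside a loop is deferred to
 * the matching ENDLOOP (tracked with the -2 marker).
 */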
5196 void
5197 glsl_to_tgsi_visitor::get_first_temp_write(int *first_writes)
5198 {
5199 int depth = 0; /* loop depth */
5200 int loop_start = -1; /* index of the first active BGNLOOP (if any) */
5201 unsigned i = 0, j;
5202
5203 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5204 for (j = 0; j < num_inst_dst_regs(inst); j++) {
5205 if (inst->dst[j].file == PROGRAM_TEMPORARY) {
5206 if (first_writes[inst->dst[j].index] == -1)
5207 first_writes[inst->dst[j].index] = (depth == 0) ? i : loop_start;
5208 }
5209 }
5210
5211 if (inst->op == TGSI_OPCODE_BGNLOOP) {
5212 if (depth++ == 0)
5213 loop_start = i;
5214 } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
5215 if (--depth == 0)
5216 loop_start = -1;
5217 }
5218 assert(depth >= 0);
5219 i++;
5220 }
5221 }
5222
5223 void
5224 glsl_to_tgsi_visitor::get_first_temp_read(int *first_reads)
5225 {
5226 int depth = 0; /* loop depth */
5227 int loop_start = -1; /* index of the first active BGNLOOP (if any) */
5228 unsigned i = 0, j;
5229
5230 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5231 for (j = 0; j < num_inst_src_regs(inst); j++) {
5232 if (inst->src[j].file == PROGRAM_TEMPORARY) {
5233 if (first_reads[inst->src[j].index] == -1)
5234 first_reads[inst->src[j].index] = (depth == 0) ? i : loop_start;
5235 }
5236 }
5237 for (j = 0; j < inst->tex_offset_num_offset; j++) {
5238 if (inst->tex_offsets[j].file == PROGRAM_TEMPORARY) {
5239 if (first_reads[inst->tex_offsets[j].index] == -1)
5240 first_reads[inst->tex_offsets[j].index] = (depth == 0) ? i : loop_start;
5241 }
5242 }
5243 if (inst->op == TGSI_OPCODE_BGNLOOP) {
5244 if (depth++ == 0)
5245 loop_start = i;
5246 } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
5247 if (--depth == 0)
5248 loop_start = -1;
5249 }
5250 assert(depth >= 0);
5251 i++;
5252 }
5253 }
5254
5255 void
5256 glsl_to_tgsi_visitor::get_last_temp_read_first_temp_write(int *last_reads, int *first_writes)
5257 {
5258 int depth = 0; /* loop depth */
5259 int loop_start = -1; /* index of the first active BGNLOOP (if any) */
5260 unsigned i = 0, j;
5261 int k;
5262 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5263 for (j = 0; j < num_inst_src_regs(inst); j++) {
5264 if (inst->src[j].file == PROGRAM_TEMPORARY)
5265 last_reads[inst->src[j].index] = (depth == 0) ? i : -2;
5266 }
5267 for (j = 0; j < num_inst_dst_regs(inst); j++) {
5268 if (inst->dst[j].file == PROGRAM_TEMPORARY) {
5269 if (first_writes[inst->dst[j].index] == -1)
5270 first_writes[inst->dst[j].index] = (depth == 0) ? i : loop_start;
5271 last_reads[inst->dst[j].index] = (depth == 0) ? i : -2;
5272 }
5273 }
5274 for (j = 0; j < inst->tex_offset_num_offset; j++) {
5275 if (inst->tex_offsets[j].file == PROGRAM_TEMPORARY)
5276 last_reads[inst->tex_offsets[j].index] = (depth == 0) ? i : -2;
5277 }
5278 if (inst->op == TGSI_OPCODE_BGNLOOP) {
5279 if (depth++ == 0)
5280 loop_start = i;
5281 } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
5282 if (--depth == 0) {
5283 loop_start = -1;
5284 for (k = 0; k < this->next_temp; k++) {
5285 if (last_reads[k] == -2) {
5286 last_reads[k] = i;
5287 }
5288 }
5289 }
5290 }
5291 assert(depth >= 0);
5292 i++;
5293 }
5294 }
5295
5296 void
5297 glsl_to_tgsi_visitor::get_last_temp_write(int *last_writes)
5298 {
5299 int depth = 0; /* loop depth */
5300 int i = 0, k;
5301 unsigned j;
5302
5303 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5304 for (j = 0; j < num_inst_dst_regs(inst); j++) {
5305 if (inst->dst[j].file == PROGRAM_TEMPORARY)
5306 last_writes[inst->dst[j].index] = (depth == 0) ? i : -2;
5307 }
5308
5309 if (inst->op == TGSI_OPCODE_BGNLOOP)
5310 depth++;
5311 else if (inst->op == TGSI_OPCODE_ENDLOOP)
5312 if (--depth == 0) {
5313 for (k = 0; k < this->next_temp; k++) {
5314 if (last_writes[k] == -2) {
5315 last_writes[k] = i;
5316 }
5317 }
5318 }
5319 assert(depth >= 0);
5320 i++;
5321 }
5322 }
5323
5324 /*
5325 * On a basic block basis, tracks available PROGRAM_TEMPORARY register
5326 * channels for copy propagation and updates following instructions to
5327 * use the original versions.
5328 *
5329 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
5330 * will occur. As an example, a TXP production before this pass:
5331 *
5332 * 0: MOV TEMP[1], INPUT[4].xyyy;
5333 * 1: MOV TEMP[1].w, INPUT[4].wwww;
5334 * 2: TXP TEMP[2], TEMP[1], texture[0], 2D;
5335 *
5336 * and after:
5337 *
5338 * 0: MOV TEMP[1], INPUT[4].xyyy;
5339 * 1: MOV TEMP[1].w, INPUT[4].wwww;
5340 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
5341 *
5342 * which allows for dead code elimination on TEMP[1]'s writes.
5343 */
5344 void
5345 glsl_to_tgsi_visitor::copy_propagate(void)
5346 {
5347 glsl_to_tgsi_instruction **acp = rzalloc_array(mem_ctx,
5348 glsl_to_tgsi_instruction *,
5349 this->next_temp * 4);
5350 int *acp_level = rzalloc_array(mem_ctx, int, this->next_temp * 4);
5351 int level = 0;
5352
5353 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5354 assert(inst->dst[0].file != PROGRAM_TEMPORARY
5355 || inst->dst[0].index < this->next_temp);
5356
5357 /* First, do any copy propagation possible into the src regs. */
5358 for (int r = 0; r < 3; r++) {
5359 glsl_to_tgsi_instruction *first = NULL;
5360 bool good = true;
5361 int acp_base = inst->src[r].index * 4;
5362
5363 if (inst->src[r].file != PROGRAM_TEMPORARY ||
5364 inst->src[r].reladdr ||
5365 inst->src[r].reladdr2)
5366 continue;
5367
5368 /* See if we can find entries in the ACP consisting of MOVs
5369 * from the same src register for all the swizzled channels
5370 * of this src register reference.
5371 */
5372 for (int i = 0; i < 4; i++) {
5373 int src_chan = GET_SWZ(inst->src[r].swizzle, i);
5374 glsl_to_tgsi_instruction *copy_chan = acp[acp_base + src_chan];
5375
5376 if (!copy_chan) {
5377 good = false;
5378 break;
5379 }
5380
5381 assert(acp_level[acp_base + src_chan] <= level);
5382
5383 if (!first) {
5384 first = copy_chan;
5385 } else {
5386 if (first->src[0].file != copy_chan->src[0].file ||
5387 first->src[0].index != copy_chan->src[0].index ||
5388 first->src[0].double_reg2 != copy_chan->src[0].double_reg2 ||
5389 first->src[0].index2D != copy_chan->src[0].index2D) {
5390 good = false;
5391 break;
5392 }
5393 }
5394 }
5395
5396 if (good) {
5397 /* We've now validated that we can copy-propagate to
5398 * replace this src register reference. Do it.
5399 */
5400 inst->src[r].file = first->src[0].file;
5401 inst->src[r].index = first->src[0].index;
5402 inst->src[r].index2D = first->src[0].index2D;
5403 inst->src[r].has_index2 = first->src[0].has_index2;
5404 inst->src[r].double_reg2 = first->src[0].double_reg2;
5405 inst->src[r].array_id = first->src[0].array_id;
5406
5407 int swizzle = 0;
5408 for (int i = 0; i < 4; i++) {
5409 int src_chan = GET_SWZ(inst->src[r].swizzle, i);
5410 glsl_to_tgsi_instruction *copy_inst = acp[acp_base + src_chan];
5411 swizzle |= (GET_SWZ(copy_inst->src[0].swizzle, src_chan) << (3 * i));
5412 }
5413 inst->src[r].swizzle = swizzle;
5414 }
5415 }
5416
5417 switch (inst->op) {
5418 case TGSI_OPCODE_BGNLOOP:
5419 case TGSI_OPCODE_ENDLOOP:
5420 /* End of a basic block, clear the ACP entirely. */
5421 memset(acp, 0, sizeof(*acp) * this->next_temp * 4);
5422 break;
5423
5424 case TGSI_OPCODE_IF:
5425 case TGSI_OPCODE_UIF:
5426 ++level;
5427 break;
5428
5429 case TGSI_OPCODE_ENDIF:
5430 case TGSI_OPCODE_ELSE:
5431 /* Clear all channels written inside the block from the ACP, but
5432 * leave those that were not touched.
5433 */
5434 for (int r = 0; r < this->next_temp; r++) {
5435 for (int c = 0; c < 4; c++) {
5436 if (!acp[4 * r + c])
5437 continue;
5438
5439 if (acp_level[4 * r + c] >= level)
5440 acp[4 * r + c] = NULL;
5441 }
5442 }
5443 if (inst->op == TGSI_OPCODE_ENDIF)
5444 --level;
5445 break;
5446
5447 default:
5448 /* Continuing the block, clear any written channels from
5449 * the ACP.
5450 */
5451 for (int d = 0; d < 2; d++) {
5452 if (inst->dst[d].file == PROGRAM_TEMPORARY && inst->dst[d].reladdr) {
5453 /* Any temporary might be written, so no copy propagation
5454 * across this instruction.
5455 */
5456 memset(acp, 0, sizeof(*acp) * this->next_temp * 4);
5457 } else if (inst->dst[d].file == PROGRAM_OUTPUT &&
5458 inst->dst[d].reladdr) {
5459 /* Any output might be written, so no copy propagation
5460 * from outputs across this instruction.
5461 */
5462 for (int r = 0; r < this->next_temp; r++) {
5463 for (int c = 0; c < 4; c++) {
5464 if (!acp[4 * r + c])
5465 continue;
5466
5467 if (acp[4 * r + c]->src[0].file == PROGRAM_OUTPUT)
5468 acp[4 * r + c] = NULL;
5469 }
5470 }
5471 } else if (inst->dst[d].file == PROGRAM_TEMPORARY ||
5472 inst->dst[d].file == PROGRAM_OUTPUT) {
5473 /* Clear where it's used as dst. */
5474 if (inst->dst[d].file == PROGRAM_TEMPORARY) {
5475 for (int c = 0; c < 4; c++) {
5476 if (inst->dst[d].writemask & (1 << c))
5477 acp[4 * inst->dst[d].index + c] = NULL;
5478 }
5479 }
5480
5481 /* Clear where it's used as src. */
5482 for (int r = 0; r < this->next_temp; r++) {
5483 for (int c = 0; c < 4; c++) {
5484 if (!acp[4 * r + c])
5485 continue;
5486
5487 int src_chan = GET_SWZ(acp[4 * r + c]->src[0].swizzle, c);
5488
5489 if (acp[4 * r + c]->src[0].file == inst->dst[d].file &&
5490 acp[4 * r + c]->src[0].index == inst->dst[d].index &&
5491 inst->dst[d].writemask & (1 << src_chan)) {
5492 acp[4 * r + c] = NULL;
5493 }
5494 }
5495 }
5496 }
5497 }
5498 break;
5499 }
5500
5501 /* If this is a copy, add it to the ACP. */
5502 if (inst->op == TGSI_OPCODE_MOV &&
5503 inst->dst[0].file == PROGRAM_TEMPORARY &&
5504 !(inst->dst[0].file == inst->src[0].file &&
5505 inst->dst[0].index == inst->src[0].index) &&
5506 !inst->dst[0].reladdr &&
5507 !inst->dst[0].reladdr2 &&
5508 !inst->saturate &&
5509 inst->src[0].file != PROGRAM_ARRAY &&
5510 (inst->src[0].file != PROGRAM_OUTPUT ||
5511 this->shader->Stage != MESA_SHADER_TESS_CTRL) &&
5512 !inst->src[0].reladdr &&
5513 !inst->src[0].reladdr2 &&
5514 !inst->src[0].negate &&
5515 !inst->src[0].abs) {
5516 for (int i = 0; i < 4; i++) {
5517 if (inst->dst[0].writemask & (1 << i)) {
5518 acp[4 * inst->dst[0].index + i] = inst;
5519 acp_level[4 * inst->dst[0].index + i] = level;
5520 }
5521 }
5522 }
5523 }
5524
5525 ralloc_free(acp_level);
5526 ralloc_free(acp);
5527 }
5528
5529 static void
5530 dead_code_handle_reladdr(glsl_to_tgsi_instruction **writes, st_src_reg *reladdr)
5531 {
5532 if (reladdr && reladdr->file == PROGRAM_TEMPORARY) {
5533 /* Clear where it's used as src. */
5534 int swz = GET_SWZ(reladdr->swizzle, 0);
5535 writes[4 * reladdr->index + swz] = NULL;
5536 }
5537 }
5538
5539 /*
5540 * On a basic block basis, tracks available PROGRAM_TEMPORARY registers for dead
5541 * code elimination.
5542 *
5543 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
5544 * will occur. As an example, a TXP production after copy propagation but
5545 * before this pass:
5546 *
5547 * 0: MOV TEMP[1], INPUT[4].xyyy;
5548 * 1: MOV TEMP[1].w, INPUT[4].wwww;
5549 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
5550 *
5551 * and after this pass:
5552 *
5553 * 0: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
5554 */
5555 int
5556 glsl_to_tgsi_visitor::eliminate_dead_code(void)
5557 {
5558 glsl_to_tgsi_instruction **writes = rzalloc_array(mem_ctx,
5559 glsl_to_tgsi_instruction *,
5560 this->next_temp * 4);
5561 int *write_level = rzalloc_array(mem_ctx, int, this->next_temp * 4);
5562 int level = 0;
5563 int removed = 0;
5564
5565 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5566 assert(inst->dst[0].file != PROGRAM_TEMPORARY
5567 || inst->dst[0].index < this->next_temp);
5568
5569 switch (inst->op) {
5570 case TGSI_OPCODE_BGNLOOP:
5571 case TGSI_OPCODE_ENDLOOP:
5572 case TGSI_OPCODE_CONT:
5573 case TGSI_OPCODE_BRK:
5574 /* End of a basic block, clear the write array entirely.
5575 *
5576 * This keeps us from killing dead code when the writes are
5577 * on either side of a loop, even when the register isn't touched
5578 * inside the loop. However, glsl_to_tgsi_visitor doesn't seem to emit
5579 * dead code of this type, so it shouldn't make a difference as long as
5580 * the dead code elimination pass in the GLSL compiler does its job.
5581 */
5582 memset(writes, 0, sizeof(*writes) * this->next_temp * 4);
5583 break;
5584
5585 case TGSI_OPCODE_ENDIF:
5586 case TGSI_OPCODE_ELSE:
5587 /* Promote the recorded level of all channels written inside the
5588 * preceding if or else block to the level above the if/else block.
5589 */
5590 for (int r = 0; r < this->next_temp; r++) {
5591 for (int c = 0; c < 4; c++) {
5592 if (!writes[4 * r + c])
5593 continue;
5594
5595 if (write_level[4 * r + c] == level)
5596 write_level[4 * r + c] = level-1;
5597 }
5598 }
5599 if (inst->op == TGSI_OPCODE_ENDIF)
5600 --level;
5601 break;
5602
5603 case TGSI_OPCODE_IF:
5604 case TGSI_OPCODE_UIF:
5605 ++level;
5606 FALLTHROUGH; /* to mark the condition as read */
5607 default:
5608 /* Continuing the block, clear any channels from the write array that
5609 * are read by this instruction.
5610 */
5611 for (unsigned i = 0; i < ARRAY_SIZE(inst->src); i++) {
5612 if (inst->src[i].file == PROGRAM_TEMPORARY && inst->src[i].reladdr){
5613 /* Any temporary might be read, so no dead code elimination
5614 * across this instruction.
5615 */
5616 memset(writes, 0, sizeof(*writes) * this->next_temp * 4);
5617 } else if (inst->src[i].file == PROGRAM_TEMPORARY) {
5618 /* Clear where it's used as src. */
5619 int src_chans = 1 << GET_SWZ(inst->src[i].swizzle, 0);
5620 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 1);
5621 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 2);
5622 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 3);
5623
5624 for (int c = 0; c < 4; c++) {
5625 if (src_chans & (1 << c))
5626 writes[4 * inst->src[i].index + c] = NULL;
5627 }
5628 }
5629 dead_code_handle_reladdr(writes, inst->src[i].reladdr);
5630 dead_code_handle_reladdr(writes, inst->src[i].reladdr2);
5631 }
5632 for (unsigned i = 0; i < inst->tex_offset_num_offset; i++) {
5633 if (inst->tex_offsets[i].file == PROGRAM_TEMPORARY && inst->tex_offsets[i].reladdr){
5634 /* Any temporary might be read, so no dead code elimination
5635 * across this instruction.
5636 */
5637 memset(writes, 0, sizeof(*writes) * this->next_temp * 4);
5638 } else if (inst->tex_offsets[i].file == PROGRAM_TEMPORARY) {
5639 /* Clear where it's used as src. */
5640 int src_chans = 1 << GET_SWZ(inst->tex_offsets[i].swizzle, 0);
5641 src_chans |= 1 << GET_SWZ(inst->tex_offsets[i].swizzle, 1);
5642 src_chans |= 1 << GET_SWZ(inst->tex_offsets[i].swizzle, 2);
5643 src_chans |= 1 << GET_SWZ(inst->tex_offsets[i].swizzle, 3);
5644
5645 for (int c = 0; c < 4; c++) {
5646 if (src_chans & (1 << c))
5647 writes[4 * inst->tex_offsets[i].index + c] = NULL;
5648 }
5649 }
5650 dead_code_handle_reladdr(writes, inst->tex_offsets[i].reladdr);
5651 dead_code_handle_reladdr(writes, inst->tex_offsets[i].reladdr2);
5652 }
5653
5654 if (inst->resource.file == PROGRAM_TEMPORARY) {
5655 int src_chans;
5656
5657 src_chans = 1 << GET_SWZ(inst->resource.swizzle, 0);
5658 src_chans |= 1 << GET_SWZ(inst->resource.swizzle, 1);
5659 src_chans |= 1 << GET_SWZ(inst->resource.swizzle, 2);
5660 src_chans |= 1 << GET_SWZ(inst->resource.swizzle, 3);
5661
5662 for (int c = 0; c < 4; c++) {
5663 if (src_chans & (1 << c))
5664 writes[4 * inst->resource.index + c] = NULL;
5665 }
5666 }
5667 dead_code_handle_reladdr(writes, inst->resource.reladdr);
5668 dead_code_handle_reladdr(writes, inst->resource.reladdr2);
5669
5670 for (unsigned i = 0; i < ARRAY_SIZE(inst->dst); i++) {
5671 dead_code_handle_reladdr(writes, inst->dst[i].reladdr);
5672 dead_code_handle_reladdr(writes, inst->dst[i].reladdr2);
5673 }
5674 break;
5675 }
5676
5677 /* If this instruction writes to a temporary, add it to the write array.
5678 * If there is already an instruction in the write array for one or more
5679 * of the channels, flag that channel write as dead.
5680 */
5681 for (unsigned i = 0; i < ARRAY_SIZE(inst->dst); i++) {
5682 if (inst->dst[i].file == PROGRAM_TEMPORARY &&
5683 !inst->dst[i].reladdr) {
5684 for (int c = 0; c < 4; c++) {
5685 if (inst->dst[i].writemask & (1 << c)) {
5686 if (writes[4 * inst->dst[i].index + c]) {
5687 if (write_level[4 * inst->dst[i].index + c] < level)
5688 continue;
5689 else
5690 writes[4 * inst->dst[i].index + c]->dead_mask |= (1 << c);
5691 }
5692 writes[4 * inst->dst[i].index + c] = inst;
5693 write_level[4 * inst->dst[i].index + c] = level;
5694 }
5695 }
5696 }
5697 }
5698 }
5699
5700 /* Anything still in the write array at this point is dead code. */
5701 for (int r = 0; r < this->next_temp; r++) {
5702 for (int c = 0; c < 4; c++) {
5703 glsl_to_tgsi_instruction *inst = writes[4 * r + c];
5704 if (inst)
5705 inst->dead_mask |= (1 << c);
5706 }
5707 }
5708
5709 /* Now actually remove the instructions that are completely dead and update
5710 * the writemask of other instructions with dead channels.
5711 */
5712 foreach_in_list_safe(glsl_to_tgsi_instruction, inst, &this->instructions) {
5713 if (!inst->dead_mask || !inst->dst[0].writemask)
5714 continue;
5715 /* Never remove memory stores, no matter how much of the write is dead. */
5716 if (inst->info->is_store)
5717 continue;
5718
5719 if ((inst->dst[0].writemask & ~inst->dead_mask) == 0) {
5720 inst->remove();
5721 delete inst;
5722 removed++;
5723 } else {
5724 if (glsl_base_type_is_64bit(inst->dst[0].type)) {
5725 if (inst->dead_mask == WRITEMASK_XY ||
5726 inst->dead_mask == WRITEMASK_ZW)
5727 inst->dst[0].writemask &= ~(inst->dead_mask);
5728 } else
5729 inst->dst[0].writemask &= ~(inst->dead_mask);
5730 }
5731 }
5732
5733 ralloc_free(write_level);
5734 ralloc_free(writes);
5735
5736 return removed;
5737 }
5738
5739 /* Merge pairs of DFRACEXP instructions, each writing only one of the two destinations, into a single two-destination instruction. */
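/* Sketch (hypothetical registers, operand order as emitted by the visitor):
 *
 *   DFRACEXP TEMP[0].xy, UNDEF,     TEMP[4].xyxy
 *   DFRACEXP UNDEF,      TEMP[1].x, TEMP[4].xyxy
 *
 * is merged into
 *
 *   DFRACEXP TEMP[0].xy, TEMP[1].x, TEMP[4].xyxy
 *
 * If no matching partner is found, the undefined destination is replaced
 * with a scratch temporary that has an empty writemask.
 */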
5740 void
5741 glsl_to_tgsi_visitor::merge_two_dsts(void)
5742 {
5743 /* We never delete inst, but we may delete its successor. */
5744 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5745 glsl_to_tgsi_instruction *inst2;
5746 unsigned defined;
5747
5748 if (num_inst_dst_regs(inst) != 2)
5749 continue;
5750
5751 if (inst->dst[0].file != PROGRAM_UNDEFINED &&
5752 inst->dst[1].file != PROGRAM_UNDEFINED)
5753 continue;
5754
5755 assert(inst->dst[0].file != PROGRAM_UNDEFINED ||
5756 inst->dst[1].file != PROGRAM_UNDEFINED);
5757
5758 if (inst->dst[0].file == PROGRAM_UNDEFINED)
5759 defined = 1;
5760 else
5761 defined = 0;
5762
5763 inst2 = (glsl_to_tgsi_instruction *) inst->next;
5764 while (!inst2->is_tail_sentinel()) {
5765 if (inst->op == inst2->op &&
5766 inst2->dst[defined].file == PROGRAM_UNDEFINED &&
5767 inst->src[0].file == inst2->src[0].file &&
5768 inst->src[0].index == inst2->src[0].index &&
5769 inst->src[0].type == inst2->src[0].type &&
5770 inst->src[0].swizzle == inst2->src[0].swizzle)
5771 break;
5772 inst2 = (glsl_to_tgsi_instruction *) inst2->next;
5773 }
5774
5775 if (inst2->is_tail_sentinel()) {
5776 /* Undefined destinations are not allowed; substitute an unused
5777 * temporary register with an empty writemask.
5778 */
5779 st_src_reg tmp = get_temp(glsl_type::vec4_type);
5780 inst->dst[defined ^ 1] = st_dst_reg(tmp);
5781 inst->dst[defined ^ 1].writemask = 0;
5782 continue;
5783 }
5784
5785 inst->dst[defined ^ 1] = inst2->dst[defined ^ 1];
5786 inst2->remove();
5787 delete inst2;
5788 }
5789 }
5790
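/* Recursively flag every array that is accessed through relative addressing
 * or an extra (2D) index; such arrays cannot be turned into plain
 * temporaries by split_arrays() below.
 */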
5791 template <typename st_reg>
5792 void test_indirect_access(const st_reg& reg, bool *has_indirect_access)
5793 {
5794 if (reg.file == PROGRAM_ARRAY) {
5795 if (reg.reladdr || reg.reladdr2 || reg.has_index2) {
5796 has_indirect_access[reg.array_id] = true;
5797 if (reg.reladdr)
5798 test_indirect_access(*reg.reladdr, has_indirect_access);
5799 if (reg.reladdr2)
5800 test_indirect_access(*reg.reladdr2, has_indirect_access);
5801 }
5802 }
5803 }
5804
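/* Rewrite a single register reference according to array_remap_info:
 * arrays without indirect access become PROGRAM_TEMPORARY registers at the
 * recorded base index, the remaining arrays just get their new, compacted
 * array ID. Relative-address registers are remapped recursively.
 */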
5805 template <typename st_reg>
5806 void remap_array(st_reg& reg, const int *array_remap_info,
5807 const bool *has_indirect_access)
5808 {
5809 if (reg.file == PROGRAM_ARRAY) {
5810 if (!has_indirect_access[reg.array_id]) {
5811 reg.file = PROGRAM_TEMPORARY;
5812 reg.index = reg.index + array_remap_info[reg.array_id];
5813 reg.array_id = 0;
5814 } else {
5815 reg.array_id = array_remap_info[reg.array_id];
5816 }
5817
5818 if (reg.reladdr)
5819 remap_array(*reg.reladdr, array_remap_info, has_indirect_access);
5820
5821 if (reg.reladdr2)
5822 remap_array(*reg.reladdr2, array_remap_info, has_indirect_access);
5823 }
5824 }
5825
5826 /* One-dimensional arrays whose elements are only accessed directly are
5827 * replaced by a corresponding set of temporary registers, which can then
5828 * be subjected to further optimization steps like copy propagation and
5829 * register merging.
5830 */
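/* For illustration only (hypothetical indices): with next_temp == 10 and a
 * directly indexed ARRAY(1) of size 3, a reference such as
 *
 *   MOV ARRAY(1)[2].x, CONST[0]
 *
 * becomes
 *
 *   MOV TEMP[12].x, CONST[0]
 *
 * i.e. element i of the array maps to TEMP[next_temp + i], and next_temp is
 * increased by the array size afterwards.
 */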
5831 void
5832 glsl_to_tgsi_visitor::split_arrays(void)
5833 {
5834 if (!next_array)
5835 return;
5836
5837 bool *has_indirect_access = rzalloc_array(mem_ctx, bool, next_array + 1);
5838
5839 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5840 for (unsigned j = 0; j < num_inst_src_regs(inst); j++)
5841 test_indirect_access(inst->src[j], has_indirect_access);
5842
5843 for (unsigned j = 0; j < inst->tex_offset_num_offset; j++)
5844 test_indirect_access(inst->tex_offsets[j], has_indirect_access);
5845
5846 for (unsigned j = 0; j < num_inst_dst_regs(inst); j++)
5847 test_indirect_access(inst->dst[j], has_indirect_access);
5848
5849 test_indirect_access(inst->resource, has_indirect_access);
5850 }
5851
5852 unsigned array_offset = 0;
5853 unsigned n_remaining_arrays = 0;
5854
5855 /* Double use: for arrays that get split, the entry contains the base
5856 * index of the temporary registers the array is replaced with. For
5857 * arrays that remain it contains the new array ID.
5858 */
5859 int *array_remap_info = rzalloc_array(has_indirect_access, int,
5860 next_array + 1);
5861
5862 for (unsigned i = 1; i <= next_array; ++i) {
5863 if (!has_indirect_access[i]) {
5864 array_remap_info[i] = this->next_temp + array_offset;
5865 array_offset += array_sizes[i - 1];
5866 } else {
5867 array_sizes[n_remaining_arrays] = array_sizes[i-1];
5868 array_remap_info[i] = ++n_remaining_arrays;
5869 }
5870 }
5871
5872 if (next_array != n_remaining_arrays) {
5873 foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
5874 for (unsigned j = 0; j < num_inst_src_regs(inst); j++)
5875 remap_array(inst->src[j], array_remap_info, has_indirect_access);
5876
5877 for (unsigned j = 0; j < inst->tex_offset_num_offset; j++)
5878 remap_array(inst->tex_offsets[j], array_remap_info, has_indirect_access);
5879
5880 for (unsigned j = 0; j < num_inst_dst_regs(inst); j++) {
5881 remap_array(inst->dst[j], array_remap_info, has_indirect_access);
5882 }
5883 remap_array(inst->resource, array_remap_info, has_indirect_access);
5884 }
5885 }
5886
5887 ralloc_free(has_indirect_access);
5888 this->next_temp += array_offset;
5889 next_array = n_remaining_arrays;
5890 }
5891
5892 /* Merges temporary registers together where possible to reduce the number of
5893 * registers needed to run a program.
5894 *
5895 * Produces optimal code only after copy propagation and dead code elimination
5896 * have been run. */
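/* For illustration only (hypothetical live ranges): if TEMP[0] is last read
 * at instruction 5 and TEMP[3] is first written at instruction 7, their live
 * ranges do not overlap and TEMP[3] can be renamed to TEMP[0], reducing the
 * number of temporaries the driver has to allocate. The actual analysis and
 * remapping are done by get_temp_registers_required_live_ranges() and
 * get_temp_registers_remapping().
 */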
5897 void
5898 glsl_to_tgsi_visitor::merge_registers(void)
5899 {
5900 class array_live_range *arr_live_ranges = NULL;
5901
5902 struct register_live_range *reg_live_ranges =
5903 rzalloc_array(mem_ctx, struct register_live_range, this->next_temp);
5904
5905 if (this->next_array > 0) {
5906 arr_live_ranges = new array_live_range[this->next_array];
5907 for (unsigned i = 0; i < this->next_array; ++i)
5908 arr_live_ranges[i] = array_live_range(i+1, this->array_sizes[i]);
5909 }
5910
5911
5912 if (get_temp_registers_required_live_ranges(reg_live_ranges, &this->instructions,
5913 this->next_temp, reg_live_ranges,
5914 this->next_array, arr_live_ranges)) {
5915 struct rename_reg_pair *renames =
5916 rzalloc_array(reg_live_ranges, struct rename_reg_pair, this->next_temp);
5917 get_temp_registers_remapping(reg_live_ranges, this->next_temp,
5918 reg_live_ranges, renames);
5919 rename_temp_registers(renames);
5920
5921 this->next_array = merge_arrays(this->next_array, this->array_sizes,
5922 &this->instructions, arr_live_ranges);
5923 }
5924
5925 if (arr_live_ranges)
5926 delete[] arr_live_ranges;
5927
5928 ralloc_free(reg_live_ranges);
5929 }
5930
5931 /* Reassign indices to temporary registers by reusing unused indices created
5932 * by optimization passes. */
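/* For illustration only (hypothetical indices): if, after the earlier
 * passes, only TEMP[0], TEMP[4] and TEMP[9] are still written, they are
 * renumbered to TEMP[0], TEMP[1] and TEMP[2], and next_temp drops from 10
 * to 3.
 */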
5933 void
5934 glsl_to_tgsi_visitor::renumber_registers(void)
5935 {
5936 int i = 0;
5937 int new_index = 0;
5938 int *first_writes = ralloc_array(mem_ctx, int, this->next_temp);
5939 struct rename_reg_pair *renames = rzalloc_array(mem_ctx, struct rename_reg_pair, this->next_temp);
5940
5941 for (i = 0; i < this->next_temp; i++) {
5942 first_writes[i] = -1;
5943 }
5944 get_first_temp_write(first_writes);
5945
5946 for (i = 0; i < this->next_temp; i++) {
5947 if (first_writes[i] < 0) continue;
5948 if (i != new_index) {
5949 renames[i].new_reg = new_index;
5950 renames[i].valid = true;
5951 }
5952 new_index++;
5953 }
5954
5955 rename_temp_registers(renames);
5956 this->next_temp = new_index;
5957 ralloc_free(renames);
5958 ralloc_free(first_writes);
5959 }
5960
5961 #ifndef NDEBUG
5962 void glsl_to_tgsi_visitor::print_stats()
5963 {
5964 int narray_registers = 0;
5965 for (unsigned i = 0; i < this->next_array; ++i)
5966 narray_registers += this->array_sizes[i];
5967
5968 int ninstructions = 0;
5969 foreach_in_list(glsl_to_tgsi_instruction, inst, &instructions) {
5970 ++ninstructions;
5971 }
5972
5973 simple_mtx_lock(&print_stats_mutex);
5974 stats_log << next_array << ", "
5975 << next_temp << ", "
5976 << narray_registers << ", "
5977 << next_temp + narray_registers << ", "
5978 << ninstructions << "\n";
5979 simple_mtx_unlock(&print_stats_mutex);
5980 }
5981 #endif
5982 /* ------------------------- TGSI conversion stuff -------------------------- */
5983
5984 /**
5985 * Intermediate state used during shader translation.
5986 */
5987 struct st_translate {
5988 struct ureg_program *ureg;
5989
5990 unsigned temps_size;
5991 struct ureg_dst *temps;
5992
5993 struct ureg_dst *arrays;
5994 unsigned num_temp_arrays;
5995 struct ureg_src *constants;
5996 int num_constants;
5997 struct ureg_src *immediates;
5998 int num_immediates;
5999 struct ureg_dst outputs[PIPE_MAX_SHADER_OUTPUTS];
6000 struct ureg_src inputs[PIPE_MAX_SHADER_INPUTS];
6001 struct ureg_dst address[3];
6002 struct ureg_src samplers[PIPE_MAX_SAMPLERS];
6003 struct ureg_src buffers[PIPE_MAX_SHADER_BUFFERS];
6004 struct ureg_src images[PIPE_MAX_SHADER_IMAGES];
6005 struct ureg_src systemValues[SYSTEM_VALUE_MAX];
6006 struct ureg_src hw_atomics[PIPE_MAX_HW_ATOMIC_BUFFERS];
6007 struct ureg_src shared_memory;
6008 unsigned *array_sizes;
6009 struct inout_decl *input_decls;
6010 unsigned num_input_decls;
6011 struct inout_decl *output_decls;
6012 unsigned num_output_decls;
6013
6014 const ubyte *inputMapping;
6015 const ubyte *outputMapping;
6016
6017 enum pipe_shader_type procType; /**< PIPE_SHADER_VERTEX/FRAGMENT */
6018 bool tg4_component_in_swizzle;
6019 };
6020
6021 /**
6022 * Map a glsl_to_tgsi constant/immediate to a TGSI immediate.
6023 */
6024 static struct ureg_src
6025 emit_immediate(struct st_translate *t,
6026 gl_constant_value values[4],
6027 GLenum type, int size)
6028 {
6029 struct ureg_program *ureg = t->ureg;
6030
6031 switch (type) {
6032 case GL_FLOAT:
6033 return ureg_DECL_immediate(ureg, &values[0].f, size);
6034 case GL_DOUBLE:
6035 return ureg_DECL_immediate_f64(ureg, (double *)&values[0].f, size);
6036 case GL_INT64_ARB:
6037 return ureg_DECL_immediate_int64(ureg, (int64_t *)&values[0].f, size);
6038 case GL_UNSIGNED_INT64_ARB:
6039 return ureg_DECL_immediate_uint64(ureg, (uint64_t *)&values[0].f, size);
6040 case GL_INT:
6041 return ureg_DECL_immediate_int(ureg, &values[0].i, size);
6042 case GL_UNSIGNED_INT:
6043 case GL_BOOL:
6044 return ureg_DECL_immediate_uint(ureg, &values[0].u, size);
6045 default:
6046 assert(!"should not get here - type must be float, double, int64, uint64, int, uint, or bool");
6047 return ureg_src_undef();
6048 }
6049 }
6050
6051 /**
6052 * Map a glsl_to_tgsi dst register to a TGSI ureg_dst register.
6053 */
6054 static struct ureg_dst
6055 dst_register(struct st_translate *t, gl_register_file file, unsigned index,
6056 unsigned array_id)
6057 {
6058 unsigned array;
6059
6060 switch (file) {
6061 case PROGRAM_UNDEFINED:
6062 return ureg_dst_undef();
6063
6064 case PROGRAM_TEMPORARY:
6065 /* Allocate space for temporaries on demand. */
6066 if (index >= t->temps_size) {
6067 const int inc = align(index - t->temps_size + 1, 4096);
6068
6069 t->temps = (struct ureg_dst*)
6070 realloc(t->temps,
6071 (t->temps_size + inc) * sizeof(struct ureg_dst));
6072 if (!t->temps)
6073 return ureg_dst_undef();
6074
6075 memset(t->temps + t->temps_size, 0, inc * sizeof(struct ureg_dst));
6076 t->temps_size += inc;
6077 }
6078
6079 if (ureg_dst_is_undef(t->temps[index]))
6080 t->temps[index] = ureg_DECL_local_temporary(t->ureg);
6081
6082 return t->temps[index];
6083
6084 case PROGRAM_ARRAY:
6085 assert(array_id && array_id <= t->num_temp_arrays);
6086 array = array_id - 1;
6087
6088 if (ureg_dst_is_undef(t->arrays[array]))
6089 t->arrays[array] = ureg_DECL_array_temporary(
6090 t->ureg, t->array_sizes[array], TRUE);
6091
6092 return ureg_dst_array_offset(t->arrays[array], index);
6093
6094 case PROGRAM_OUTPUT:
6095 if (!array_id) {
6096 if (t->procType == PIPE_SHADER_FRAGMENT)
6097 assert(index < 2 * FRAG_RESULT_MAX);
6098 else if (t->procType == PIPE_SHADER_TESS_CTRL ||
6099 t->procType == PIPE_SHADER_TESS_EVAL)
6100 assert(index < VARYING_SLOT_TESS_MAX);
6101 else
6102 assert(index < VARYING_SLOT_MAX);
6103
6104 assert(t->outputMapping[index] < ARRAY_SIZE(t->outputs));
6105 assert(t->outputs[t->outputMapping[index]].File != TGSI_FILE_NULL);
6106 return t->outputs[t->outputMapping[index]];
6107 }
6108 else {
6109 struct inout_decl *decl =
6110 find_inout_array(t->output_decls,
6111 t->num_output_decls, array_id);
6112 unsigned mesa_index = decl->mesa_index;
6113 ubyte slot = t->outputMapping[mesa_index];
6114
6115 assert(slot != 0xff && t->outputs[slot].File == TGSI_FILE_OUTPUT);
6116
6117 struct ureg_dst dst = t->outputs[slot];
6118 dst.ArrayID = array_id;
6119 return ureg_dst_array_offset(dst, index - mesa_index);
6120 }
6121
6122 case PROGRAM_ADDRESS:
6123 return t->address[index];
6124
6125 default:
6126 assert(!"unknown dst register file");
6127 return ureg_dst_undef();
6128 }
6129 }
6130
6131 static struct ureg_src
6132 translate_src(struct st_translate *t, const st_src_reg *src_reg);
6133
6134 static struct ureg_src
6135 translate_addr(struct st_translate *t, const st_src_reg *reladdr,
6136 unsigned addr_index)
6137 {
6138 return ureg_src(t->address[addr_index]);
6139 }
6140
6141 /**
6142 * Create a TGSI ureg_dst register from an st_dst_reg.
6143 */
6144 static struct ureg_dst
6145 translate_dst(struct st_translate *t,
6146 const st_dst_reg *dst_reg,
6147 bool saturate)
6148 {
6149 struct ureg_dst dst = dst_register(t, dst_reg->file, dst_reg->index,
6150 dst_reg->array_id);
6151
6152 if (dst.File == TGSI_FILE_NULL)
6153 return dst;
6154
6155 dst = ureg_writemask(dst, dst_reg->writemask);
6156
6157 if (saturate)
6158 dst = ureg_saturate(dst);
6159
6160 if (dst_reg->reladdr != NULL) {
6161 assert(dst_reg->file != PROGRAM_TEMPORARY);
6162 dst = ureg_dst_indirect(dst, translate_addr(t, dst_reg->reladdr, 0));
6163 }
6164
6165 if (dst_reg->has_index2) {
6166 if (dst_reg->reladdr2)
6167 dst = ureg_dst_dimension_indirect(dst,
6168 translate_addr(t, dst_reg->reladdr2, 1),
6169 dst_reg->index2D);
6170 else
6171 dst = ureg_dst_dimension(dst, dst_reg->index2D);
6172 }
6173
6174 return dst;
6175 }
6176
6177 /**
6178 * Create a TGSI ureg_src register from an st_src_reg.
6179 */
6180 static struct ureg_src
6181 translate_src(struct st_translate *t, const st_src_reg *src_reg)
6182 {
6183 struct ureg_src src;
6184 int index = src_reg->index;
6185 int double_reg2 = src_reg->double_reg2 ? 1 : 0;
6186
6187 switch (src_reg->file) {
6188 case PROGRAM_UNDEFINED:
6189 src = ureg_imm4f(t->ureg, 0, 0, 0, 0);
6190 break;
6191
6192 case PROGRAM_TEMPORARY:
6193 case PROGRAM_ARRAY:
6194 src = ureg_src(dst_register(t, src_reg->file, src_reg->index,
6195 src_reg->array_id));
6196 break;
6197
6198 case PROGRAM_OUTPUT: {
6199 struct ureg_dst dst = dst_register(t, src_reg->file, src_reg->index,
6200 src_reg->array_id);
6201 assert(dst.WriteMask != 0);
6202 unsigned shift = ffs(dst.WriteMask) - 1;
6203 src = ureg_swizzle(ureg_src(dst),
6204 shift,
6205 MIN2(shift + 1, 3),
6206 MIN2(shift + 2, 3),
6207 MIN2(shift + 3, 3));
6208 break;
6209 }
6210
6211 case PROGRAM_UNIFORM:
6212 assert(src_reg->index >= 0);
6213 src = src_reg->index < t->num_constants ?
6214 t->constants[src_reg->index] : ureg_imm4f(t->ureg, 0, 0, 0, 0);
6215 break;
6216 case PROGRAM_STATE_VAR:
6217 case PROGRAM_CONSTANT: /* ie, immediate */
6218 if (src_reg->has_index2)
6219 src = ureg_src_register(TGSI_FILE_CONSTANT, src_reg->index);
6220 else
6221 src = src_reg->index >= 0 && src_reg->index < t->num_constants ?
6222 t->constants[src_reg->index] : ureg_imm4f(t->ureg, 0, 0, 0, 0);
6223 break;
6224
6225 case PROGRAM_IMMEDIATE:
6226 assert(src_reg->index >= 0 && src_reg->index < t->num_immediates);
6227 src = t->immediates[src_reg->index];
6228 break;
6229
6230 case PROGRAM_INPUT:
6231 /* GLSL inputs are 64-bit containers, so we have to
6232 * map back to the original index and add the offset after
6233 * mapping. */
6234 index -= double_reg2;
6235 if (!src_reg->array_id) {
6236 assert(t->inputMapping[index] < ARRAY_SIZE(t->inputs));
6237 assert(t->inputs[t->inputMapping[index]].File != TGSI_FILE_NULL);
6238 src = t->inputs[t->inputMapping[index] + double_reg2];
6239 }
6240 else {
6241 struct inout_decl *decl = find_inout_array(t->input_decls,
6242 t->num_input_decls,
6243 src_reg->array_id);
6244 unsigned mesa_index = decl->mesa_index;
6245 ubyte slot = t->inputMapping[mesa_index];
6246
6247 assert(slot != 0xff && t->inputs[slot].File == TGSI_FILE_INPUT);
6248
6249 src = t->inputs[slot];
6250 src.ArrayID = src_reg->array_id;
6251 src = ureg_src_array_offset(src, index + double_reg2 - mesa_index);
6252 }
6253 break;
6254
6255 case PROGRAM_ADDRESS:
6256 src = ureg_src(t->address[src_reg->index]);
6257 break;
6258
6259 case PROGRAM_SYSTEM_VALUE:
6260 assert(src_reg->index < (int) ARRAY_SIZE(t->systemValues));
6261 src = t->systemValues[src_reg->index];
6262 break;
6263
6264 case PROGRAM_HW_ATOMIC:
6265 src = ureg_src_array_register(TGSI_FILE_HW_ATOMIC, src_reg->index,
6266 src_reg->array_id);
6267 break;
6268
6269 default:
6270 assert(!"unknown src register file");
6271 return ureg_src_undef();
6272 }
6273
6274 if (src_reg->has_index2) {
6275 /* 2D indexes occur with geometry shader inputs (attrib, vertex)
6276 * and UBO constant buffers (buffer, position).
6277 */
6278 if (src_reg->reladdr2)
6279 src = ureg_src_dimension_indirect(src,
6280 translate_addr(t, src_reg->reladdr2, 1),
6281 src_reg->index2D);
6282 else
6283 src = ureg_src_dimension(src, src_reg->index2D);
6284 }
6285
6286 src = ureg_swizzle(src,
6287 GET_SWZ(src_reg->swizzle, 0) & 0x3,
6288 GET_SWZ(src_reg->swizzle, 1) & 0x3,
6289 GET_SWZ(src_reg->swizzle, 2) & 0x3,
6290 GET_SWZ(src_reg->swizzle, 3) & 0x3);
6291
6292 if (src_reg->abs)
6293 src = ureg_abs(src);
6294
6295 if ((src_reg->negate & 0xf) == NEGATE_XYZW)
6296 src = ureg_negate(src);
6297
6298 if (src_reg->reladdr != NULL) {
6299 assert(src_reg->file != PROGRAM_TEMPORARY);
6300 src = ureg_src_indirect(src, translate_addr(t, src_reg->reladdr, 0));
6301 }
6302
6303 return src;
6304 }
6305
6306 static struct tgsi_texture_offset
6307 translate_tex_offset(struct st_translate *t,
6308 const st_src_reg *in_offset)
6309 {
6310 struct tgsi_texture_offset offset;
6311 struct ureg_src src = translate_src(t, in_offset);
6312
6313 offset.File = src.File;
6314 offset.Index = src.Index;
6315 offset.SwizzleX = src.SwizzleX;
6316 offset.SwizzleY = src.SwizzleY;
6317 offset.SwizzleZ = src.SwizzleZ;
6318 offset.Padding = 0;
6319
6320 assert(!src.Indirect);
6321 assert(!src.DimIndirect);
6322 assert(!src.Dimension);
6323 assert(!src.Absolute); /* those shouldn't be used with integers anyway */
6324 assert(!src.Negate);
6325
6326 return offset;
6327 }
6328
6329 static void
6330 compile_tgsi_instruction(struct st_translate *t,
6331 const glsl_to_tgsi_instruction *inst)
6332 {
6333 struct ureg_program *ureg = t->ureg;
6334 int i;
6335 struct ureg_dst dst[2];
6336 struct ureg_src src[4];
6337 struct tgsi_texture_offset texoffsets[MAX_GLSL_TEXTURE_OFFSET];
6338
6339 int num_dst;
6340 int num_src;
6341 enum tgsi_texture_type tex_target = TGSI_TEXTURE_BUFFER;
6342
6343 num_dst = num_inst_dst_regs(inst);
6344 num_src = num_inst_src_regs(inst);
6345
6346 for (i = 0; i < num_dst; i++)
6347 dst[i] = translate_dst(t,
6348 &inst->dst[i],
6349 inst->saturate);
6350
6351 for (i = 0; i < num_src; i++)
6352 src[i] = translate_src(t, &inst->src[i]);
6353
6354 switch (inst->op) {
6355 case TGSI_OPCODE_BGNLOOP:
6356 case TGSI_OPCODE_ELSE:
6357 case TGSI_OPCODE_ENDLOOP:
6358 case TGSI_OPCODE_IF:
6359 case TGSI_OPCODE_UIF:
6360 assert(num_dst == 0);
6361 ureg_insn(ureg, inst->op, NULL, 0, src, num_src, inst->precise);
6362 return;
6363
6364 case TGSI_OPCODE_TEX:
6365 case TGSI_OPCODE_TEX_LZ:
6366 case TGSI_OPCODE_TXB:
6367 case TGSI_OPCODE_TXD:
6368 case TGSI_OPCODE_TXL:
6369 case TGSI_OPCODE_TXP:
6370 case TGSI_OPCODE_TXQ:
6371 case TGSI_OPCODE_TXQS:
6372 case TGSI_OPCODE_TXF:
6373 case TGSI_OPCODE_TXF_LZ:
6374 case TGSI_OPCODE_TEX2:
6375 case TGSI_OPCODE_TXB2:
6376 case TGSI_OPCODE_TXL2:
6377 case TGSI_OPCODE_TG4:
6378 case TGSI_OPCODE_LODQ:
6379 case TGSI_OPCODE_SAMP2HND:
6380 if (inst->resource.file == PROGRAM_SAMPLER) {
6381 src[num_src] = t->samplers[inst->resource.index];
6382 if (t->tg4_component_in_swizzle && inst->op == TGSI_OPCODE_TG4)
6383 src[num_src].SwizzleX = inst->gather_component;
6384 } else {
6385 /* Bindless samplers. */
6386 src[num_src] = translate_src(t, &inst->resource);
6387 }
6388 assert(src[num_src].File != TGSI_FILE_NULL);
6389 if (inst->resource.reladdr)
6390 src[num_src] =
6391 ureg_src_indirect(src[num_src],
6392 translate_addr(t, inst->resource.reladdr, 2));
6393 num_src++;
6394 for (i = 0; i < (int)inst->tex_offset_num_offset; i++) {
6395 texoffsets[i] = translate_tex_offset(t, &inst->tex_offsets[i]);
6396 }
6397 tex_target = st_translate_texture_target(inst->tex_target, inst->tex_shadow);
6398
6399 ureg_tex_insn(ureg,
6400 inst->op,
6401 dst, num_dst,
6402 tex_target,
6403 st_translate_texture_type(inst->tex_type),
6404 texoffsets, inst->tex_offset_num_offset,
6405 src, num_src);
6406 return;
6407
6408 case TGSI_OPCODE_RESQ:
6409 case TGSI_OPCODE_LOAD:
6410 case TGSI_OPCODE_ATOMUADD:
6411 case TGSI_OPCODE_ATOMXCHG:
6412 case TGSI_OPCODE_ATOMCAS:
6413 case TGSI_OPCODE_ATOMAND:
6414 case TGSI_OPCODE_ATOMOR:
6415 case TGSI_OPCODE_ATOMXOR:
6416 case TGSI_OPCODE_ATOMUMIN:
6417 case TGSI_OPCODE_ATOMUMAX:
6418 case TGSI_OPCODE_ATOMIMIN:
6419 case TGSI_OPCODE_ATOMIMAX:
6420 case TGSI_OPCODE_ATOMFADD:
6421 case TGSI_OPCODE_IMG2HND:
6422 case TGSI_OPCODE_ATOMINC_WRAP:
6423 case TGSI_OPCODE_ATOMDEC_WRAP:
6424 for (i = num_src - 1; i >= 0; i--)
6425 src[i + 1] = src[i];
6426 num_src++;
6427 if (inst->resource.file == PROGRAM_MEMORY) {
6428 src[0] = t->shared_memory;
6429 } else if (inst->resource.file == PROGRAM_BUFFER) {
6430 src[0] = t->buffers[inst->resource.index];
6431 } else if (inst->resource.file == PROGRAM_HW_ATOMIC) {
6432 src[0] = translate_src(t, &inst->resource);
6433 } else if (inst->resource.file == PROGRAM_CONSTANT) {
6434 assert(inst->resource.has_index2);
6435 src[0] = ureg_src_register(TGSI_FILE_CONSTBUF, inst->resource.index);
6436 } else {
6437 assert(inst->resource.file != PROGRAM_UNDEFINED);
6438 if (inst->resource.file == PROGRAM_IMAGE) {
6439 src[0] = t->images[inst->resource.index];
6440 } else {
6441 /* Bindless images. */
6442 src[0] = translate_src(t, &inst->resource);
6443 }
6444 tex_target = st_translate_texture_target(inst->tex_target, inst->tex_shadow);
6445 }
6446 if (inst->resource.reladdr)
6447 src[0] = ureg_src_indirect(src[0],
6448 translate_addr(t, inst->resource.reladdr, 2));
6449 assert(src[0].File != TGSI_FILE_NULL);
6450 ureg_memory_insn(ureg, inst->op, dst, num_dst, src, num_src,
6451 inst->buffer_access,
6452 tex_target, inst->image_format);
6453 break;
6454
6455 case TGSI_OPCODE_STORE:
6456 if (inst->resource.file == PROGRAM_MEMORY) {
6457 dst[0] = ureg_dst(t->shared_memory);
6458 } else if (inst->resource.file == PROGRAM_BUFFER) {
6459 dst[0] = ureg_dst(t->buffers[inst->resource.index]);
6460 } else {
6461 if (inst->resource.file == PROGRAM_IMAGE) {
6462 dst[0] = ureg_dst(t->images[inst->resource.index]);
6463 } else {
6464 /* Bindless images. */
6465 dst[0] = ureg_dst(translate_src(t, &inst->resource));
6466 }
6467 tex_target = st_translate_texture_target(inst->tex_target, inst->tex_shadow);
6468 }
6469 dst[0] = ureg_writemask(dst[0], inst->dst[0].writemask);
6470 if (inst->resource.reladdr)
6471 dst[0] = ureg_dst_indirect(dst[0],
6472 translate_addr(t, inst->resource.reladdr, 2));
6473 assert(dst[0].File != TGSI_FILE_NULL);
6474 ureg_memory_insn(ureg, inst->op, dst, num_dst, src, num_src,
6475 inst->buffer_access,
6476 tex_target, inst->image_format);
6477 break;
6478
6479 default:
6480 ureg_insn(ureg,
6481 inst->op,
6482 dst, num_dst,
6483 src, num_src, inst->precise);
6484 break;
6485 }
6486 }
6487
6488 /* Invert SamplePos.y when rendering to the default framebuffer. */
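/* In effect (sketch, operand names are illustrative):
 *
 *   samplepos.y = (trans.x == 1.0) ? samplepos.y : 1.0 - samplepos.y
 *
 * where trans is the constant selected by wpos_y_transform, so the flip
 * only happens when rendering to the default framebuffer.
 */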
6489 static void
6490 emit_samplepos_adjustment(struct st_translate *t, int wpos_y_transform)
6491 {
6492 struct ureg_program *ureg = t->ureg;
6493
6494 assert(wpos_y_transform >= 0);
6495 struct ureg_src trans_const = ureg_DECL_constant(ureg, wpos_y_transform);
6496 struct ureg_src samplepos_sysval = t->systemValues[SYSTEM_VALUE_SAMPLE_POS];
6497 struct ureg_dst samplepos_flipped = ureg_DECL_temporary(ureg);
6498 struct ureg_dst is_fbo = ureg_DECL_temporary(ureg);
6499
6500 ureg_ADD(ureg, ureg_writemask(samplepos_flipped, TGSI_WRITEMASK_Y),
6501 ureg_imm1f(ureg, 1), ureg_negate(samplepos_sysval));
6502
6503 /* If trans.x == 1, use samplepos.y, else use 1 - samplepos.y. */
6504 ureg_FSEQ(ureg, ureg_writemask(is_fbo, TGSI_WRITEMASK_Y),
6505 ureg_scalar(trans_const, TGSI_SWIZZLE_X), ureg_imm1f(ureg, 1));
6506 ureg_UCMP(ureg, ureg_writemask(samplepos_flipped, TGSI_WRITEMASK_Y),
6507 ureg_src(is_fbo), samplepos_sysval, ureg_src(samplepos_flipped));
6508 ureg_MOV(ureg, ureg_writemask(samplepos_flipped, TGSI_WRITEMASK_X),
6509 samplepos_sysval);
6510
6511 /* Use the result in place of the system value. */
6512 t->systemValues[SYSTEM_VALUE_SAMPLE_POS] = ureg_src(samplepos_flipped);
6513 }
6514
6515
6516 /**
6517 * Emit the TGSI instructions for inverting and adjusting WPOS.
6518 * This code is unavoidable because it also depends on whether
6519 * an FBO is bound (STATE_FB_WPOS_Y_TRANSFORM).
6520 */
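/* Roughly, the emitted code computes (sketch, names as used below):
 *
 *   wpos_temp   = wpos_input + (adjX, adjY, 0, 0)
 *   wpos_temp.y = wpos_temp.y * trans.x + trans.y   // if 'invert'
 *   wpos_temp.y = wpos_temp.y * trans.z + trans.w   // otherwise
 *
 * where trans is the STATE_FB_WPOS_Y_TRANSFORM constant whose .xy/.zw pairs
 * select between inversion and identity, and adjY is chosen with a CMP when
 * the two possible biases differ.
 */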
6521 static void
6522 emit_wpos_adjustment(struct gl_context *ctx,
6523 struct st_translate *t,
6524 int wpos_transform_const,
6525 boolean invert,
6526 GLfloat adjX, GLfloat adjY[2])
6527 {
6528 struct ureg_program *ureg = t->ureg;
6529
6530 assert(wpos_transform_const >= 0);
6531
6532 /* The fragment program uses the fragment position input.
6533 * Replace instances of INPUT[WPOS] with temp T,
6534 * where T is INPUT[WPOS] with the Y coordinate inverted.
6535 */
6536 struct ureg_src wpostrans = ureg_DECL_constant(ureg, wpos_transform_const);
6537 struct ureg_dst wpos_temp = ureg_DECL_temporary(ureg);
6538 struct ureg_src *wpos =
6539 ctx->Const.GLSLFragCoordIsSysVal ?
6540 &t->systemValues[SYSTEM_VALUE_FRAG_COORD] :
6541 &t->inputs[t->inputMapping[VARYING_SLOT_POS]];
6542 struct ureg_src wpos_input = *wpos;
6543
6544 /* First, apply the coordinate shift: */
6545 if (adjX || adjY[0] || adjY[1]) {
6546 if (adjY[0] != adjY[1]) {
6547 /* Adjust the y coordinate by adjY[1] or adjY[0] respectively
6548 * depending on whether inversion is actually going to be applied
6549 * or not, which is determined by testing against the inversion
6550 * state variable used below, which will be either +1 or -1.
6551 */
6552 struct ureg_dst adj_temp = ureg_DECL_local_temporary(ureg);
6553
6554 ureg_CMP(ureg, adj_temp,
6555 ureg_scalar(wpostrans, invert ? 2 : 0),
6556 ureg_imm4f(ureg, adjX, adjY[0], 0.0f, 0.0f),
6557 ureg_imm4f(ureg, adjX, adjY[1], 0.0f, 0.0f));
6558 ureg_ADD(ureg, wpos_temp, wpos_input, ureg_src(adj_temp));
6559 } else {
6560 ureg_ADD(ureg, wpos_temp, wpos_input,
6561 ureg_imm4f(ureg, adjX, adjY[0], 0.0f, 0.0f));
6562 }
6563 wpos_input = ureg_src(wpos_temp);
6564 } else {
6565 /* MOV wpos_temp, input[wpos]
6566 */
6567 ureg_MOV(ureg, wpos_temp, wpos_input);
6568 }
6569
6570 /* Now the conditional y flip: STATE_FB_WPOS_Y_TRANSFORM.xy/zw will be
6571 * inversion/identity, or the other way around if we're drawing to an FBO.
6572 */
6573 if (invert) {
6574 /* MAD wpos_temp.y, wpos_input, wpostrans.xxxx, wpostrans.yyyy
6575 */
6576 ureg_MAD(ureg,
6577 ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y),
6578 wpos_input,
6579 ureg_scalar(wpostrans, 0),
6580 ureg_scalar(wpostrans, 1));
6581 } else {
6582 /* MAD wpos_temp.y, wpos_input, wpostrans.zzzz, wpostrans.wwww
6583 */
6584 ureg_MAD(ureg,
6585 ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y),
6586 wpos_input,
6587 ureg_scalar(wpostrans, 2),
6588 ureg_scalar(wpostrans, 3));
6589 }
6590
6591 /* Use wpos_temp as position input from here on:
6592 */
6593 *wpos = ureg_src(wpos_temp);
6594 }
6595
6596
6597 /**
6598 * Emit fragment position/coordinate code.
6599 */
6600 static void
6601 emit_wpos(struct st_context *st,
6602 struct st_translate *t,
6603 const struct gl_program *program,
6604 struct ureg_program *ureg,
6605 int wpos_transform_const)
6606 {
6607 struct pipe_screen *pscreen = st->screen;
6608 GLfloat adjX = 0.0f;
6609 GLfloat adjY[2] = { 0.0f, 0.0f };
6610 boolean invert = FALSE;
6611
6612 /* Query the pixel center conventions supported by the pipe driver and set
6613 * adjX, adjY to help out if it cannot handle the requested one internally.
6614 *
6615 * The bias of the y-coordinate depends on whether y-inversion takes place
6616 * (adjY[1]) or not (adjY[0]), which is in turn dependent on whether we are
6617 * drawing to an FBO (causes additional inversion), and whether the pipe
6618 * driver origin and the requested origin differ (the latter condition is
6619 * stored in the 'invert' variable).
6620 *
6621 * For height = 100 (i = integer, h = half-integer, l = lower, u = upper):
6622 *
6623 * center shift only:
6624 * i -> h: +0.5
6625 * h -> i: -0.5
6626 *
6627 * inversion only:
6628 * l,i -> u,i: ( 0.0 + 1.0) * -1 + 100 = 99
6629 * l,h -> u,h: ( 0.5 + 0.0) * -1 + 100 = 99.5
6630 * u,i -> l,i: (99.0 + 1.0) * -1 + 100 = 0
6631 * u,h -> l,h: (99.5 + 0.0) * -1 + 100 = 0.5
6632 *
6633 * inversion and center shift:
6634 * l,i -> u,h: ( 0.0 + 0.5) * -1 + 100 = 99.5
6635 * l,h -> u,i: ( 0.5 + 0.5) * -1 + 100 = 99
6636 * u,i -> l,h: (99.0 + 0.5) * -1 + 100 = 0.5
6637 * u,h -> l,i: (99.5 + 0.5) * -1 + 100 = 0
6638 */
6639 if (program->info.fs.origin_upper_left) {
6640 /* Fragment shader wants origin in upper-left */
6641 if (pscreen->get_param(pscreen, PIPE_CAP_FS_COORD_ORIGIN_UPPER_LEFT)) {
6642 /* the driver supports upper-left origin */
6643 }
6644 else if (pscreen->get_param(pscreen, PIPE_CAP_FS_COORD_ORIGIN_LOWER_LEFT)) {
6645 /* the driver supports lower-left origin, need to invert Y */
6646 ureg_property(ureg, TGSI_PROPERTY_FS_COORD_ORIGIN,
6647 TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
6648 invert = TRUE;
6649 }
6650 else
6651 assert(0);
6652 }
6653 else {
6654 /* Fragment shader wants origin in lower-left */
6655 if (pscreen->get_param(pscreen, PIPE_CAP_FS_COORD_ORIGIN_LOWER_LEFT))
6656 /* the driver supports lower-left origin */
6657 ureg_property(ureg, TGSI_PROPERTY_FS_COORD_ORIGIN,
6658 TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
6659 else if (pscreen->get_param(pscreen, PIPE_CAP_FS_COORD_ORIGIN_UPPER_LEFT))
6660 /* the driver supports upper-left origin, need to invert Y */
6661 invert = TRUE;
6662 else
6663 assert(0);
6664 }
6665
6666 if (program->info.fs.pixel_center_integer) {
6667 /* Fragment shader wants pixel center integer */
6668 if (pscreen->get_param(pscreen, PIPE_CAP_FS_COORD_PIXEL_CENTER_INTEGER)) {
6669 /* the driver supports pixel center integer */
6670 adjY[1] = 1.0f;
6671 ureg_property(ureg, TGSI_PROPERTY_FS_COORD_PIXEL_CENTER,
6672 TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
6673 }
6674 else if (pscreen->get_param(pscreen, PIPE_CAP_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
6675 /* the driver supports pixel center half integer, need to bias X,Y */
6676 adjX = -0.5f;
6677 adjY[0] = -0.5f;
6678 adjY[1] = 0.5f;
6679 }
6680 else
6681 assert(0);
6682 }
6683 else {
6684 /* Fragment shader wants pixel center half integer */
6685 if (pscreen->get_param(pscreen, PIPE_CAP_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
6686 /* the driver supports pixel center half integer */
6687 }
6688 else if (pscreen->get_param(pscreen, PIPE_CAP_FS_COORD_PIXEL_CENTER_INTEGER)) {
6689 /* the driver supports pixel center integer, need to bias X,Y */
6690 adjX = adjY[0] = adjY[1] = 0.5f;
6691 ureg_property(ureg, TGSI_PROPERTY_FS_COORD_PIXEL_CENTER,
6692 TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
6693 }
6694 else
6695 assert(0);
6696 }
6697
6698 /* We invert after the adjustment so that we avoid the MOV to a temporary
6699 * and reuse the adjustment ADD instead. */
6700 emit_wpos_adjustment(st->ctx, t, wpos_transform_const, invert, adjX, adjY);
6701 }
6702
6703 /**
6704 * OpenGL's fragment gl_FrontFace input is 1 for front-facing, 0 for back.
6705 * TGSI uses +1 for front, -1 for back.
6706 * This function converts the TGSI value to the GL value. Simply clamping/
6707 * saturating the value to [0,1] does the job.
6708 */
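/* Sketch: with native integers the flag is computed as
 *
 *   face_temp = (face_input >= 0.0) ? ~0 : 0        // FSGE
 *
 * otherwise the +1/-1 input is simply saturated, yielding 1.0 for front
 * faces and 0.0 for back faces.
 */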
6709 static void
6710 emit_face_var(struct gl_context *ctx, struct st_translate *t)
6711 {
6712 struct ureg_program *ureg = t->ureg;
6713 struct ureg_dst face_temp = ureg_DECL_temporary(ureg);
6714 struct ureg_src face_input = t->inputs[t->inputMapping[VARYING_SLOT_FACE]];
6715
6716 if (ctx->Const.NativeIntegers) {
6717 ureg_FSGE(ureg, face_temp, face_input, ureg_imm1f(ureg, 0));
6718 }
6719 else {
6720 /* MOV_SAT face_temp, input[face] */
6721 ureg_MOV(ureg, ureg_saturate(face_temp), face_input);
6722 }
6723
6724 /* Use face_temp as face input from here on: */
6725 t->inputs[t->inputMapping[VARYING_SLOT_FACE]] = ureg_src(face_temp);
6726 }
6727
6728 struct sort_inout_decls {
6729 bool operator()(const struct inout_decl &a, const struct inout_decl &b) const {
6730 return mapping[a.mesa_index] < mapping[b.mesa_index];
6731 }
6732
6733 const ubyte *mapping;
6734 };
6735
6736 /* Sort the given array of decls by the corresponding slot (TGSI file index).
6737 *
6738 * This is for the benefit of older drivers that break when the
6739 * declarations aren't sorted in this way.
6740 */
6741 static void
6742 sort_inout_decls_by_slot(struct inout_decl *decls,
6743 unsigned count,
6744 const ubyte mapping[])
6745 {
6746 sort_inout_decls sorter;
6747 sorter.mapping = mapping;
6748 std::sort(decls, decls + count, sorter);
6749 }
6750
6751 /**
6752 * Translate intermediate IR (glsl_to_tgsi_instruction) to TGSI format.
6753 * \param program the program to translate
6754 * \param numInputs number of input registers used
6755 * \param inputMapping maps Mesa fragment program inputs to TGSI generic
6756 * input indexes
6757 * \param inputSemanticName the TGSI_SEMANTIC flag for each input
6758 * \param inputSemanticIndex the semantic index (ex: which texcoord) for
6759 * each input
6760 * \param interpMode the TGSI_INTERPOLATE_LINEAR/PERSP mode for each input
6761 * \param numOutputs number of output registers used
6762 * \param outputMapping maps Mesa fragment program outputs to TGSI
6763 * generic outputs
6764 * \param outputSemanticName the TGSI_SEMANTIC flag for each output
6765 * \param outputSemanticIndex the semantic index (ex: which texcoord) for
6766 * each output
6767 *
6768 * \return PIPE_OK or PIPE_ERROR_OUT_OF_MEMORY
6769 */
6770 extern "C" enum pipe_error
6771 st_translate_program(
6772 struct gl_context *ctx,
6773 enum pipe_shader_type procType,
6774 struct ureg_program *ureg,
6775 glsl_to_tgsi_visitor *program,
6776 const struct gl_program *proginfo,
6777 GLuint numInputs,
6778 const ubyte attrToIndex[],
6779 const ubyte inputSlotToAttr[],
6780 const ubyte inputSemanticName[],
6781 const ubyte inputSemanticIndex[],
6782 const ubyte interpMode[],
6783 GLuint numOutputs,
6784 const ubyte outputMapping[],
6785 const ubyte outputSemanticName[],
6786 const ubyte outputSemanticIndex[])
6787 {
6788 struct pipe_screen *screen = st_context(ctx)->screen;
6789 struct st_translate *t;
6790 unsigned i;
6791 struct gl_program_constants *prog_const =
6792 &ctx->Const.Program[program->shader->Stage];
6793 enum pipe_error ret = PIPE_OK;
6794 uint8_t inputMapping[VARYING_SLOT_TESS_MAX] = {0};
6795
6796 assert(numInputs <= ARRAY_SIZE(t->inputs));
6797 assert(numOutputs <= ARRAY_SIZE(t->outputs));
6798
6799 ASSERT_BITFIELD_SIZE(st_src_reg, type, GLSL_TYPE_ERROR);
6800 ASSERT_BITFIELD_SIZE(st_src_reg, file, PROGRAM_FILE_MAX);
6801 ASSERT_BITFIELD_SIZE(st_dst_reg, type, GLSL_TYPE_ERROR);
6802 ASSERT_BITFIELD_SIZE(st_dst_reg, file, PROGRAM_FILE_MAX);
6803 ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, tex_type, GLSL_TYPE_ERROR);
6804 ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, image_format, PIPE_FORMAT_COUNT);
6805 ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, tex_target,
6806 (gl_texture_index) (NUM_TEXTURE_TARGETS - 1));
6807 ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, image_format,
6808 (enum pipe_format) (PIPE_FORMAT_COUNT - 1));
6809 ASSERT_BITFIELD_SIZE(glsl_to_tgsi_instruction, op,
6810 (enum tgsi_opcode) (TGSI_OPCODE_LAST - 1));
6811
6812 if (proginfo->DualSlotInputs != 0) {
6813 /* Adjust attrToIndex to include a placeholder for the second
6814 * part of a double attribute.
6815 * The following code basically matches the behavior of
6816 * util_lower_uint64_vertex_elements.
6817 */
6818 numInputs = 0;
6819 for (unsigned attr = 0; attr < VERT_ATTRIB_MAX; attr++) {
6820 if ((proginfo->info.inputs_read & BITFIELD64_BIT(attr)) != 0) {
6821 inputMapping[attr] = numInputs++;
6822
6823 if ((proginfo->DualSlotInputs & BITFIELD64_BIT(attr)) != 0) {
6824 /* add placeholder for second part of a double attribute */
6825 numInputs++;
6826 }
6827 }
6828 }
6829 inputMapping[VERT_ATTRIB_EDGEFLAG] = numInputs;
6830 }
6831 else {
6832 memcpy(inputMapping, attrToIndex, sizeof(inputMapping));
6833 }
6834
6835 t = CALLOC_STRUCT(st_translate);
6836 if (!t) {
6837 ret = PIPE_ERROR_OUT_OF_MEMORY;
6838 goto out;
6839 }
6840
6841 t->procType = procType;
6842 t->tg4_component_in_swizzle = screen->get_param(screen, PIPE_CAP_TGSI_TG4_COMPONENT_IN_SWIZZLE);
6843 t->inputMapping = inputMapping;
6844 t->outputMapping = outputMapping;
6845 t->ureg = ureg;
6846 t->num_temp_arrays = program->next_array;
6847 if (t->num_temp_arrays)
6848 t->arrays = (struct ureg_dst*)
6849 calloc(t->num_temp_arrays, sizeof(t->arrays[0]));
6850
6851 /*
6852 * Declare input attributes.
6853 */
6854 switch (procType) {
6855 case PIPE_SHADER_FRAGMENT:
6856 case PIPE_SHADER_GEOMETRY:
6857 case PIPE_SHADER_TESS_EVAL:
6858 case PIPE_SHADER_TESS_CTRL:
6859 sort_inout_decls_by_slot(program->inputs, program->num_inputs, inputMapping);
6860
6861 for (i = 0; i < program->num_inputs; ++i) {
6862 struct inout_decl *decl = &program->inputs[i];
6863 unsigned slot = inputMapping[decl->mesa_index];
6864 struct ureg_src src;
6865 ubyte tgsi_usage_mask = decl->usage_mask;
6866
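/* A 64-bit component occupies two 32-bit channels, so widen the GLSL
 * usage mask to the corresponding TGSI channel pairs:
 * x -> xy, y -> zw, anything wider -> xyzw.
 */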
6867 if (glsl_base_type_is_64bit(decl->base_type)) {
6868 if (tgsi_usage_mask == 1)
6869 tgsi_usage_mask = TGSI_WRITEMASK_XY;
6870 else if (tgsi_usage_mask == 2)
6871 tgsi_usage_mask = TGSI_WRITEMASK_ZW;
6872 else
6873 tgsi_usage_mask = TGSI_WRITEMASK_XYZW;
6874 }
6875
6876 enum tgsi_interpolate_mode interp_mode = TGSI_INTERPOLATE_CONSTANT;
6877 enum tgsi_interpolate_loc interp_location = TGSI_INTERPOLATE_LOC_CENTER;
6878 if (procType == PIPE_SHADER_FRAGMENT) {
6879 assert(interpMode);
6880 interp_mode = interpMode[slot] != TGSI_INTERPOLATE_COUNT ?
6881 (enum tgsi_interpolate_mode) interpMode[slot] :
6882 tgsi_get_interp_mode(decl->interp,
6883 inputSlotToAttr[slot] == VARYING_SLOT_COL0 ||
6884 inputSlotToAttr[slot] == VARYING_SLOT_COL1);
6885
6886 interp_location = (enum tgsi_interpolate_loc) decl->interp_loc;
6887 }
6888
6889 src = ureg_DECL_fs_input_centroid_layout(ureg,
6890 (enum tgsi_semantic) inputSemanticName[slot],
6891 inputSemanticIndex[slot],
6892 interp_mode, interp_location, slot, tgsi_usage_mask,
6893 decl->array_id, decl->size);
6894
6895 for (unsigned j = 0; j < decl->size; ++j) {
6896 if (t->inputs[slot + j].File != TGSI_FILE_INPUT) {
6897 /* The ArrayID is set up in dst_register */
6898 t->inputs[slot + j] = src;
6899 t->inputs[slot + j].ArrayID = 0;
6900 t->inputs[slot + j].Index += j;
6901 }
6902 }
6903 }
6904 break;
6905 case PIPE_SHADER_VERTEX:
6906 for (i = 0; i < numInputs; i++) {
6907 t->inputs[i] = ureg_DECL_vs_input(ureg, i);
6908 }
6909 break;
6910 case PIPE_SHADER_COMPUTE:
6911 break;
6912 default:
6913 assert(0);
6914 }
6915
6916 /*
6917 * Declare output attributes.
6918 */
6919 switch (procType) {
6920 case PIPE_SHADER_FRAGMENT:
6921 case PIPE_SHADER_COMPUTE:
6922 break;
6923 case PIPE_SHADER_GEOMETRY:
6924 case PIPE_SHADER_TESS_EVAL:
6925 case PIPE_SHADER_TESS_CTRL:
6926 case PIPE_SHADER_VERTEX:
6927 sort_inout_decls_by_slot(program->outputs, program->num_outputs, outputMapping);
6928
6929 for (i = 0; i < program->num_outputs; ++i) {
6930 struct inout_decl *decl = &program->outputs[i];
6931 unsigned slot = outputMapping[decl->mesa_index];
6932 struct ureg_dst dst;
6933 ubyte tgsi_usage_mask = decl->usage_mask;
6934
6935 if (glsl_base_type_is_64bit(decl->base_type)) {
6936 if (tgsi_usage_mask == 1)
6937 tgsi_usage_mask = TGSI_WRITEMASK_XY;
6938 else if (tgsi_usage_mask == 2)
6939 tgsi_usage_mask = TGSI_WRITEMASK_ZW;
6940 else
6941 tgsi_usage_mask = TGSI_WRITEMASK_XYZW;
6942 }
6943
6944 dst = ureg_DECL_output_layout(ureg,
6945 (enum tgsi_semantic) outputSemanticName[slot],
6946 outputSemanticIndex[slot],
6947 decl->gs_out_streams,
6948 slot, tgsi_usage_mask, decl->array_id, decl->size, decl->invariant);
6949 dst.Invariant = decl->invariant;
6950 for (unsigned j = 0; j < decl->size; ++j) {
6951 if (t->outputs[slot + j].File != TGSI_FILE_OUTPUT) {
6952 /* The ArrayID is set up in dst_register */
6953 t->outputs[slot + j] = dst;
6954 t->outputs[slot + j].ArrayID = 0;
6955 t->outputs[slot + j].Index += j;
6956 t->outputs[slot + j].Invariant = decl->invariant;
6957 }
6958 }
6959 }
6960 break;
6961 default:
6962 assert(0);
6963 }
6964
6965 if (procType == PIPE_SHADER_FRAGMENT) {
6966 if (proginfo->info.inputs_read & VARYING_BIT_POS) {
6967 /* Must do this after setting up t->inputs. */
6968 emit_wpos(st_context(ctx), t, proginfo, ureg,
6969 program->wpos_transform_const);
6970 }
6971
6972 if (proginfo->info.inputs_read & VARYING_BIT_FACE)
6973 emit_face_var(ctx, t);
6974
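/* Fragment results live in fixed channels of their semantic outputs:
 * depth goes to POSITION.z, stencil to STENCIL.y and the coverage mask
 * to SAMPLEMASK.x, while COLOR outputs keep all four channels.
 */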
6975 for (i = 0; i < numOutputs; i++) {
6976 switch (outputSemanticName[i]) {
6977 case TGSI_SEMANTIC_POSITION:
6978 t->outputs[i] = ureg_DECL_output(ureg,
6979 TGSI_SEMANTIC_POSITION, /* Z/Depth */
6980 outputSemanticIndex[i]);
6981 t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_Z);
6982 break;
6983 case TGSI_SEMANTIC_STENCIL:
6984 t->outputs[i] = ureg_DECL_output(ureg,
6985 TGSI_SEMANTIC_STENCIL, /* Stencil */
6986 outputSemanticIndex[i]);
6987 t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_Y);
6988 break;
6989 case TGSI_SEMANTIC_COLOR:
6990 t->outputs[i] = ureg_DECL_output(ureg,
6991 TGSI_SEMANTIC_COLOR,
6992 outputSemanticIndex[i]);
6993 break;
6994 case TGSI_SEMANTIC_SAMPLEMASK:
6995 t->outputs[i] = ureg_DECL_output(ureg,
6996 TGSI_SEMANTIC_SAMPLEMASK,
6997 outputSemanticIndex[i]);
6998 /* TODO: If we ever support more than 32 samples, this will have
6999 * to become an array.
7000 */
7001 t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_X);
7002 break;
7003 default:
7004 assert(!"fragment shader outputs must be POSITION/STENCIL/COLOR");
7005 ret = PIPE_ERROR_BAD_INPUT;
7006 goto out;
7007 }
7008 }
7009 }
7010 else if (procType == PIPE_SHADER_VERTEX) {
7011 for (i = 0; i < numOutputs; i++) {
7012 if (outputSemanticName[i] == TGSI_SEMANTIC_FOG) {
7013 /* force register to contain a fog coordinate in the form (F, 0, 0, 1). */
7014 ureg_MOV(ureg,
7015 ureg_writemask(t->outputs[i], TGSI_WRITEMASK_YZW),
7016 ureg_imm4f(ureg, 0.0f, 0.0f, 0.0f, 1.0f));
7017 t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_X);
7018 }
7019 }
7020 }
7021
7022 /* Declare address register.
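 * Address registers hold indices for indirect addressing (e.g. indexing
 * into constant or temporary arrays); glsl_to_tgsi uses at most three.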
7023 */
7024 if (program->num_address_regs > 0) {
7025 assert(program->num_address_regs <= 3);
7026 for (int i = 0; i < program->num_address_regs; i++)
7027 t->address[i] = ureg_DECL_address(ureg);
7028 }
7029
7030 /* Declare misc input registers
7031 */
7032 BITSET_FOREACH_SET(i, proginfo->info.system_values_read, SYSTEM_VALUE_MAX) {
7033 enum tgsi_semantic semName = tgsi_get_sysval_semantic(i);
7034
7035 t->systemValues[i] = ureg_DECL_system_value(ureg, semName, 0);
7036
7037 if (semName == TGSI_SEMANTIC_INSTANCEID ||
7038 semName == TGSI_SEMANTIC_VERTEXID) {
7039 /* From the Gallium perspective, these system values are always
7040 * integer, and require native integer support. However, if
7041 * native integer is supported on the vertex stage but not the
7042 * pixel stage (e.g., i915g + draw), Mesa will generate IR that
7043 * assumes these system values are floats. To resolve the
7044 * inconsistency, we insert a U2F.
7045 */
7046 struct st_context *st = st_context(ctx);
7047 struct pipe_screen *pscreen = st->screen;
7048 assert(procType == PIPE_SHADER_VERTEX);
7049 assert(pscreen->get_shader_param(pscreen, PIPE_SHADER_VERTEX, PIPE_SHADER_CAP_INTEGERS));
7050 (void) pscreen;
7051 if (!ctx->Const.NativeIntegers) {
7052 struct ureg_dst temp = ureg_DECL_local_temporary(t->ureg);
7053 ureg_U2F(t->ureg, ureg_writemask(temp, TGSI_WRITEMASK_X),
7054 t->systemValues[i]);
7055 t->systemValues[i] = ureg_scalar(ureg_src(temp), 0);
7056 }
7057 }
7058
7059 if (procType == PIPE_SHADER_FRAGMENT &&
7060 semName == TGSI_SEMANTIC_POSITION)
7061 emit_wpos(st_context(ctx), t, proginfo, ureg,
7062 program->wpos_transform_const);
7063
7064 if (procType == PIPE_SHADER_FRAGMENT &&
7065 semName == TGSI_SEMANTIC_SAMPLEPOS)
7066 emit_samplepos_adjustment(t, program->wpos_transform_const);
7067 }
7068
7069 t->array_sizes = program->array_sizes;
7070 t->input_decls = program->inputs;
7071 t->num_input_decls = program->num_inputs;
7072 t->output_decls = program->outputs;
7073 t->num_output_decls = program->num_outputs;
7074
7075 /* Emit constants and uniforms. TGSI uses a single index space for these,
7076 * so we put all the translated regs in t->constants.
7077 */
7078 if (proginfo->Parameters) {
7079 t->constants = (struct ureg_src *)
7080 calloc(proginfo->Parameters->NumParameters, sizeof(t->constants[0]));
7081 if (t->constants == NULL) {
7082 ret = PIPE_ERROR_OUT_OF_MEMORY;
7083 goto out;
7084 }
7085 t->num_constants = proginfo->Parameters->NumParameters;
7086
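/* Parameters that stay in the constant file are declared so that the Mesa
 * parameter index doubles as the TGSI constant index (all within constant
 * buffer 0); plain compile-time constants may become immediates instead.
 */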
7087 for (i = 0; i < proginfo->Parameters->NumParameters; i++) {
7088 unsigned pvo = proginfo->Parameters->Parameters[i].ValueOffset;
7089
7090 switch (proginfo->Parameters->Parameters[i].Type) {
7091 case PROGRAM_STATE_VAR:
7092 case PROGRAM_UNIFORM:
7093 t->constants[i] = ureg_DECL_constant(ureg, i);
7094 break;
7095
7096 /* Emit immediates for PROGRAM_CONSTANT only when there's no indirect
7097 * addressing of the const buffer.
7098 * FIXME: Be smarter and recognize param arrays:
7099 * indirect addressing is only valid within the referenced
7100 * array.
7101 */
7102 case PROGRAM_CONSTANT:
7103 if (program->indirect_addr_consts)
7104 t->constants[i] = ureg_DECL_constant(ureg, i);
7105 else
7106 t->constants[i] = emit_immediate(t,
7107 proginfo->Parameters->ParameterValues + pvo,
7108 proginfo->Parameters->Parameters[i].DataType,
7109 4);
7110 break;
7111 default:
7112 break;
7113 }
7114 }
7115 }
7116
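/* Declare user UBOs as 2D constants. Constant buffer 0 holds the default
 * uniform block, so UBO i is bound at index i + 1, and its byte size is
 * rounded up to whole vec4s: a 36-byte block, for example, needs
 * (36 + 15) / 16 = 3 vec4 slots.
 */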
7117 for (i = 0; i < proginfo->info.num_ubos; i++) {
7118 unsigned size = proginfo->sh.UniformBlocks[i]->UniformBufferSize;
7119 unsigned num_const_vecs = (size + 15) / 16;
7120 unsigned first, last;
7121 assert(num_const_vecs > 0);
7122 first = 0;
7123 last = num_const_vecs > 0 ? num_const_vecs - 1 : 0;
7124 ureg_DECL_constant2D(t->ureg, first, last, i + 1);
7125 }
7126
7127 /* Emit immediate values.
7128 */
7129 t->immediates = (struct ureg_src *)
7130 calloc(program->num_immediates, sizeof(struct ureg_src));
7131 if (t->immediates == NULL) {
7132 ret = PIPE_ERROR_OUT_OF_MEMORY;
7133 goto out;
7134 }
7135 t->num_immediates = program->num_immediates;
7136
7137 i = 0;
7138 foreach_in_list(immediate_storage, imm, &program->immediates) {
7139 assert(i < program->num_immediates);
7140 t->immediates[i++] = emit_immediate(t, imm->values, imm->type, imm->size32);
7141 }
7142 assert(i == program->num_immediates);
7143
7144 /* texture samplers */
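/* Every sampler unit referenced by the shader gets a sampler declaration
 * plus a sampler view whose four return channels all use the texture's
 * scalar return type.
 */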
7145 for (i = 0; i < prog_const->MaxTextureImageUnits; i++) {
7146 if (program->samplers_used & (1u << i)) {
7147 enum tgsi_return_type type =
7148 st_translate_texture_type(program->sampler_types[i]);
7149
7150 t->samplers[i] = ureg_DECL_sampler(ureg, i);
7151
7152 ureg_DECL_sampler_view(ureg, i, program->sampler_targets[i],
7153 type, type, type, type);
7154 }
7155 }
7156
7157 /* Declare atomic and shader storage buffers. */
7158 {
7159 struct gl_program *prog = program->prog;
7160
7161 if (!st_context(ctx)->has_hw_atomics) {
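/* Without native hardware atomics, atomic counter buffers are lowered to
 * ordinary buffers placed after all SSBOs in the BUFFER file, hence the
 * num_ssbos offset added to each binding below.
 */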
7162 for (i = 0; i < prog->info.num_abos; i++) {
7163 unsigned index = (prog->info.num_ssbos +
7164 prog->sh.AtomicBuffers[i]->Binding);
7165 assert(prog->sh.AtomicBuffers[i]->Binding <
7166 prog_const->MaxAtomicBuffers);
7167 t->buffers[index] = ureg_DECL_buffer(ureg, index, true);
7168 }
7169 } else {
7170 for (i = 0; i < program->num_atomics; i++) {
7171 struct hwatomic_decl *ainfo = &program->atomic_info[i];
7172 gl_uniform_storage *uni_storage = &prog->sh.data->UniformStorage[ainfo->location];
7173 int base = uni_storage->offset / ATOMIC_COUNTER_SIZE;
7174 ureg_DECL_hw_atomic(ureg, base, base + ainfo->size - 1, ainfo->binding,
7175 ainfo->array_id);
7176 }
7177 }
7178
7179 assert(prog->info.num_ssbos <= prog_const->MaxShaderStorageBlocks);
7180 for (i = 0; i < prog->info.num_ssbos; i++) {
7181 t->buffers[i] = ureg_DECL_buffer(ureg, i, false);
7182 }
7183 }
7184
7185 if (program->use_shared_memory)
7186 t->shared_memory = ureg_DECL_memory(ureg, TGSI_MEMORY_TYPE_SHARED);
7187
7188 for (i = 0; i < program->shader->Program->info.num_images; i++) {
7189 if (program->images_used & (1 << i)) {
7190 t->images[i] = ureg_DECL_image(ureg, i,
7191 program->image_targets[i],
7192 program->image_formats[i],
7193 program->image_wr[i],
7194 false);
7195 }
7196 }
7197
7198 /* Emit each instruction in turn:
7199 */
7200 foreach_in_list(glsl_to_tgsi_instruction, inst, &program->instructions)
7201 compile_tgsi_instruction(t, inst);
7202
7203 out:
7204 if (t) {
7205 free(t->arrays);
7206 free(t->temps);
7207 free(t->constants);
7208 t->num_constants = 0;
7209 free(t->immediates);
7210 t->num_immediates = 0;
7211 FREE(t);
7212 }
7213
7214 return ret;
7215 }
7216 /* ----------------------------- End TGSI code ------------------------------ */
7217
7218
7219 /**
7220 * Convert a shader's GLSL IR into a Mesa gl_program, but without
7221 * generating Mesa IR.
7222 */
7223 static struct gl_program *
7224 get_mesa_program_tgsi(struct gl_context *ctx,
7225 struct gl_shader_program *shader_program,
7226 struct gl_linked_shader *shader)
7227 {
7228 glsl_to_tgsi_visitor* v;
7229 struct gl_program *prog;
7230 struct gl_shader_compiler_options *options =
7231 &ctx->Const.ShaderCompilerOptions[shader->Stage];
7232 struct pipe_screen *pscreen = st_context(ctx)->screen;
7233 enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(shader->Stage);
7234 unsigned skip_merge_registers;
7235
7236 validate_ir_tree(shader->ir);
7237
7238 prog = shader->Program;
7239
7240 prog->Parameters = _mesa_new_parameter_list();
7241 v = new glsl_to_tgsi_visitor();
7242 v->ctx = ctx;
7243 v->prog = prog;
7244 v->shader_program = shader_program;
7245 v->shader = shader;
7246 v->options = options;
7247 v->native_integers = ctx->Const.NativeIntegers;
7248
7249 v->have_sqrt = pscreen->get_shader_param(pscreen, ptarget,
7250 PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED);
7251 v->have_fma = pscreen->get_shader_param(pscreen, ptarget,
7252 PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED);
7253 v->has_tex_txf_lz = pscreen->get_param(pscreen,
7254 PIPE_CAP_TGSI_TEX_TXF_LZ);
7255
7256 v->tg4_component_in_swizzle = pscreen->get_param(pscreen, PIPE_CAP_TGSI_TG4_COMPONENT_IN_SWIZZLE);
7257 v->variables = _mesa_hash_table_create(v->mem_ctx, _mesa_hash_pointer,
7258 _mesa_key_pointer_equal);
7259 skip_merge_registers =
7260 pscreen->get_shader_param(pscreen, ptarget,
7261 PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS);
7262
7263 generate_parameters_list_for_uniforms(ctx, shader_program, shader,
7264 prog->Parameters);
7265
7266 /* Remove reads from output registers. */
7267 if (!pscreen->get_param(pscreen, PIPE_CAP_SHADER_CAN_READ_OUTPUTS))
7268 lower_output_reads(shader->Stage, shader->ir);
7269
7270 /* Emit intermediate IR for main(). */
7271 visit_exec_list(shader->ir, v);
7272
7273 #if 0
7274 /* Print out some information (for debugging purposes) used by the
7275 * optimization passes. */
7276 {
7277 int i;
7278 int *first_writes = ralloc_array(v->mem_ctx, int, v->next_temp);
7279 int *first_reads = ralloc_array(v->mem_ctx, int, v->next_temp);
7280 int *last_writes = ralloc_array(v->mem_ctx, int, v->next_temp);
7281 int *last_reads = ralloc_array(v->mem_ctx, int, v->next_temp);
7282
7283 for (i = 0; i < v->next_temp; i++) {
7284 first_writes[i] = -1;
7285 first_reads[i] = -1;
7286 last_writes[i] = -1;
7287 last_reads[i] = -1;
7288 }
7289 v->get_first_temp_read(first_reads);
7290 v->get_last_temp_read_first_temp_write(last_reads, first_writes);
7291 v->get_last_temp_write(last_writes);
7292 for (i = 0; i < v->next_temp; i++)
7293 printf("Temp %d: FR=%3d FW=%3d LR=%3d LW=%3d\n", i, first_reads[i],
7294 first_writes[i],
7295 last_reads[i],
7296 last_writes[i]);
7297 ralloc_free(first_writes);
7298 ralloc_free(first_reads);
7299 ralloc_free(last_writes);
7300 ralloc_free(last_reads);
7301 }
7302 #endif
7303
7304 /* Perform optimizations on the instructions in the glsl_to_tgsi_visitor. */
7305 v->simplify_cmp();
7306 v->copy_propagate();
7307
7308 while (v->eliminate_dead_code());
7309
7310 v->merge_two_dsts();
7311
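/* Some drivers prefer to see unmerged temporaries and set
 * PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS (queried above); for them the
 * register-merging passes below are skipped.
 */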
7312 if (!skip_merge_registers) {
7313 v->split_arrays();
7314 v->copy_propagate();
7315 while (v->eliminate_dead_code());
7316
7317 v->merge_registers();
7318 v->copy_propagate();
7319 while (v->eliminate_dead_code());
7320 }
7321
7322 v->renumber_registers();
7323
7324 /* Write the END instruction. */
7325 v->emit_asm(NULL, TGSI_OPCODE_END);
7326
7327 if (ctx->_Shader->Flags & GLSL_DUMP) {
7328 _mesa_log("\n");
7329 _mesa_log("GLSL IR for linked %s program %d:\n",
7330 _mesa_shader_stage_to_string(shader->Stage),
7331 shader_program->Name);
7332 _mesa_print_ir(_mesa_get_log_file(), shader->ir, NULL);
7333 _mesa_log("\n\n");
7334 }
7335
7336 do_set_program_inouts(shader->ir, prog, shader->Stage);
7337
7338 _mesa_copy_linked_program_data(shader_program, shader);
7339
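/* When the driver asks us not to shrink I/O arrays
 * (PIPE_CAP_TGSI_SKIP_SHRINK_IO_ARRAYS), keep the declarations as-is and
 * only update the usage masks for the array slots; otherwise trim the
 * declared arrays down to what the shader actually accesses.
 */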
7340 if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_SKIP_SHRINK_IO_ARRAYS)) {
7341 mark_array_io(v->inputs, v->num_inputs,
7342 &prog->info.inputs_read,
7343 prog->DualSlotInputs,
7344 &prog->info.patch_inputs_read);
7345
7346 mark_array_io(v->outputs, v->num_outputs,
7347 &prog->info.outputs_written, 0ULL,
7348 &prog->info.patch_outputs_written);
7349 } else {
7350 shrink_array_declarations(v->inputs, v->num_inputs,
7351 &prog->info.inputs_read,
7352 prog->DualSlotInputs,
7353 &prog->info.patch_inputs_read);
7354 shrink_array_declarations(v->outputs, v->num_outputs,
7355 &prog->info.outputs_written, 0ULL,
7356 &prog->info.patch_outputs_written);
7357 }
7358
7359 count_resources(v, prog);
7360
7361 /* The GLSL IR won't be needed anymore. */
7362 ralloc_free(shader->ir);
7363 shader->ir = NULL;
7364
7365 /* This must be done before the uniform storage is associated. */
7366 if (shader->Stage == MESA_SHADER_FRAGMENT &&
7367 (prog->info.inputs_read & VARYING_BIT_POS ||
7368 BITSET_TEST(prog->info.system_values_read, SYSTEM_VALUE_FRAG_COORD) ||
7369 BITSET_TEST(prog->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS))) {
7370 static const gl_state_index16 wposTransformState[STATE_LENGTH] = {
7371 STATE_FB_WPOS_Y_TRANSFORM
7372 };
7373
7374 v->wpos_transform_const = _mesa_add_state_reference(prog->Parameters,
7375 wposTransformState);
7376 }
7377
7378 /* Avoid reallocation of the program parameter list, because the uniform
7379 * storage is only associated with the original parameter list.
7380 * This should be enough for Bitmap and DrawPixels constants.
7381 */
7382 _mesa_ensure_and_associate_uniform_storage(ctx, shader_program, prog, 8);
7383 if (!shader_program->data->LinkStatus) {
7384 free_glsl_to_tgsi_visitor(v);
7385 _mesa_reference_program(ctx, &shader->Program, NULL);
7386 return NULL;
7387 }
7388
7389
7390 prog->glsl_to_tgsi = v;
7391
7392 PRINT_STATS(v->print_stats());
7393
7394 return prog;
7395 }
7396
7397 /* See if there are unsupported control flow statements. */
7398 class ir_control_flow_info_visitor : public ir_hierarchical_visitor {
7399 private:
7400 const struct gl_shader_compiler_options *options;
7401 public:
7402 ir_control_flow_info_visitor(const struct gl_shader_compiler_options *options)
7403 : options(options),
7404 unsupported(false)
7405 {
7406 }
7407
7408 virtual ir_visitor_status visit_enter(ir_function *ir)
7409 {
7410 /* Other functions are skipped (same as glsl_to_tgsi). */
7411 if (strcmp(ir->name, "main") == 0)
7412 return visit_continue;
7413
7414 return visit_continue_with_parent;
7415 }
7416
7417 virtual ir_visitor_status visit_enter(ir_call *ir)
7418 {
7419 if (!ir->callee->is_intrinsic()) {
7420 unsupported = true; /* it's a function call */
7421 return visit_stop;
7422 }
7423 return visit_continue;
7424 }
7425
7426 virtual ir_visitor_status visit_enter(ir_return *ir)
7427 {
7428 if (options->EmitNoMainReturn) {
7429 unsupported = true;
7430 return visit_stop;
7431 }
7432 return visit_continue;
7433 }
7434
7435 bool unsupported;
7436 };
7437
7438 static bool
7439 has_unsupported_control_flow(exec_list *ir,
7440 const struct gl_shader_compiler_options *options)
7441 {
7442 ir_control_flow_info_visitor visitor(options);
7443 visit_list_elements(&visitor, ir);
7444 return visitor.unsupported;
7445 }
7446
7447 /**
7448 * Link a shader program.
7449 * This involves converting the GLSL IR of each linked stage into an
7450 * intermediate TGSI-like IR, with code lowering and other optimizations.
7451 */
7452 GLboolean
7453 st_link_tgsi(struct gl_context *ctx, struct gl_shader_program *prog)
7454 {
7455 struct pipe_screen *pscreen = st_context(ctx)->screen;
7456
7457 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
7458 struct gl_linked_shader *shader = prog->_LinkedShaders[i];
7459 if (shader == NULL)
7460 continue;
7461
7462 exec_list *ir = shader->ir;
7463 gl_shader_stage stage = shader->Stage;
7464 enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(stage);
7465 const struct gl_shader_compiler_options *options =
7466 &ctx->Const.ShaderCompilerOptions[stage];
7467
7468 unsigned if_threshold = pscreen->get_shader_param(pscreen, ptarget,
7469 PIPE_SHADER_CAP_LOWER_IF_THRESHOLD);
7470 if (ctx->Const.GLSLOptimizeConservatively) {
7471 /* Do it once and repeat only if there's unsupported control flow. */
7472 do {
7473 do_common_optimization(ir, true, true, options,
7474 ctx->Const.NativeIntegers);
7475 lower_if_to_cond_assign((gl_shader_stage)i, ir,
7476 options->MaxIfDepth, if_threshold);
7477 } while (has_unsupported_control_flow(ir, options));
7478 } else {
7479 /* Repeat it until it stops making changes. */
7480 bool progress;
7481 do {
7482 progress = do_common_optimization(ir, true, true, options,
7483 ctx->Const.NativeIntegers);
7484 progress |= lower_if_to_cond_assign((gl_shader_stage)i, ir,
7485 options->MaxIfDepth, if_threshold);
7486 } while (progress);
7487 }
7488
7489 /* Do this again to lower ir_binop_vector_extract introduced
7490 * by optimization passes.
7491 */
7492 do_vec_index_to_cond_assign(ir);
7493
7494 validate_ir_tree(ir);
7495
7496 struct gl_program *linked_prog =
7497 get_mesa_program_tgsi(ctx, prog, shader);
7498 /* get_mesa_program_tgsi may return NULL on link failure. */
7499
7500 if (linked_prog) {
         st_set_prog_affected_state_flags(linked_prog);
7501 /* This is really conservative: */
7502 linked_prog->info.writes_memory =
7503 linked_prog->info.num_ssbos ||
7504 linked_prog->info.num_images ||
7505 ctx->Extensions.ARB_bindless_texture ||
7506 (linked_prog->sh.LinkedTransformFeedback &&
7507 linked_prog->sh.LinkedTransformFeedback->NumVarying);
7508
7509 if (!st_program_string_notify(ctx,
7510 _mesa_shader_stage_to_program(i),
7511 linked_prog)) {
7512 _mesa_reference_program(ctx, &shader->Program, NULL);
7513 return GL_FALSE;
7514 }
7515 }
7516 }
7517
7518 return GL_TRUE;
7519 }
7520