/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file brw_vec4_gs_visitor.cpp
 *
 * Geometry-shader-specific code derived from the vec4_visitor class.
 */

#include "brw_vec4_gs_visitor.h"
#include "gfx6_gs_visitor.h"
#include "brw_cfg.h"
#include "brw_fs.h"
#include "brw_nir.h"
#include "dev/intel_debug.h"

namespace brw {

vec4_gs_visitor::vec4_gs_visitor(const struct brw_compiler *compiler,
                                 void *log_data,
                                 struct brw_gs_compile *c,
                                 struct brw_gs_prog_data *prog_data,
                                 const nir_shader *shader,
                                 void *mem_ctx,
                                 bool no_spills,
                                 int shader_time_index,
                                 bool debug_enabled)
   : vec4_visitor(compiler, log_data, &c->key.base.tex,
                  &prog_data->base, shader, mem_ctx,
                  no_spills, shader_time_index, debug_enabled),
     c(c),
     gs_prog_data(prog_data)
{
}


static inline struct brw_reg
attribute_to_hw_reg(int attr, brw_reg_type type, bool interleaved)
{
   struct brw_reg reg;

   unsigned width = REG_SIZE / 2 / MAX2(4, type_sz(type));
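   /* For any 4-byte type this yields width == 4 (a vec4 spans half a GRF):
    * in interleaved mode attribute 5, for example, lands in the second half
    * of GRF 2, while in non-interleaved mode it starts at GRF 5.
    */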
   if (interleaved) {
      reg = stride(brw_vecn_grf(width, attr / 2, (attr % 2) * 4), 0, width, 1);
   } else {
      reg = brw_vecn_grf(width, attr, 0);
   }

   reg.type = type;
   return reg;
}

/**
 * Replace each register of type ATTR in this->instructions with a reference
 * to a fixed HW register.
 *
 * If interleaved is true, then each attribute takes up half a register, with
 * register N containing attribute 2*N in its first half and attribute 2*N+1
 * in its second half (this corresponds to the payload setup used by geometry
 * shaders in "single" or "dual instanced" dispatch mode).  If interleaved is
 * false, then each attribute takes up a whole register, with register N
 * containing attribute N (this corresponds to the payload setup used by
 * vertex shaders, and by geometry shaders in "dual object" dispatch mode).
 */
int
vec4_gs_visitor::setup_varying_inputs(int payload_reg,
                                      int attributes_per_reg)
{
   /* For geometry shaders there are N copies of the input attributes, where N
    * is the number of input vertices.  attribute_map[BRW_VARYING_SLOT_COUNT *
    * i + j] represents attribute j for vertex i.
    *
    * Note that GS inputs are read from the VUE 256 bits (2 vec4's) at a time,
    * so the total number of input slots that will be delivered to the GS (and
    * thus the stride of the input arrays) is urb_read_length * 2.
    */
   const unsigned num_input_vertices = nir->info.gs.vertices_in;
   assert(num_input_vertices <= MAX_GS_INPUT_VERTICES);
   unsigned input_array_stride = prog_data->urb_read_length * 2;
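   /* So, for example, with urb_read_length == 2 each vertex's inputs span
    * four vec4 slots, and attribute j of vertex i is ATTR slot 4 * i + j.
    */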

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         assert(inst->src[i].offset % REG_SIZE == 0);
         int grf = payload_reg * attributes_per_reg +
                   inst->src[i].nr + inst->src[i].offset / REG_SIZE;

         struct brw_reg reg =
            attribute_to_hw_reg(grf, inst->src[i].type, attributes_per_reg > 1);
         reg.swizzle = inst->src[i].swizzle;
         if (inst->src[i].abs)
            reg = brw_abs(reg);
         if (inst->src[i].negate)
            reg = negate(reg);

         inst->src[i] = reg;
      }
   }

   int regs_used = ALIGN(input_array_stride * num_input_vertices,
                         attributes_per_reg) / attributes_per_reg;
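   /* E.g., 3 input vertices with a stride of 4 slots make 12 attribute
    * slots, which pack into 6 GRFs when interleaved (attributes_per_reg
    * == 2) and take 12 GRFs otherwise.
    */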
   return payload_reg + regs_used;
}

void
vec4_gs_visitor::setup_payload()
{
   /* If we are in dual instanced or single mode, then attributes are going
    * to be interleaved, so one register contains two attribute slots.
    */
   int attributes_per_reg =
      prog_data->dispatch_mode == DISPATCH_MODE_4X2_DUAL_OBJECT ? 1 : 2;

   int reg = 0;

   /* The payload always contains important data in r0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.
    */
   reg++;

   /* If the shader uses gl_PrimitiveIDIn, that goes in r1. */
   if (gs_prog_data->include_primitive_id)
      reg++;

   reg = setup_uniforms(reg);

   reg = setup_varying_inputs(reg, attributes_per_reg);

   this->first_non_payload_grf = reg;
}


void
vec4_gs_visitor::emit_prolog()
{
   /* In vertex shaders, r0.2 is guaranteed to be initialized to zero.  In
    * geometry shaders, it isn't (it contains a bunch of information we don't
    * need, like the input primitive type).  We need r0.2 to be zero in order
    * to build scratch read/write messages correctly (otherwise this value
    * will be interpreted as a global offset, causing us to do our scratch
    * reads/writes to garbage memory).  So just set it to zero at the top of
    * the shader.
    */
   this->current_annotation = "clear r0.2";
   dst_reg r0(retype(brw_vec4_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(GS_OPCODE_SET_DWORD_2, r0, brw_imm_ud(0u));
   inst->force_writemask_all = true;

   /* Create a virtual register to hold the vertex count */
   this->vertex_count = src_reg(this, glsl_type::uint_type);

   /* Initialize the vertex_count register to 0 */
   this->current_annotation = "initialize vertex_count";
   inst = emit(MOV(dst_reg(this->vertex_count), brw_imm_ud(0u)));
   inst->force_writemask_all = true;

   if (c->control_data_header_size_bits > 0) {
      /* Create a virtual register to hold the current set of control data
       * bits.
       */
      this->control_data_bits = src_reg(this, glsl_type::uint_type);

      /* If we're outputting more than 32 control data bits, then EmitVertex()
       * will set control_data_bits to 0 after emitting the first vertex.
       * Otherwise, we need to initialize it to 0 here.
       */
      if (c->control_data_header_size_bits <= 32) {
         this->current_annotation = "initialize control data bits";
         inst = emit(MOV(dst_reg(this->control_data_bits), brw_imm_ud(0u)));
         inst->force_writemask_all = true;
      }
   }

   this->current_annotation = NULL;
}

void
vec4_gs_visitor::emit_thread_end()
{
   if (c->control_data_header_size_bits > 0) {
      /* During shader execution, we only ever call emit_control_data_bits()
       * just prior to outputting a vertex.  Therefore, the control data bits
       * corresponding to the most recently output vertex still need to be
       * emitted.
       */
      current_annotation = "thread end: emit control data bits";
      emit_control_data_bits();
   }

   /* MRF 0 is reserved for the debugger, so start with message header
    * in MRF 1.
    */
   int base_mrf = 1;

   current_annotation = "thread end";
   dst_reg mrf_reg(MRF, base_mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;
   emit(GS_OPCODE_SET_VERTEX_COUNT, mrf_reg, this->vertex_count);
   if (INTEL_DEBUG(DEBUG_SHADER_TIME))
      emit_shader_time_end();
   inst = emit(GS_OPCODE_THREAD_END);
   inst->base_mrf = base_mrf;
   inst->mlen = 1;
}


void
vec4_gs_visitor::emit_urb_write_header(int mrf)
{
   /* The SEND instruction that writes the vertex data to the VUE will use
    * per_slot_offset=true, which means that DWORDs 3 and 4 of the message
    * header specify an offset (in multiples of 256 bits) into the URB entry
    * at which the write should take place.
    *
    * So we have to prepare a message header with the appropriate offset
    * values.
    */
   dst_reg mrf_reg(MRF, mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   this->current_annotation = "URB write header";
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;
   emit(GS_OPCODE_SET_WRITE_OFFSET, mrf_reg, this->vertex_count,
        brw_imm_ud(gs_prog_data->output_vertex_size_hwords));
}


vec4_instruction *
vec4_gs_visitor::emit_urb_write_opcode(bool complete)
{
   /* We don't care whether the vertex is complete, because in general
    * geometry shaders output multiple vertices, and we don't terminate the
    * thread until all vertices are complete.
    */
   (void) complete;

   vec4_instruction *inst = emit(GS_OPCODE_URB_WRITE);
   inst->offset = gs_prog_data->control_data_header_size_hwords;

   inst->urb_write_flags = BRW_URB_WRITE_PER_SLOT_OFFSET;
   return inst;
}


/**
 * Write out a batch of 32 control data bits from the control_data_bits
 * register to the URB.
 *
 * The current value of the vertex_count register determines which DWORD in
 * the URB receives the control data bits.  The control_data_bits register is
 * assumed to contain the correct data for the vertex that was most recently
 * output, and all previous vertices that share the same DWORD.
 *
 * This function takes care of ensuring that if no vertices have been output
 * yet, no control bits are emitted.
 */
void
vec4_gs_visitor::emit_control_data_bits()
{
   assert(c->control_data_bits_per_vertex != 0);

   /* Since the URB_WRITE_OWORD message operates with 128-bit (vec4 sized)
    * granularity, we need to use two tricks to ensure that the batch of 32
    * control data bits is written to the appropriate DWORD in the URB.  To
    * select which vec4 we are writing to, we use the "slot {0,1} offset"
    * fields of the message header.  To select which DWORD in the vec4 we are
    * writing to, we use the channel mask fields of the message header.  To
    * avoid penalizing geometry shaders that emit a small number of vertices
    * with extra bookkeeping, we only do each of these tricks when
    * c->prog_data.control_data_header_size_bits is large enough to make it
    * necessary.
    *
    * Note: this means that if we're outputting just a single DWORD of control
    * data bits, we'll actually replicate it four times since we won't do any
    * channel masking.  But that's not a problem since in this case the
    * hardware only pays attention to the first DWORD.
    */
   enum brw_urb_write_flags urb_write_flags = BRW_URB_WRITE_OWORD;
   if (c->control_data_header_size_bits > 32)
      urb_write_flags = urb_write_flags | BRW_URB_WRITE_USE_CHANNEL_MASKS;
   if (c->control_data_header_size_bits > 128)
      urb_write_flags = urb_write_flags | BRW_URB_WRITE_PER_SLOT_OFFSET;
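   /* E.g., a stream-using GS (2 bits per vertex) with max_vertices == 50
    * has a 100-bit header: channel masks are needed (> 32 bits) but a
    * per-slot offset is not (<= 128 bits, i.e. one vec4).
    */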

   /* If we are using either channel masks or a per-slot offset, then we
    * need to figure out which DWORD we are trying to write to, using the
    * formula:
    *
    *     dword_index = (vertex_count - 1) * bits_per_vertex / 32
    *
    * Since bits_per_vertex is a power of two, and is known at compile
    * time, this can be optimized to a single shift:
    *
    *     dword_index = (vertex_count - 1) >> (5 - log2(bits_per_vertex))
    *
    * The code below computes the shift amount as
    * 6 - util_last_bit(bits_per_vertex), which is equivalent because
    * util_last_bit(x) == log2(x) + 1 when x is a power of two.
    */
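   /* E.g., with 2 bits per vertex, the 17th vertex (prev_count == 16) maps
    * to dword_index == 16 >> 4 == 1, matching 16 * 2 / 32.
    */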
   src_reg dword_index(this, glsl_type::uint_type);
   if (urb_write_flags) {
      src_reg prev_count(this, glsl_type::uint_type);
      emit(ADD(dst_reg(prev_count), this->vertex_count,
               brw_imm_ud(0xffffffffu)));
      unsigned log2_bits_per_vertex =
         util_last_bit(c->control_data_bits_per_vertex);
      emit(SHR(dst_reg(dword_index), prev_count,
               brw_imm_ud(6 - log2_bits_per_vertex)));
   }

   /* Start building the URB write message.  The first MRF gets a copy of
    * R0.
    */
   int base_mrf = 1;
   dst_reg mrf_reg(MRF, base_mrf);
   src_reg r0(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
   vec4_instruction *inst = emit(MOV(mrf_reg, r0));
   inst->force_writemask_all = true;

   if (urb_write_flags & BRW_URB_WRITE_PER_SLOT_OFFSET) {
      /* Set the per-slot offset to dword_index / 4, so that we'll write to
       * the appropriate OWORD within the control data header.
       */
      src_reg per_slot_offset(this, glsl_type::uint_type);
      emit(SHR(dst_reg(per_slot_offset), dword_index, brw_imm_ud(2u)));
      emit(GS_OPCODE_SET_WRITE_OFFSET, mrf_reg, per_slot_offset,
           brw_imm_ud(1u));
   }

   if (urb_write_flags & BRW_URB_WRITE_USE_CHANNEL_MASKS) {
      /* Set the channel masks to 1 << (dword_index % 4), so that we'll
       * write to the appropriate DWORD within the OWORD.  We need to do
       * this computation with force_writemask_all, otherwise garbage data
       * from invocation 0 might clobber the mask for invocation 1 when
       * GS_OPCODE_PREPARE_CHANNEL_MASKS tries to OR the two masks
       * together.
       */
      src_reg channel(this, glsl_type::uint_type);
      inst = emit(AND(dst_reg(channel), dword_index, brw_imm_ud(3u)));
      inst->force_writemask_all = true;
      src_reg one(this, glsl_type::uint_type);
      inst = emit(MOV(dst_reg(one), brw_imm_ud(1u)));
      inst->force_writemask_all = true;
      src_reg channel_mask(this, glsl_type::uint_type);
      inst = emit(SHL(dst_reg(channel_mask), one, channel));
      inst->force_writemask_all = true;
      emit(GS_OPCODE_PREPARE_CHANNEL_MASKS, dst_reg(channel_mask),
                                            channel_mask);
      emit(GS_OPCODE_SET_CHANNEL_MASKS, mrf_reg, channel_mask);
   }

   /* Store the control data bits in the message payload and send it. */
   dst_reg mrf_reg2(MRF, base_mrf + 1);
   inst = emit(MOV(mrf_reg2, this->control_data_bits));
   inst->force_writemask_all = true;
   inst = emit(GS_OPCODE_URB_WRITE);
   inst->urb_write_flags = urb_write_flags;
   inst->base_mrf = base_mrf;
   inst->mlen = 2;
}

void
vec4_gs_visitor::set_stream_control_data_bits(unsigned stream_id)
{
   /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */

   /* Note: we are calling this *before* increasing vertex_count, so
    * this->vertex_count == vertex_count - 1 in the formula above.
    */

   /* Stream mode uses 2 bits per vertex */
   assert(c->control_data_bits_per_vertex == 2);

   /* Must be a valid stream */
   assert(stream_id < MAX_VERTEX_STREAMS);

   /* Control data bits are initialized to 0 so we don't have to set any
    * bits when sending vertices to stream 0.
    */
   if (stream_id == 0)
      return;
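
   /* E.g., emitting the third vertex (this->vertex_count == 2) to stream 1
    * ORs 1 << 4 into control_data_bits: with two bits per vertex, vertex
    * n's stream ID occupies bits 2n and 2n+1 of the current 32-bit batch.
    */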

   /* reg::sid = stream_id */
   src_reg sid(this, glsl_type::uint_type);
   emit(MOV(dst_reg(sid), brw_imm_ud(stream_id)));

   /* reg::shift_count = 2 * (vertex_count - 1) */
   src_reg shift_count(this, glsl_type::uint_type);
   emit(SHL(dst_reg(shift_count), this->vertex_count, brw_imm_ud(1u)));

   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
    * stream_id << ((2 * (vertex_count - 1)) % 32).
    */
   src_reg mask(this, glsl_type::uint_type);
   emit(SHL(dst_reg(mask), sid, shift_count));
   emit(OR(dst_reg(this->control_data_bits), this->control_data_bits, mask));
}

void
vec4_gs_visitor::gs_emit_vertex(int stream_id)
{
   this->current_annotation = "emit vertex: safety check";

   /* Haswell and later hardware ignores the "Render Stream Select" bits
    * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
    * and instead sends all primitives down the pipeline for rasterization.
    * If the SOL stage is enabled, "Render Stream Select" is honored and
    * primitives bound to non-zero streams are discarded after stream output.
    *
    * Since the only purpose of primitives sent to non-zero streams is to
    * be recorded by transform feedback, we can simply discard all geometry
    * bound to these streams when transform feedback is disabled.
    */
   if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
      return;

   /* If we're outputting 32 control data bits or less, then we can wait
    * until the shader is over to output them all.  Otherwise we need to
    * output them as we go.  Now is the time to do it, since we're about to
    * output the vertex_count'th vertex, so it's guaranteed that the
    * control data bits associated with the (vertex_count - 1)th vertex are
    * correct.
    */
   if (c->control_data_header_size_bits > 32) {
      this->current_annotation = "emit vertex: emit control data bits";
      /* Only emit control data bits if we've finished accumulating a batch
       * of 32 bits.  This is the case when:
       *
       *     (vertex_count * bits_per_vertex) % 32 == 0
       *
       * (in other words, when the last 5 bits of vertex_count *
       * bits_per_vertex are 0).  Assuming bits_per_vertex == 2^n for some
       * integer n (which is always the case, since bits_per_vertex is
       * always 1 or 2), this is equivalent to requiring that the last 5-n
       * bits of vertex_count are 0:
       *
       *     vertex_count & (2^(5-n) - 1) == 0
       *
       * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
       * equivalent to:
       *
       *     vertex_count & (32 / bits_per_vertex - 1) == 0
       */
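      /* Concretely: with 2 bits per vertex this flushes a batch every 16
       * vertices (vertex_count & 15 == 0); with 1 bit per vertex, every 32.
       */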
      vec4_instruction *inst =
         emit(AND(dst_null_ud(), this->vertex_count,
                  brw_imm_ud(32 / c->control_data_bits_per_vertex - 1)));
      inst->conditional_mod = BRW_CONDITIONAL_Z;

      emit(IF(BRW_PREDICATE_NORMAL));
      {
         /* If vertex_count is 0, then no control data bits have been
          * accumulated yet, so we skip emitting them.
          */
         emit(CMP(dst_null_ud(), this->vertex_count, brw_imm_ud(0u),
                  BRW_CONDITIONAL_NEQ));
         emit(IF(BRW_PREDICATE_NORMAL));
         emit_control_data_bits();
         emit(BRW_OPCODE_ENDIF);

         /* Reset control_data_bits to 0 so we can start accumulating a new
          * batch.
          *
          * Note: in the case where vertex_count == 0, this neutralizes the
          * effect of any call to EndPrimitive() that the shader may have
          * made before outputting its first vertex.
          */
         inst = emit(MOV(dst_reg(this->control_data_bits), brw_imm_ud(0u)));
         inst->force_writemask_all = true;
      }
      emit(BRW_OPCODE_ENDIF);
   }

   this->current_annotation = "emit vertex: vertex data";
   emit_vertex();

   /* In stream mode we have to set control data bits for all vertices
    * unless we have disabled control data bits completely (which we do
    * for GL_POINTS outputs that don't use streams).
    */
   if (c->control_data_header_size_bits > 0 &&
       gs_prog_data->control_data_format ==
          GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
      this->current_annotation = "emit vertex: Stream control data bits";
      set_stream_control_data_bits(stream_id);
   }

   this->current_annotation = NULL;
}

void
vec4_gs_visitor::gs_end_primitive()
{
   /* We can only do EndPrimitive() functionality when the control data
    * consists of cut bits.  Fortunately, the only time it isn't is when the
    * output type is points, in which case EndPrimitive() is a no-op.
    */
   if (gs_prog_data->control_data_format !=
       GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
      return;
   }

   if (c->control_data_header_size_bits == 0)
      return;

   /* Cut bits use one bit per vertex. */
   assert(c->control_data_bits_per_vertex == 1);

   /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
    * vertex n, 0 otherwise.  So all we need to do here is mark bit
    * (vertex_count - 1) % 32 in the cut_bits register to indicate that
    * EndPrimitive() was called after emitting vertex (vertex_count - 1);
    * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
    *
    * Note that if EndPrimitive() is called before emitting any vertices, this
    * will cause us to set bit 31 of the control_data_bits register to 1.
    * That's fine because:
    *
    * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
    *   output, so the hardware will ignore cut bit 31.
    *
    * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
    *   last vertex, so setting cut bit 31 has no effect (since the primitive
    *   is automatically ended when the GS terminates).
    *
    * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
    *   control_data_bits register to 0 when the first vertex is emitted.
    */

   /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
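   /* E.g., an EndPrimitive() right after the 5th vertex (vertex_count == 5)
    * sets cut bit 4 of the current 32-bit batch.
    */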
   src_reg one(this, glsl_type::uint_type);
   emit(MOV(dst_reg(one), brw_imm_ud(1u)));
   src_reg prev_count(this, glsl_type::uint_type);
   emit(ADD(dst_reg(prev_count), this->vertex_count, brw_imm_ud(0xffffffffu)));
   src_reg mask(this, glsl_type::uint_type);
   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
    * ((vertex_count - 1) % 32).
    */
   emit(SHL(dst_reg(mask), one, prev_count));
   emit(OR(dst_reg(this->control_data_bits), this->control_data_bits, mask));
}

static const GLuint gl_prim_to_hw_prim[GL_TRIANGLE_STRIP_ADJACENCY+1] = {
   [GL_POINTS] = _3DPRIM_POINTLIST,
   [GL_LINES] = _3DPRIM_LINELIST,
   [GL_LINE_LOOP] = _3DPRIM_LINELOOP,
   [GL_LINE_STRIP] = _3DPRIM_LINESTRIP,
   [GL_TRIANGLES] = _3DPRIM_TRILIST,
   [GL_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
   [GL_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
   [GL_QUADS] = _3DPRIM_QUADLIST,
   [GL_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
   [GL_POLYGON] = _3DPRIM_POLYGON,
   [GL_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
   [GL_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
   [GL_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
   [GL_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
};

} /* namespace brw */

extern "C" const unsigned *
brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_gs_prog_key *key,
               struct brw_gs_prog_data *prog_data,
               nir_shader *nir,
               int shader_time_index,
               struct brw_compile_stats *stats,
               char **error_str)
{
   struct brw_gs_compile c;
   memset(&c, 0, sizeof(c));
   c.key = *key;

   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_GEOMETRY];
   const bool debug_enabled = INTEL_DEBUG(DEBUG_GS);

   prog_data->base.base.stage = MESA_SHADER_GEOMETRY;

   /* The GLSL linker will have already matched up GS inputs and the outputs
    * of prior stages.  The driver does extend VS outputs in some cases, but
    * only for legacy OpenGL or Gfx4-5 hardware, neither of which offer
    * geometry shader support.  So we can safely ignore that.
    *
    * For SSO pipelines, we use a fixed VUE map layout based on variable
    * locations, so we can rely on rendezvous-by-location making this work.
    */
   GLbitfield64 inputs_read = nir->info.inputs_read;
   brw_compute_vue_map(compiler->devinfo,
                       &c.input_vue_map, inputs_read,
                       nir->info.separate_shader, 1);

   brw_nir_apply_key(nir, compiler, &key->base, 8, is_scalar);
   brw_nir_lower_vue_inputs(nir, &c.input_vue_map);
   brw_nir_lower_vue_outputs(nir);
   brw_postprocess_nir(nir, compiler, is_scalar, debug_enabled,
                       key->base.robust_buffer_access);

   prog_data->base.clip_distance_mask =
      ((1 << nir->info.clip_distance_array_size) - 1);
   prog_data->base.cull_distance_mask =
      ((1 << nir->info.cull_distance_array_size) - 1) <<
      nir->info.clip_distance_array_size;

   prog_data->include_primitive_id =
      BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_PRIMITIVE_ID);

   prog_data->invocations = nir->info.gs.invocations;

   if (compiler->devinfo->ver >= 8)
      nir_gs_count_vertices_and_primitives(
         nir, &prog_data->static_vertex_count, nullptr, 1u);

   if (compiler->devinfo->ver >= 7) {
      if (nir->info.gs.output_primitive == GL_POINTS) {
         /* When the output type is points, the geometry shader may output data
          * to multiple streams, and EndPrimitive() has no effect.  So we
          * configure the hardware to interpret the control data as stream ID.
          */
         prog_data->control_data_format = GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_SID;

         /* We only have to emit control bits if we are using non-zero streams */
         if (nir->info.gs.active_stream_mask != (1 << 0))
            c.control_data_bits_per_vertex = 2;
         else
            c.control_data_bits_per_vertex = 0;
      } else {
         /* When the output type is triangle_strip or line_strip, EndPrimitive()
          * may be used to terminate the current strip and start a new one
          * (similar to primitive restart), and outputting data to multiple
          * streams is not supported.  So we configure the hardware to interpret
          * the control data as EndPrimitive information (a.k.a. "cut bits").
          */
         prog_data->control_data_format = GFX7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT;

         /* We only need to output control data if the shader actually calls
          * EndPrimitive().
          */
         c.control_data_bits_per_vertex =
            nir->info.gs.uses_end_primitive ? 1 : 0;
      }
   } else {
      /* There are no control data bits in gfx6. */
      c.control_data_bits_per_vertex = 0;
   }
   c.control_data_header_size_bits =
      nir->info.gs.vertices_out * c.control_data_bits_per_vertex;

   /* 1 HWORD = 32 bytes = 256 bits */
   prog_data->control_data_header_size_hwords =
      ALIGN(c.control_data_header_size_bits, 256) / 256;
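   /* E.g., a stream-using point GS with max_vertices == 100 needs 200 bits
    * of control data, which rounds up to a 1-hword (256-bit) header.
    */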

   /* Compute the output vertex size.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 STATE_GS - Output Vertex
    * Size (p168):
    *
    *     [0,62] indicating [1,63] 16B units
    *
    *     Specifies the size of each vertex stored in the GS output entry
    *     (following any Control Header data) as a number of 128-bit units
    *     (minus one).
    *
    *     Programming Restrictions: The vertex size must be programmed as a
    *     multiple of 32B units with the following exception: Rendering is
    *     disabled (as per SOL stage state) and the vertex size output by the
    *     GS thread is 16B.
    *
    *     If rendering is enabled (as per SOL state) the vertex size must be
    *     programmed as a multiple of 32B units. In other words, the only time
    *     software can program a vertex size with an odd number of 16B units
    *     is when rendering is disabled.
    *
    * Note: B=bytes in the above text.
    *
    * It doesn't seem worth the extra trouble to optimize the case where the
    * vertex size is 16B (especially since this would require special-casing
    * the GEN assembly that writes to the URB).  So we just set the vertex
    * size to a multiple of 32B (2 vec4's) in all cases.
    *
    * The maximum output vertex size is 62*16 = 992 bytes (31 hwords).  We
    * budget that as follows:
    *
    *   512 bytes for varyings (a varying component is 4 bytes and
    *             gl_MaxGeometryOutputComponents = 128)
    *    16 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *             bytes)
    *    16 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *             even if it's not used)
    *    32 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *             whenever clip planes are enabled, even if the shader doesn't
    *             write to gl_ClipDistance)
    *    16 bytes overhead since the VUE size must be a multiple of 32 bytes
    *             (see below)--this causes up to 1 VUE slot to be wasted
    *   400 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot (12 bytes)
    * per interpolation type, so this is plenty.
    */
   unsigned output_vertex_size_bytes = prog_data->base.vue_map.num_slots * 16;
   assert(compiler->devinfo->ver == 6 ||
          output_vertex_size_bytes <= GFX7_MAX_GS_OUTPUT_VERTEX_SIZE_BYTES);
   prog_data->output_vertex_size_hwords =
      ALIGN(output_vertex_size_bytes, 32) / 32;
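   /* E.g., a VUE map with 10 slots gives 160 bytes per vertex, which is
    * already a multiple of 32B and so becomes 5 hwords.
    */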

   /* Compute URB entry size.  The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     64 bytes for the control data header (cut indices or StreamID bits)
    *   4096 bytes for varyings (a varying component is 4 bytes and
    *              gl_MaxGeometryTotalOutputComponents = 1024)
    *   4096 bytes overhead for VARYING_SLOT_PSIZ (each varying slot is 16
    *              bytes/vertex and gl_MaxGeometryOutputVertices is 256)
    *   4096 bytes overhead for gl_Position (we allocate it a slot in the VUE
    *              even if it's not used)
    *   8192 bytes overhead for gl_ClipDistance (we allocate it 2 VUE slots
    *              whenever clip planes are enabled, even if the shader doesn't
    *              write to gl_ClipDistance)
    *   4096 bytes overhead since the VUE size must be a multiple of 32
    *              bytes (see above)--this causes up to 1 VUE slot to be wasted
    *   8128 bytes available for varying packing overhead
    *
    * Worst-case varying packing overhead is 3/4 of a varying slot per
    * interpolation type, which works out to 3072 bytes, so this would allow
    * us to accommodate 2 interpolation types without any danger of running
    * out of URB space.
    *
    * In practice, the risk of running out of URB space is very small, since
    * the above figures are all worst-case, and most of them scale with the
    * number of output vertices.  So we'll just calculate the amount of space
    * we need, and if it's too large, fail to compile.
    *
    * The above is for gfx7+ where we have a single URB entry that will hold
    * all the output. In gfx6, we will have to allocate URB entries for every
    * vertex we emit, so our URB entries only need to be large enough to hold
    * a single vertex. Also, gfx6 does not have a control data header.
    */
   unsigned output_size_bytes;
   if (compiler->devinfo->ver >= 7) {
      output_size_bytes =
         prog_data->output_vertex_size_hwords * 32 * nir->info.gs.vertices_out;
      output_size_bytes += 32 * prog_data->control_data_header_size_hwords;
   } else {
      output_size_bytes = prog_data->output_vertex_size_hwords * 32;
   }

   /* Broadwell stores "Vertex Count" as a full 8 DWord (32 byte) URB output,
    * which comes before the control header.
    */
   if (compiler->devinfo->ver >= 8)
      output_size_bytes += 32;

   /* Shaders can technically set max_vertices = 0, at which point we
    * may have a URB size of 0 bytes.  Nothing good can come from that,
    * so enforce a minimum size.
    */
   if (output_size_bytes == 0)
      output_size_bytes = 1;

   unsigned max_output_size_bytes = GFX7_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (compiler->devinfo->ver == 6)
      max_output_size_bytes = GFX6_MAX_GS_URB_ENTRY_SIZE_BYTES;
   if (output_size_bytes > max_output_size_bytes)
      return NULL;

   /* URB entry sizes are stored as a multiple of 64 bytes in gfx7+ and
    * a multiple of 128 bytes in gfx6.
    */
   if (compiler->devinfo->ver >= 7) {
      prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 64) / 64;
   } else {
      prog_data->base.urb_entry_size = ALIGN(output_size_bytes, 128) / 128;
   }
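
   /* E.g., 224 output bytes become an entry size of 4 on gfx7+ (4 * 64B)
    * and 2 on gfx6 (2 * 128B).
    */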

   assert(nir->info.gs.output_primitive < ARRAY_SIZE(brw::gl_prim_to_hw_prim));
   prog_data->output_topology =
      brw::gl_prim_to_hw_prim[nir->info.gs.output_primitive];

   prog_data->vertices_in = nir->info.gs.vertices_in;

   /* GS inputs are read from the VUE 256 bits (2 vec4's) at a time, so we
    * need to program a URB read length of ceiling(num_slots / 2).
    */
   prog_data->base.urb_read_length = (c.input_vue_map.num_slots + 1) / 2;
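   /* E.g., inputs occupying 5 VUE slots per vertex give a read length
    * of 3 (the (num_slots + 1) / 2 above rounds up).
    */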

   /* Now that prog_data setup is done, we are ready to actually compile the
    * program.
    */
   if (unlikely(debug_enabled)) {
      fprintf(stderr, "GS Input ");
      brw_print_vue_map(stderr, &c.input_vue_map, MESA_SHADER_GEOMETRY);
      fprintf(stderr, "GS Output ");
      brw_print_vue_map(stderr, &prog_data->base.vue_map, MESA_SHADER_GEOMETRY);
   }

   if (is_scalar) {
      fs_visitor v(compiler, log_data, mem_ctx, &c, prog_data, nir,
                   shader_time_index, debug_enabled);
      if (v.run_gs()) {
         prog_data->base.dispatch_mode = DISPATCH_MODE_SIMD8;
         prog_data->base.base.dispatch_grf_start_reg = v.payload.num_regs;

         fs_generator g(compiler, log_data, mem_ctx,
                        &prog_data->base.base, false, MESA_SHADER_GEOMETRY);
         if (unlikely(debug_enabled)) {
            const char *label =
               nir->info.label ? nir->info.label : "unnamed";
            char *name = ralloc_asprintf(mem_ctx, "%s geometry shader %s",
                                         label, nir->info.name);
            g.enable_debug(name);
         }
         g.generate_code(v.cfg, 8, v.shader_stats,
                         v.performance_analysis.require(), stats);
         g.add_const_data(nir->constant_data, nir->constant_data_size);
         return g.get_assembly();
      }

      if (error_str)
         *error_str = ralloc_strdup(mem_ctx, v.fail_msg);

      return NULL;
   }

   if (compiler->devinfo->ver >= 7) {
      /* Compile the geometry shader in DUAL_OBJECT dispatch mode, if we can do
       * so without spilling. If the GS invocations count > 1, then we can't use
       * dual object mode.
       */
      if (prog_data->invocations <= 1 &&
          !INTEL_DEBUG(DEBUG_NO_DUAL_OBJECT_GS)) {
         prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_OBJECT;

         brw::vec4_gs_visitor v(compiler, log_data, &c, prog_data, nir,
                                mem_ctx, true /* no_spills */,
                                shader_time_index, debug_enabled);

         /* Back up 'nr_params' and 'param', as they can be modified by the
          * DUAL_OBJECT visitor. If it fails, we will run the fallback
          * (DUAL_INSTANCED or SINGLE mode) and we need to restore the
          * original values.
          */
         const unsigned param_count = prog_data->base.base.nr_params;
         uint32_t *param = ralloc_array(NULL, uint32_t, param_count);
         memcpy(param, prog_data->base.base.param,
                sizeof(uint32_t) * param_count);

         if (v.run()) {
            /* Success! The backup is not needed. */
            ralloc_free(param);
            return brw_vec4_generate_assembly(compiler, log_data, mem_ctx,
                                              nir, &prog_data->base,
                                              v.cfg,
                                              v.performance_analysis.require(),
                                              stats, debug_enabled);
         } else {
            /* These variables could be modified by the execution of the GS
             * visitor if it packed the uniforms in the push constant buffer.
             * As it failed, we need to restore them so we can start again
             * with DUAL_INSTANCED or SINGLE mode.
             *
             * FIXME: Could more variables be modified by this execution?
             */
            memcpy(prog_data->base.base.param, param,
                   sizeof(uint32_t) * param_count);
            prog_data->base.base.nr_params = param_count;
            prog_data->base.base.nr_pull_params = 0;
            ralloc_free(param);
         }
      }
   }

   /* Either we failed to compile in DUAL_OBJECT mode (probably because it
    * would have required spilling) or DUAL_OBJECT mode is disabled.  So fall
    * back to DUAL_INSTANCED or SINGLE mode, which consumes fewer registers.
    *
    * FIXME: Single dispatch mode requires that the driver can handle
    * interleaving of input registers, but this is already supported (dual
    * instance mode has the same requirement). However, to take full advantage
    * of single dispatch mode to reduce register pressure we would also need to
    * do interleaved outputs, but currently, the vec4 visitor and generator
    * classes do not support this, so at the moment register pressure in
    * single and dual instance modes is the same.
    *
    * From the Ivy Bridge PRM, Vol2 Part1 7.2.1.1 "3DSTATE_GS":
    * "If InstanceCount>1, DUAL_OBJECT mode is invalid. Software will likely
    * want to use DUAL_INSTANCE mode for higher performance, but SINGLE mode
    * is also supported. When InstanceCount=1 (one instance per object) software
    * can decide which dispatch mode to use. DUAL_OBJECT mode would likely be
    * the best choice for performance, followed by SINGLE mode."
    *
    * So SINGLE mode is more performant when invocations == 1 and DUAL_INSTANCE
    * mode is more performant when invocations > 1. Gfx6 only supports
    * SINGLE mode.
    */
   if (prog_data->invocations <= 1 || compiler->devinfo->ver < 7)
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X1_SINGLE;
   else
      prog_data->base.dispatch_mode = DISPATCH_MODE_4X2_DUAL_INSTANCE;

   brw::vec4_gs_visitor *gs = NULL;
   const unsigned *ret = NULL;

   if (compiler->devinfo->ver >= 7)
      gs = new brw::vec4_gs_visitor(compiler, log_data, &c, prog_data,
                                    nir, mem_ctx, false /* no_spills */,
                                    shader_time_index, debug_enabled);
   else
      gs = new brw::gfx6_gs_visitor(compiler, log_data, &c, prog_data,
                                    nir, mem_ctx, false /* no_spills */,
                                    shader_time_index, debug_enabled);

   if (!gs->run()) {
      if (error_str)
         *error_str = ralloc_strdup(mem_ctx, gs->fail_msg);
   } else {
      ret = brw_vec4_generate_assembly(compiler, log_data, mem_ctx, nir,
                                       &prog_data->base, gs->cfg,
                                       gs->performance_analysis.require(),
                                       stats, debug_enabled);
   }

   delete gs;
   return ret;
}