/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_generator.cpp
 *
 * This file supports generating code from the FS LIR to the actual
 * native instructions.
 */

#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_cfg.h"
#include "util/mesa-sha1.h"

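/* Map an IR register file onto the hardware register file encoding used by
 * the EU assembler.  Only files that can still appear at code-generation
 * time are handled; ATTR and UNIFORM are expected to have been lowered away
 * before this point.
 */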
static enum brw_reg_file
brw_file_from_reg(fs_reg *reg)
{
   switch (reg->file) {
   case ARF:
      return BRW_ARCHITECTURE_REGISTER_FILE;
   case FIXED_GRF:
   case VGRF:
      return BRW_GENERAL_REGISTER_FILE;
   case MRF:
      return BRW_MESSAGE_REGISTER_FILE;
   case IMM:
      return BRW_IMMEDIATE_VALUE;
   case BAD_FILE:
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }
   return BRW_ARCHITECTURE_REGISTER_FILE;
}

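/* Translate an fs_reg from the FS LIR into a fully-specified brw_reg,
 * including the hardware region description (VertStride/Width/HorzStride),
 * taking instruction compression and the IVB/BYT double-precision regioning
 * rules into account.
 */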
static struct brw_reg
brw_reg_from_fs_reg(const struct intel_device_info *devinfo, fs_inst *inst,
                    fs_reg *reg, bool compressed)
{
   struct brw_reg brw_reg;

   switch (reg->file) {
   case MRF:
      assert((reg->nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(devinfo->ver));
      FALLTHROUGH;
   case VGRF:
      if (reg->stride == 0) {
         brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->nr, 0);
      } else {
         /* From the Haswell PRM:
          *
          *  "VertStride must be used to cross GRF register boundaries. This
          *   rule implies that elements within a 'Width' cannot cross GRF
          *   boundaries."
          *
          * The maximum width value that could satisfy this restriction is:
          */
         const unsigned reg_width = REG_SIZE / (reg->stride * type_sz(reg->type));

         /* Because the hardware can only split source regions at a whole
          * multiple of width during decompression (i.e. vertically), clamp
          * the value obtained above to the physical execution size of a
          * single decompressed chunk of the instruction:
          */
         const unsigned phys_width = compressed ? inst->exec_size / 2 :
                                     inst->exec_size;

         const unsigned max_hw_width = 16;

         /* XXX - The equation above is strictly speaking not correct on
          *       hardware that supports unbalanced GRF writes -- On Gfx9+
          *       each decompressed chunk of the instruction may have a
          *       different execution size when the number of components
          *       written to each destination GRF is not the same.
          */
         if (reg->stride > 4) {
            assert(reg != &inst->dst);
            assert(reg->stride * type_sz(reg->type) <= REG_SIZE);
            brw_reg = brw_vecn_reg(1, brw_file_from_reg(reg), reg->nr, 0);
            brw_reg = stride(brw_reg, reg->stride, 1, 0);
         } else {
            const unsigned width = MIN3(reg_width, phys_width, max_hw_width);
            brw_reg = brw_vecn_reg(width, brw_file_from_reg(reg), reg->nr, 0);
            brw_reg = stride(brw_reg, width * reg->stride, width, reg->stride);
         }

         if (devinfo->verx10 == 70) {
            /* From the IvyBridge PRM (EU Changes by Processor Generation, page 13):
             *  "Each DF (Double Float) operand uses an element size of 4 rather
             *   than 8 and all regioning parameters are twice what the values
             *   would be based on the true element size: ExecSize, Width,
             *   HorzStride, and VertStride. Each DF operand uses a pair of
             *   channels and all masking and swizzling should be adjusted
             *   appropriately."
             *
             * From the IvyBridge PRM (Special Requirements for Handling Double
             * Precision Data Types, page 71):
             *  "In Align1 mode, all regioning parameters like stride, execution
             *   size, and width must use the syntax of a pair of packed
             *   floats. The offsets for these data types must be 64-bit
             *   aligned. The execution size and regioning parameters are in terms
             *   of floats."
             *
             * Summarized: when handling DF-typed arguments, ExecSize,
             * VertStride, and Width must be doubled.
             *
             * It applies to BayTrail too.
             */
            if (type_sz(reg->type) == 8) {
               brw_reg.width++;
               if (brw_reg.vstride > 0)
                  brw_reg.vstride++;
               assert(brw_reg.hstride == BRW_HORIZONTAL_STRIDE_1);
            }

            /* When converting from DF->F, we set the destination stride to 2
             * because each d2f conversion implicitly writes 2 floats, the
             * first one being the converted value.  IVB/BYT actually writes
             * two F components per SIMD channel, and every other component is
             * filled with garbage.
             */
            if (reg == &inst->dst && get_exec_type_size(inst) == 8 &&
                type_sz(inst->dst.type) < 8) {
               assert(brw_reg.hstride > BRW_HORIZONTAL_STRIDE_1);
               brw_reg.hstride--;
            }
         }
      }

      brw_reg = retype(brw_reg, reg->type);
      brw_reg = byte_offset(brw_reg, reg->offset);
      brw_reg.abs = reg->abs;
      brw_reg.negate = reg->negate;
      break;
   case ARF:
   case FIXED_GRF:
   case IMM:
      assert(reg->offset == 0);
      brw_reg = reg->as_brw_reg();
      break;
   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }

   /* On HSW+, scalar DF sources can be accessed using the normal <0,1,0>
    * region, but on IVB and BYT DF regions must be programmed in terms of
    * floats.  A <0,2,1> region accomplishes this.
    */
   if (devinfo->verx10 == 70 &&
       type_sz(reg->type) == 8 &&
       brw_reg.vstride == BRW_VERTICAL_STRIDE_0 &&
       brw_reg.width == BRW_WIDTH_1 &&
       brw_reg.hstride == BRW_HORIZONTAL_STRIDE_0) {
      brw_reg.width = BRW_WIDTH_2;
      brw_reg.hstride = BRW_HORIZONTAL_STRIDE_1;
   }

   return brw_reg;
}

fs_generator::fs_generator(const struct brw_compiler *compiler, void *log_data,
                           void *mem_ctx,
                           struct brw_stage_prog_data *prog_data,
                           bool runtime_check_aads_emit,
                           gl_shader_stage stage)

   : compiler(compiler), log_data(log_data),
     devinfo(compiler->devinfo),
     prog_data(prog_data), dispatch_width(0),
     runtime_check_aads_emit(runtime_check_aads_emit), debug_flag(false),
     shader_name(NULL), stage(stage), mem_ctx(mem_ctx)
{
   p = rzalloc(mem_ctx, struct brw_codegen);
   brw_init_codegen(devinfo, p, mem_ctx);

   /* In the FS code generator, we are very careful to ensure that we always
    * set the right execution size so we don't need the EU code to "help" us
    * by trying to infer it.  Sometimes, it infers the wrong thing.
    */
   p->automatic_exec_sizes = false;
}

fs_generator::~fs_generator()
{
}

class ip_record : public exec_node {
public:
   DECLARE_RALLOC_CXX_OPERATORS(ip_record)

   ip_record(int ip)
   {
      this->ip = ip;
   }

   int ip;
};

bool
fs_generator::patch_halt_jumps()
{
   if (this->discard_halt_patches.is_empty())
      return false;

   int scale = brw_jump_scale(p->devinfo);

   if (devinfo->ver >= 6) {
      /* There is a somewhat strange undocumented requirement of using
       * HALT, according to the simulator.  If some channel has HALTed to
       * a particular UIP, then by the end of the program, every channel
       * must have HALTed to that UIP.  Furthermore, the tracking is a
       * stack, so you can't do the final halt of a UIP after starting
       * halting to a new UIP.
       *
       * Symptoms of not emitting this instruction on actual hardware
       * included GPU hangs and sparkly rendering on the piglit discard
       * tests.
       */
      brw_inst *last_halt = brw_HALT(p);
      brw_inst_set_uip(p->devinfo, last_halt, 1 * scale);
      brw_inst_set_jip(p->devinfo, last_halt, 1 * scale);
   }

   int ip = p->nr_insn;

   foreach_in_list(ip_record, patch_ip, &discard_halt_patches) {
      brw_inst *patch = &p->store[patch_ip->ip];

      assert(brw_inst_opcode(p->devinfo, patch) == BRW_OPCODE_HALT);
      if (devinfo->ver >= 6) {
         /* HALT takes a half-instruction distance from the pre-incremented IP. */
         brw_inst_set_uip(p->devinfo, patch, (ip - patch_ip->ip) * scale);
      } else {
         brw_set_src1(p, patch, brw_imm_d((ip - patch_ip->ip) * scale));
      }
   }

   this->discard_halt_patches.make_empty();

   if (devinfo->ver < 6) {
      /* From the g965 PRM:
       *
       *    "As DMask is not automatically reloaded into AMask upon completion
       *     of this instruction, software has to manually restore AMask upon
       *     completion."
       *
       * DMask lives in the bottom 16 bits of sr0.1.
       */
      brw_inst *reset = brw_MOV(p, brw_mask_reg(BRW_AMASK),
                                   retype(brw_sr0_reg(1), BRW_REGISTER_TYPE_UW));
      brw_inst_set_exec_size(devinfo, reset, BRW_EXECUTE_1);
      brw_inst_set_mask_control(devinfo, reset, BRW_MASK_DISABLE);
      brw_inst_set_qtr_control(devinfo, reset, BRW_COMPRESSION_NONE);
      brw_inst_set_thread_control(devinfo, reset, BRW_THREAD_SWITCH);
   }

   if (devinfo->ver == 4 && devinfo->platform != INTEL_PLATFORM_G4X) {
      /* From the g965 PRM:
       *
       *    "[DevBW, DevCL] Erratum: The subfields in mask stack register are
       *     reset to zero during graphics reset, however, they are not
       *     initialized at thread dispatch. These subfields will retain the
       *     values from the previous thread. Software should make sure the
       *     mask stack is empty (reset to zero) before terminating the thread.
       *     In case that this is not practical, software may have to reset the
       *     mask stack at the beginning of each kernel, which will impact the
       *     performance."
       *
       * Luckily we can rely on:
       *
       *    "[DevBW, DevCL] This register access restriction is not
       *     applicable, hardware does ensure execution pipeline coherency,
       *     when a mask stack register is used as an explicit source and/or
       *     destination."
       */
      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);

      brw_set_default_exec_size(p, BRW_EXECUTE_2);
      brw_MOV(p, vec2(brw_mask_stack_depth_reg(0)), brw_imm_uw(0));

      brw_set_default_exec_size(p, BRW_EXECUTE_16);
      /* Reset the if stack. */
      brw_MOV(p, retype(brw_mask_stack_reg(0), BRW_REGISTER_TYPE_UW),
              brw_imm_uw(0));

      brw_pop_insn_state(p);
   }

   return true;
}

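/* Emit the SEND (or split SENDS) instruction for a logical send, combining
 * the immediate and indirect parts of the descriptor and extended
 * descriptor.  The split-payload form is used whenever any extended
 * descriptor bits are needed, which also covers the dual-payload case since
 * ex_mlen is encoded in ex_desc.
 */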
void
fs_generator::generate_send(fs_inst *inst,
                            struct brw_reg dst,
                            struct brw_reg desc,
                            struct brw_reg ex_desc,
                            struct brw_reg payload,
                            struct brw_reg payload2)
{
   const bool dst_is_null = dst.file == BRW_ARCHITECTURE_REGISTER_FILE &&
                            dst.nr == BRW_ARF_NULL;
   const unsigned rlen = dst_is_null ? 0 : inst->size_written / REG_SIZE;

   uint32_t desc_imm = inst->desc |
      brw_message_desc(devinfo, inst->mlen, rlen, inst->header_size);

   uint32_t ex_desc_imm = inst->ex_desc |
      brw_message_ex_desc(devinfo, inst->ex_mlen);

   if (ex_desc.file != BRW_IMMEDIATE_VALUE || ex_desc.ud || ex_desc_imm) {
      /* If we have any sort of extended descriptor, then we need SENDS.  This
       * also covers the dual-payload case because ex_mlen goes in ex_desc.
       */
      brw_send_indirect_split_message(p, inst->sfid, dst, payload, payload2,
                                      desc, desc_imm, ex_desc, ex_desc_imm,
                                      inst->eot);
      if (inst->check_tdr)
         brw_inst_set_opcode(p->devinfo, brw_last_inst,
                             devinfo->ver >= 12 ? BRW_OPCODE_SENDC : BRW_OPCODE_SENDSC);
   } else {
      brw_send_indirect_message(p, inst->sfid, dst, payload, desc, desc_imm,
                                inst->eot);
      if (inst->check_tdr)
         brw_inst_set_opcode(p->devinfo, brw_last_inst, BRW_OPCODE_SENDC);
   }
}

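/* Emit a single FB write message.  On gen4/5, the second register of the
 * message header is first copied from the implied header into the payload
 * with compression, masking and predication disabled.
 */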
void
fs_generator::fire_fb_write(fs_inst *inst,
                            struct brw_reg payload,
                            struct brw_reg implied_header,
                            GLuint nr)
{
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);

   if (devinfo->ver < 6) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, offset(retype(payload, BRW_REGISTER_TYPE_UD), 1),
              offset(retype(implied_header, BRW_REGISTER_TYPE_UD), 1));
      brw_pop_insn_state(p);
   }

   uint32_t msg_control = brw_fb_write_msg_control(inst, prog_data);

   /* We assume render targets start at 0, because headerless FB write
    * messages set "Render Target Index" to 0.  Using a different binding
    * table index would make it impossible to use headerless messages.
    */
   const uint32_t surf_index = inst->target;

   brw_inst *insn = brw_fb_WRITE(p,
                                 payload,
                                 retype(implied_header, BRW_REGISTER_TYPE_UW),
                                 msg_control,
                                 surf_index,
                                 nr,
                                 0,
                                 inst->eot,
                                 inst->last_rt,
                                 inst->header_size != 0);

   if (devinfo->ver >= 6)
      brw_inst_set_rt_slot_group(devinfo, insn, inst->group / 16);
}

void
fs_generator::generate_fb_write(fs_inst *inst, struct brw_reg payload)
{
   if (devinfo->verx10 <= 70) {
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      brw_set_default_flag_reg(p, 0, 0);
   }

   const struct brw_reg implied_header =
      devinfo->ver < 6 ? payload : brw_null_reg();

   if (inst->base_mrf >= 0)
      payload = brw_message_reg(inst->base_mrf);

   if (!runtime_check_aads_emit) {
      fire_fb_write(inst, payload, implied_header, inst->mlen);
   } else {
      /* This can only happen in gen < 6 */
      assert(devinfo->ver < 6);

      struct brw_reg v1_null_ud = vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));

      /* Check runtime bit to detect if we have to send AA data or not */
      brw_push_insn_state(p);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_set_default_exec_size(p, BRW_EXECUTE_1);
      brw_AND(p,
              v1_null_ud,
              retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD),
              brw_imm_ud(1<<26));
      brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);

      int jmp = brw_JMPI(p, brw_imm_ud(0), BRW_PREDICATE_NORMAL) - p->store;
      brw_pop_insn_state(p);
      {
         /* Don't send AA data */
         fire_fb_write(inst, offset(payload, 1), implied_header, inst->mlen-1);
      }
      brw_land_fwd_jump(p, jmp);
      fire_fb_write(inst, payload, implied_header, inst->mlen);
   }
}

void
fs_generator::generate_fb_read(fs_inst *inst, struct brw_reg dst,
                               struct brw_reg payload)
{
   assert(inst->size_written % REG_SIZE == 0);
   struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
   /* We assume that render targets start at binding table index 0. */
   const unsigned surf_index = inst->target;

   gfx9_fb_READ(p, dst, payload, surf_index,
                inst->header_size, inst->size_written / REG_SIZE,
                prog_data->persample_dispatch);
}

void
fs_generator::generate_mov_indirect(fs_inst *inst,
                                    struct brw_reg dst,
                                    struct brw_reg reg,
                                    struct brw_reg indirect_byte_offset)
{
   assert(indirect_byte_offset.type == BRW_REGISTER_TYPE_UD);
   assert(indirect_byte_offset.file == BRW_GENERAL_REGISTER_FILE);
   assert(!reg.abs && !reg.negate);
   assert(reg.type == dst.type);

   unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr;

   if (indirect_byte_offset.file == BRW_IMMEDIATE_VALUE) {
      imm_byte_offset += indirect_byte_offset.ud;

      reg.nr = imm_byte_offset / REG_SIZE;
      reg.subnr = imm_byte_offset % REG_SIZE;
      if (type_sz(reg.type) > 4 && !devinfo->has_64bit_float) {
         brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    subscript(reg, BRW_REGISTER_TYPE_D, 0));
         brw_set_default_swsb(p, tgl_swsb_null());
         brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    subscript(reg, BRW_REGISTER_TYPE_D, 1));
      } else {
         brw_MOV(p, dst, reg);
      }
   } else {
      /* Prior to Broadwell, there are only 8 address registers. */
      assert(inst->exec_size <= 8 || devinfo->ver >= 8);

      /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
      struct brw_reg addr = vec8(brw_address_reg(0));

      /* Whether we can use destination dependency control without running the
       * risk of a hang if an instruction gets shot down.
       */
      const bool use_dep_ctrl = !inst->predicate &&
                                inst->exec_size == dispatch_width;
      brw_inst *insn;

      /* The destination stride of an instruction (in bytes) must be greater
       * than or equal to the size of the rest of the instruction.  Since the
       * address register is of type UW, we can't use a D-type instruction.
       * In order to get around this, we retype to UW and use a stride.
       */
      indirect_byte_offset =
         retype(spread(indirect_byte_offset, 2), BRW_REGISTER_TYPE_UW);

      /* There are a number of reasons why we don't use the base offset here.
       * One reason is that the field is only 9 bits which means we can only
       * use it to access the first 16 GRFs.  Also, from the Haswell PRM
       * section "Register Region Restrictions":
       *
       *    "The lower bits of the AddressImmediate must not overflow to
       *     change the register address.  The lower 5 bits of Address
       *     Immediate when added to lower 5 bits of address register gives
       *     the sub-register offset. The upper bits of Address Immediate
       *     when added to upper bits of address register gives the register
       *     address. Any overflow from sub-register offset is dropped."
       *
       * Since the indirect may cause us to cross a register boundary, this
       * makes the base offset almost useless.  We could try and do something
       * clever where we use an actual base offset if base_offset % 32 == 0 but
       * that would mean we were generating different code depending on the
       * base offset.  Instead, for the sake of consistency, we'll just do the
       * add ourselves.  This restriction is only listed in the Haswell PRM
       * but empirical testing indicates that it applies on all older
       * generations and is lifted on Broadwell.
       *
       * In the end, while base_offset is nice to look at in the generated
       * code, using it saves us 0 instructions and would require quite a bit
       * of case-by-case work.  It's just not worth it.
       *
       * Due to a hardware bug some platforms (particularly Gfx11+) seem to
       * require the address components of all channels to be valid whether or
       * not they're active, which causes issues if we use VxH addressing
       * under non-uniform control-flow.  We can easily work around that by
       * initializing the whole address register with a pipelined NoMask MOV
       * instruction.
       */
      if (devinfo->ver >= 7) {
         insn = brw_MOV(p, addr, brw_imm_uw(imm_byte_offset));
         brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
         brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
         if (devinfo->ver >= 12)
            brw_set_default_swsb(p, tgl_swsb_null());
         else
            brw_inst_set_no_dd_clear(devinfo, insn, use_dep_ctrl);
      }

      insn = brw_ADD(p, addr, indirect_byte_offset, brw_imm_uw(imm_byte_offset));
      if (devinfo->ver >= 12)
         brw_set_default_swsb(p, tgl_swsb_regdist(1));
      else if (devinfo->ver >= 7)
         brw_inst_set_no_dd_check(devinfo, insn, use_dep_ctrl);

      if (type_sz(reg.type) > 4 &&
          ((devinfo->verx10 == 70) ||
           devinfo->platform == INTEL_PLATFORM_CHV || intel_device_info_is_9lp(devinfo) ||
           !devinfo->has_64bit_float || devinfo->verx10 >= 125)) {
         /* IVB has an issue (which we found empirically) where it reads two
          * address register components per channel for indirectly addressed
          * 64-bit sources.
          *
          * From the Cherryview PRM Vol 7. "Register Region Restrictions":
          *
          *    "When source or destination datatype is 64b or operation is
          *     integer DWord multiply, indirect addressing must not be used."
          *
          * To work around both of these, we do two integer MOVs instead of one
          * 64-bit MOV.  Because no double value should ever cross a register
          * boundary, it's safe to use the immediate offset in the indirect
          * here to handle adding 4 bytes to the offset and avoid the extra
          * ADD to the register file.
          */
         brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
                    retype(brw_VxH_indirect(0, 0), BRW_REGISTER_TYPE_D));
         brw_set_default_swsb(p, tgl_swsb_null());
         brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
                    retype(brw_VxH_indirect(0, 4), BRW_REGISTER_TYPE_D));
      } else {
         struct brw_reg ind_src = brw_VxH_indirect(0, 0);

         brw_inst *mov = brw_MOV(p, dst, retype(ind_src, reg.type));

         if (devinfo->ver == 6 && dst.file == BRW_MESSAGE_REGISTER_FILE &&
             !inst->get_next()->is_tail_sentinel() &&
             ((fs_inst *)inst->get_next())->mlen > 0) {
            /* From the Sandybridge PRM:
             *
             *    "[Errata: DevSNB(SNB)] If MRF register is updated by any
             *     instruction that “indexed/indirect” source AND is followed
             *     by a send, the instruction requires a “Switch”. This is to
             *     avoid race condition where send may dispatch before MRF is
             *     updated."
             */
            brw_inst_set_thread_control(devinfo, mov, BRW_THREAD_SWITCH);
         }
      }
   }
}

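/* Emit code for SHADER_OPCODE_SHUFFLE: for each channel, pick the source
 * channel named by idx.  Uniform sources and immediate indices reduce to a
 * plain MOV; the general case computes per-channel byte offsets into the
 * source and reads it through VxH indirect addressing, split into groups
 * narrow enough for the address register file.
 */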
void
fs_generator::generate_shuffle(fs_inst *inst,
                               struct brw_reg dst,
                               struct brw_reg src,
                               struct brw_reg idx)
{
   assert(src.file == BRW_GENERAL_REGISTER_FILE);
   assert(!src.abs && !src.negate);

   /* Ivy bridge has some strange behavior that makes this a real pain to
    * implement for 64-bit values so we just don't bother.
    */
   assert((devinfo->verx10 >= 75 && devinfo->has_64bit_float) ||
          type_sz(src.type) <= 4);

   /* Because we're using the address register, we're limited to 8-wide
    * execution on gfx7.  On gfx8, we're limited to 16-wide by the address
    * register file and 8-wide for 64-bit types.  We could try and make this
    * instruction splittable higher up in the compiler but that gets weird
    * because it reads all of the channels regardless of execution size.  It's
    * easier just to split it here.
    */
   const unsigned lower_width =
      devinfo->ver <= 7 || element_sz(src) > 4 || element_sz(dst) > 4 ? 8 :
      MIN2(16, inst->exec_size);

   brw_set_default_exec_size(p, cvt(lower_width) - 1);
   for (unsigned group = 0; group < inst->exec_size; group += lower_width) {
      brw_set_default_group(p, group);

      if ((src.vstride == 0 && src.hstride == 0) ||
          idx.file == BRW_IMMEDIATE_VALUE) {
         /* Trivial, the source is already uniform or the index is a constant.
          * We will typically not get here if the optimizer is doing its job,
          * but asserting would be mean.
          */
         const unsigned i = idx.file == BRW_IMMEDIATE_VALUE ? idx.ud : 0;
         struct brw_reg group_src = stride(suboffset(src, i), 0, 1, 0);
         struct brw_reg group_dst = suboffset(dst, group << (dst.hstride - 1));
         brw_MOV(p, group_dst, group_src);
      } else {
         /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
         struct brw_reg addr = vec8(brw_address_reg(0));

         struct brw_reg group_idx = suboffset(idx, group);

         if (lower_width == 8 && group_idx.width == BRW_WIDTH_16) {
            /* Things get grumpy if the register is too wide. */
            group_idx.width--;
            group_idx.vstride--;
         }

         assert(type_sz(group_idx.type) <= 4);
         if (type_sz(group_idx.type) == 4) {
            /* The destination stride of an instruction (in bytes) must be
             * greater than or equal to the size of the rest of the
             * instruction.  Since the address register is of type UW, we
             * can't use a D-type instruction.  In order to get around this,
             * we retype to UW and use a stride.
             */
            group_idx = retype(spread(group_idx, 2), BRW_REGISTER_TYPE_W);
         }

         uint32_t src_start_offset = src.nr * REG_SIZE + src.subnr;

         /* From the Haswell PRM:
          *
          *    "When a sequence of NoDDChk and NoDDClr are used, the last
          *     instruction that completes the scoreboard clear must have a
          *     non-zero execution mask. This means, if any kind of predication
          *     can change the execution mask or channel enable of the last
          *     instruction, the optimization must be avoided.  This is to
          *     avoid instructions being shot down the pipeline when no writes
          *     are required."
          *
          * Whenever predication is enabled or the instructions being emitted
          * aren't the full width, it's possible that it will be run with zero
          * channels enabled so we can't use dependency control without
          * running the risk of a hang if an instruction gets shot down.
          */
         const bool use_dep_ctrl = !inst->predicate &&
                                   lower_width == dispatch_width;
         brw_inst *insn;

         /* Due to a hardware bug some platforms (particularly Gfx11+) seem
          * to require the address components of all channels to be valid
          * whether or not they're active, which causes issues if we use VxH
          * addressing under non-uniform control-flow.  We can easily work
          * around that by initializing the whole address register with a
          * pipelined NoMask MOV instruction.
          */
         insn = brw_MOV(p, addr, brw_imm_uw(src_start_offset));
         brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
         brw_inst_set_pred_control(devinfo, insn, BRW_PREDICATE_NONE);
         if (devinfo->ver >= 12)
            brw_set_default_swsb(p, tgl_swsb_null());
         else
            brw_inst_set_no_dd_clear(devinfo, insn, use_dep_ctrl);

         /* Take into account the component size and horizontal stride. */
         assert(src.vstride == src.hstride + src.width);
         insn = brw_SHL(p, addr, group_idx,
                        brw_imm_uw(util_logbase2(type_sz(src.type)) +
                                   src.hstride - 1));
         if (devinfo->ver >= 12)
            brw_set_default_swsb(p, tgl_swsb_regdist(1));
         else
            brw_inst_set_no_dd_check(devinfo, insn, use_dep_ctrl);

         /* Add on the register start offset */
         brw_ADD(p, addr, addr, brw_imm_uw(src_start_offset));
         brw_MOV(p, suboffset(dst, group << (dst.hstride - 1)),
                 retype(brw_VxH_indirect(0, 0), src.type));
      }

      brw_set_default_swsb(p, tgl_swsb_null());
   }
}

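/* Emit code for SHADER_OPCODE_QUAD_SWIZZLE: apply a 4-channel swizzle
 * within each quad of channels.  Uniform sources become a single MOV,
 * 8-wide 32-bit cases before gfx11 can use an Align16 swizzled region, and
 * everything else is lowered to strided Align1 MOVs, either one per
 * recognized pattern or one per swizzle component.
 */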
void
fs_generator::generate_quad_swizzle(const fs_inst *inst,
                                    struct brw_reg dst, struct brw_reg src,
                                    unsigned swiz)
{
   /* Requires a quad. */
   assert(inst->exec_size >= 4);

   if (src.file == BRW_IMMEDIATE_VALUE ||
       has_scalar_region(src)) {
      /* The value is uniform across all channels */
      brw_MOV(p, dst, src);

   } else if (devinfo->ver < 11 && type_sz(src.type) == 4) {
      /* This only works on 8-wide 32-bit values */
      assert(inst->exec_size == 8);
      assert(src.hstride == BRW_HORIZONTAL_STRIDE_1);
      assert(src.vstride == src.width + 1);
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      struct brw_reg swiz_src = stride(src, 4, 4, 1);
      swiz_src.swizzle = swiz;
      brw_MOV(p, dst, swiz_src);

   } else {
      assert(src.hstride == BRW_HORIZONTAL_STRIDE_1);
      assert(src.vstride == src.width + 1);
      const struct brw_reg src_0 = suboffset(src, BRW_GET_SWZ(swiz, 0));

      switch (swiz) {
      case BRW_SWIZZLE_XXXX:
      case BRW_SWIZZLE_YYYY:
      case BRW_SWIZZLE_ZZZZ:
      case BRW_SWIZZLE_WWWW:
         brw_MOV(p, dst, stride(src_0, 4, 4, 0));
         break;

      case BRW_SWIZZLE_XXZZ:
      case BRW_SWIZZLE_YYWW:
         brw_MOV(p, dst, stride(src_0, 2, 2, 0));
         break;

      case BRW_SWIZZLE_XYXY:
      case BRW_SWIZZLE_ZWZW:
         assert(inst->exec_size == 4);
         brw_MOV(p, dst, stride(src_0, 0, 2, 1));
         break;

      default:
         assert(inst->force_writemask_all);
         brw_set_default_exec_size(p, cvt(inst->exec_size / 4) - 1);

         for (unsigned c = 0; c < 4; c++) {
            brw_inst *insn = brw_MOV(
               p, stride(suboffset(dst, c),
                         4 * inst->dst.stride, 1, 4 * inst->dst.stride),
               stride(suboffset(src, BRW_GET_SWZ(swiz, c)), 4, 1, 0));

            if (devinfo->ver < 12) {
               brw_inst_set_no_dd_clear(devinfo, insn, c < 3);
               brw_inst_set_no_dd_check(devinfo, insn, c > 0);
            }

            brw_set_default_swsb(p, tgl_swsb_null());
         }

         break;
      }
   }
}

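/* Emit a SIMD8 URB read message.  The header register carries the URB
 * handles; the PER_SLOT variant additionally sets the per-slot-offset bit
 * in the descriptor.
 */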
void
fs_generator::generate_urb_read(fs_inst *inst,
                                struct brw_reg dst,
                                struct brw_reg header)
{
   assert(inst->size_written % REG_SIZE == 0);
   assert(header.file == BRW_GENERAL_REGISTER_FILE);
   assert(header.type == BRW_REGISTER_TYPE_UD);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UD));
   brw_set_src0(p, send, header);
   if (devinfo->ver < 12)
      brw_set_src1(p, send, brw_imm_ud(0u));

   brw_inst_set_sfid(p->devinfo, send, BRW_SFID_URB);
   brw_inst_set_urb_opcode(p->devinfo, send, GFX8_URB_OPCODE_SIMD8_READ);

   if (inst->opcode == SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT)
      brw_inst_set_urb_per_slot_offset(p->devinfo, send, true);

   brw_inst_set_mlen(p->devinfo, send, inst->mlen);
   brw_inst_set_rlen(p->devinfo, send, inst->size_written / REG_SIZE);
   brw_inst_set_header_present(p->devinfo, send, true);
   brw_inst_set_urb_global_offset(p->devinfo, send, inst->offset);
}

void
fs_generator::generate_urb_write(fs_inst *inst, struct brw_reg payload)
{
   brw_inst *insn = brw_next_insn(p, BRW_OPCODE_SEND);

   brw_set_dest(p, insn, brw_null_reg());
   brw_set_src0(p, insn, payload);
   if (devinfo->ver < 12)
      brw_set_src1(p, insn, brw_imm_ud(0u));

   brw_inst_set_sfid(p->devinfo, insn, BRW_SFID_URB);
   brw_inst_set_urb_opcode(p->devinfo, insn, GFX8_URB_OPCODE_SIMD8_WRITE);

   if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT ||
       inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT)
      brw_inst_set_urb_per_slot_offset(p->devinfo, insn, true);

   if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED ||
       inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT)
      brw_inst_set_urb_channel_mask_present(p->devinfo, insn, true);

   brw_inst_set_mlen(p->devinfo, insn, inst->mlen);
   brw_inst_set_rlen(p->devinfo, insn, 0);
   brw_inst_set_eot(p->devinfo, insn, inst->eot);
   brw_inst_set_header_present(p->devinfo, insn, true);
   brw_inst_set_urb_global_offset(p->devinfo, insn, inst->offset);
}

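/* Emit the message that terminates a compute thread: a SEND with EOT to the
 * message gateway on XeHP+, or to the thread spawner on older hardware.
 */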
void
fs_generator::generate_cs_terminate(fs_inst *inst, struct brw_reg payload)
{
   struct brw_inst *insn;

   insn = brw_next_insn(p, BRW_OPCODE_SEND);

   brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, insn, retype(payload, BRW_REGISTER_TYPE_UW));
   if (devinfo->ver < 12)
      brw_set_src1(p, insn, brw_imm_ud(0u));

   /* For XeHP and newer send a message to the message gateway to terminate a
    * compute shader.  For older devices, a message is sent to the thread
    * spawner.
    */
   if (devinfo->verx10 >= 125)
      brw_inst_set_sfid(devinfo, insn, BRW_SFID_MESSAGE_GATEWAY);
   else
      brw_inst_set_sfid(devinfo, insn, BRW_SFID_THREAD_SPAWNER);
   brw_inst_set_mlen(devinfo, insn, 1);
   brw_inst_set_rlen(devinfo, insn, 0);
   brw_inst_set_eot(devinfo, insn, inst->eot);
   brw_inst_set_header_present(devinfo, insn, false);

   brw_inst_set_ts_opcode(devinfo, insn, 0);   /* Dereference resource */

   if (devinfo->ver < 11) {
      brw_inst_set_ts_request_type(devinfo, insn, 0);   /* Root thread */

      /* Note that even though the thread has a URB resource associated with it,
       * we set the "do not dereference URB" bit, because the URB resource is
       * managed by the fixed-function unit, so it will free it automatically.
       */
      brw_inst_set_ts_resource_select(devinfo, insn, 1);   /* Do not dereference URB */
   }

   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}

void
fs_generator::generate_barrier(fs_inst *, struct brw_reg src)
{
   brw_barrier(p, src);
   if (devinfo->ver >= 12) {
      brw_set_default_swsb(p, tgl_swsb_null());
      brw_SYNC(p, TGL_SYNC_BAR);
   } else {
      brw_WAIT(p);
   }
}

bool
fs_generator::generate_linterp(fs_inst *inst,
                               struct brw_reg dst, struct brw_reg *src)
{
   /* PLN reads:
    *                      /   in SIMD16   \
    *    -----------------------------------
    *   | src1+0 | src1+1 | src1+2 | src1+3 |
    *   |-----------------------------------|
    *   |(x0, x1)|(y0, y1)|(x2, x3)|(y2, y3)|
    *    -----------------------------------
    *
    * but for the LINE/MAC pair, the LINE reads Xs and the MAC reads Ys:
    *
    *    -----------------------------------
    *   | src1+0 | src1+1 | src1+2 | src1+3 |
    *   |-----------------------------------|
    *   |(x0, x1)|(y0, y1)|        |        | in SIMD8
    *   |-----------------------------------|
    *   |(x0, x1)|(x2, x3)|(y0, y1)|(y2, y3)| in SIMD16
    *    -----------------------------------
    *
    * See also: emit_interpolation_setup_gfx4().
    */
   struct brw_reg delta_x = src[0];
   struct brw_reg delta_y = offset(src[0], inst->exec_size / 8);
   struct brw_reg interp = src[1];
   brw_inst *i[2];

   /* nir_lower_interpolation() will do the lowering to MAD instructions for
    * us on gfx11+
    */
   assert(devinfo->ver < 11);

   if (devinfo->has_pln) {
      if (devinfo->ver <= 6 && (delta_x.nr & 1) != 0) {
         /* From the Sandy Bridge PRM Vol. 4, Pt. 2, Section 8.3.53, "Plane":
          *
          *    "[DevSNB]:<src1> must be even register aligned."
          *
          * This restriction is lifted on Ivy Bridge.
          *
          * This means that we need to split PLN into LINE+MAC on-the-fly.
          * Unfortunately, the inputs are laid out for PLN and not LINE+MAC so
          * we have to split into SIMD8 pieces.  For gfx4 (!has_pln), the
          * coordinate registers are laid out differently so we leave it as a
          * SIMD16 instruction.
          */
         assert(inst->exec_size == 8 || inst->exec_size == 16);
         assert(inst->group % 16 == 0);

         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_8);

         /* Thanks to two accumulators, we can emit all the LINEs and then all
          * the MACs.  This improves parallelism a bit.
          */
         for (unsigned g = 0; g < inst->exec_size / 8; g++) {
            brw_inst *line = brw_LINE(p, brw_null_reg(), interp,
                                      offset(delta_x, g * 2));
            brw_inst_set_group(devinfo, line, inst->group + g * 8);

            /* LINE writes the accumulator automatically on gfx4-5.  On Sandy
             * Bridge and later, we have to explicitly enable it.
             */
            if (devinfo->ver >= 6)
               brw_inst_set_acc_wr_control(p->devinfo, line, true);

            /* brw_set_default_saturate() is called before emitting
             * instructions, so the saturate bit is set in each instruction,
             * so we need to unset it on the LINE instructions.
             */
            brw_inst_set_saturate(p->devinfo, line, false);
         }

         for (unsigned g = 0; g < inst->exec_size / 8; g++) {
            brw_inst *mac = brw_MAC(p, offset(dst, g), suboffset(interp, 1),
                                    offset(delta_x, g * 2 + 1));
            brw_inst_set_group(devinfo, mac, inst->group + g * 8);
            brw_inst_set_cond_modifier(p->devinfo, mac, inst->conditional_mod);
         }

         brw_pop_insn_state(p);

         return true;
      } else {
         brw_PLN(p, dst, interp, delta_x);

         return false;
      }
   } else {
      i[0] = brw_LINE(p, brw_null_reg(), interp, delta_x);
      i[1] = brw_MAC(p, dst, suboffset(interp, 1), delta_y);

      brw_inst_set_cond_modifier(p->devinfo, i[1], inst->conditional_mod);

      /* brw_set_default_saturate() is called before emitting instructions, so
       * the saturate bit is set in each instruction, so we need to unset it on
       * the first instruction.
       */
      brw_inst_set_saturate(p->devinfo, i[0], false);

      return true;
   }
}

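/* Emit a resinfo sampler message to query the size of a buffer surface.
 * The surface index must be an immediate, and the response uses the usual
 * 4-component resinfo layout.
 */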
void
fs_generator::generate_get_buffer_size(fs_inst *inst,
                                       struct brw_reg dst,
                                       struct brw_reg src,
                                       struct brw_reg surf_index)
{
   assert(devinfo->ver >= 7);
   assert(surf_index.file == BRW_IMMEDIATE_VALUE);

   uint32_t simd_mode;
   int rlen = 4;

   switch (inst->exec_size) {
   case 8:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
      break;
   case 16:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
      break;
   default:
      unreachable("Invalid width for texture instruction");
   }

   if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
      rlen = 8;
      dst = vec16(dst);
   }

   uint32_t return_format =
      devinfo->ver >= 8 ? GFX8_SAMPLER_RETURN_FORMAT_32BITS :
                          BRW_SAMPLER_RETURN_FORMAT_SINT32;
   brw_SAMPLE(p,
              retype(dst, BRW_REGISTER_TYPE_UW),
              inst->base_mrf,
              src,
              surf_index.ud,
              0,
              GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
              rlen, /* response length */
              inst->mlen,
              inst->header_size > 0,
              simd_mode,
              return_format);
}

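/* Emit a pre-gfx7 sampler message for a texturing opcode.  The message
 * type, SIMD mode and return format are derived from the instruction, and
 * the message header (either an implied move from g0 or an explicit MRF
 * copy with the texel offset in DWord 2) is set up when present.
 */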
void
fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst,
                           struct brw_reg surface_index,
                           struct brw_reg sampler_index)
{
   assert(devinfo->ver < 7);
   assert(inst->size_written % REG_SIZE == 0);
   int msg_type = -1;
   uint32_t simd_mode;
   uint32_t return_format;

   /* Sampler EOT message of less than the dispatch width would kill the
    * thread prematurely.
    */
   assert(!inst->eot || inst->exec_size == dispatch_width);

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   /* Stomp the resinfo output type to UINT32.  On gens 4-5, the output type
    * is set as part of the message descriptor.  On gfx4, the PRM seems to
    * allow UINT32 and FLOAT32 (i965 PRM, Vol. 4 Section 4.8.1.1), but on
    * later gens UINT32 is required.  Once you hit Sandy Bridge, the bit is
    * gone from the message descriptor entirely and you just get UINT32 all
    * the time regardless.  Since we can really only do non-UINT32 on gfx4,
    * just stomp it to UINT32 all the time.
    */
   if (inst->opcode == SHADER_OPCODE_TXS)
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;

   switch (inst->exec_size) {
   case 8:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
      break;
   case 16:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
      break;
   default:
      unreachable("Invalid width for texture instruction");
   }

   if (devinfo->ver >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
         if (inst->shadow_compare) {
            msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_COMPARE;
         } else {
            msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE;
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE;
         } else {
            msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_BIAS;
         }
         break;
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TXD:
         assert(!inst->shadow_compare);
         msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_CMS:
         msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_LOD:
         msg_type = GFX5_SAMPLER_MESSAGE_LOD;
         break;
      case SHADER_OPCODE_TG4:
         assert(devinfo->ver == 6);
         assert(!inst->shadow_compare);
         msg_type = GFX7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         break;
      case SHADER_OPCODE_SAMPLEINFO:
         msg_type = GFX6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
         break;
      default:
         unreachable("not reached");
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
         /* Note that G45 and older determine shadow compare and dispatch
          * width from message length for most messages.
          */
         if (inst->exec_size == 8) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
            if (inst->shadow_compare) {
               assert(inst->mlen == 6);
            } else {
               assert(inst->mlen <= 4);
            }
         } else {
            if (inst->shadow_compare) {
               msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE;
               assert(inst->mlen == 9);
            } else {
               msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE;
               assert(inst->mlen <= 7 && inst->mlen % 2 == 1);
            }
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            assert(inst->exec_size == 8);
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            assert(inst->exec_size == 8);
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually */
         assert(inst->exec_size == 8);
         assert(inst->mlen == 7 || inst->mlen == 10);
         msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS;
         break;
      case SHADER_OPCODE_TXF:
         assert(inst->mlen <= 9 && inst->mlen % 2 == 1);
         msg_type = BRW_SAMPLER_MESSAGE_SIMD16_LD;
         simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         break;
      case SHADER_OPCODE_TXS:
         assert(inst->mlen == 3);
         msg_type = BRW_SAMPLER_MESSAGE_SIMD16_RESINFO;
         simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         break;
      default:
         unreachable("not reached");
      }
   }
   assert(msg_type != -1);

   if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
      dst = vec16(dst);
   }

   assert(sampler_index.type == BRW_REGISTER_TYPE_UD);

   /* Load the message header if present.  If there's a texture offset,
    * we need to set it up explicitly and load the offset bitfield.
    * Otherwise, we can use an implied move from g0 to the first message reg.
    */
   struct brw_reg src = brw_null_reg();
   if (inst->header_size != 0) {
      if (devinfo->ver < 6 && !inst->offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
      } else {
         const tgl_swsb swsb = brw_get_default_swsb(p);
         assert(inst->base_mrf != -1);
         struct brw_reg header_reg = brw_message_reg(inst->base_mrf);

         brw_push_insn_state(p);
         brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
         brw_set_default_exec_size(p, BRW_EXECUTE_8);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_MOV(p, header_reg, brw_vec8_grf(0, 0));
         brw_set_default_swsb(p, tgl_swsb_regdist(1));

         brw_set_default_exec_size(p, BRW_EXECUTE_1);
         if (inst->offset) {
            /* Set the offset bits in DWord 2. */
            brw_MOV(p, get_element_ud(header_reg, 2),
                       brw_imm_ud(inst->offset));
         }

         brw_pop_insn_state(p);
         brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
      }
   }

   assert(surface_index.file == BRW_IMMEDIATE_VALUE);
   assert(sampler_index.file == BRW_IMMEDIATE_VALUE);

   brw_SAMPLE(p,
              retype(dst, BRW_REGISTER_TYPE_UW),
              inst->base_mrf,
              src,
              surface_index.ud,
              sampler_index.ud % 16,
              msg_type,
              inst->size_written / REG_SIZE,
              inst->mlen,
              inst->header_size != 0,
              simd_mode,
              return_format);
}


/* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
 * looking like:
 *
 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
 *
 * Ideally, we want to produce:
 *
 *            DDX                   DDY
 * dst: (ss0.tr - ss0.tl)    (ss0.tl - ss0.bl)
 *      (ss0.tr - ss0.tl)    (ss0.tr - ss0.br)
 *      (ss0.br - ss0.bl)    (ss0.tl - ss0.bl)
 *      (ss0.br - ss0.bl)    (ss0.tr - ss0.br)
 *      (ss1.tr - ss1.tl)    (ss1.tl - ss1.bl)
 *      (ss1.tr - ss1.tl)    (ss1.tr - ss1.br)
 *      (ss1.br - ss1.bl)    (ss1.tl - ss1.bl)
 *      (ss1.br - ss1.bl)    (ss1.tr - ss1.br)
 *
 * and add another set of two more subspans if in 16-pixel dispatch mode.
 *
 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
 * pair.  But the ideal approximation may impose a huge performance cost on
 * sample_d.  On at least Haswell, the sample_d instruction does some
 * optimizations if the same LOD is used for all pixels in the subspan.
 *
 * For DDY, we need to use ALIGN16 mode since it's capable of doing the
 * appropriate swizzling.
 */
void
fs_generator::generate_ddx(const fs_inst *inst,
                           struct brw_reg dst, struct brw_reg src)
{
   unsigned vstride, width;

   if (devinfo->ver >= 8) {
      if (inst->opcode == FS_OPCODE_DDX_FINE) {
         /* produce accurate derivatives */
         vstride = BRW_VERTICAL_STRIDE_2;
         width = BRW_WIDTH_2;
      } else {
         /* replicate the derivative at the top-left pixel to other pixels */
         vstride = BRW_VERTICAL_STRIDE_4;
         width = BRW_WIDTH_4;
      }

      struct brw_reg src0 = byte_offset(src, type_sz(src.type));
      struct brw_reg src1 = src;

      src0.vstride = vstride;
      src0.width = width;
      src0.hstride = BRW_HORIZONTAL_STRIDE_0;
      src1.vstride = vstride;
      src1.width = width;
      src1.hstride = BRW_HORIZONTAL_STRIDE_0;

      brw_ADD(p, dst, src0, negate(src1));
   } else {
      /* On Haswell and earlier, the region used above appears to not work
       * correctly for compressed instructions.  At least on Haswell and
       * Iron Lake, compressed ALIGN16 instructions do work.  Since we
       * would have to split to SIMD8 no matter which method we choose, we
       * may as well use ALIGN16 on all platforms gfx7 and earlier.
       */
      struct brw_reg src0 = stride(src, 4, 4, 1);
      struct brw_reg src1 = stride(src, 4, 4, 1);
      if (inst->opcode == FS_OPCODE_DDX_FINE) {
         src0.swizzle = BRW_SWIZZLE_XXZZ;
         src1.swizzle = BRW_SWIZZLE_YYWW;
      } else {
         src0.swizzle = BRW_SWIZZLE_XXXX;
         src1.swizzle = BRW_SWIZZLE_YYYY;
      }

      brw_push_insn_state(p);
      brw_set_default_access_mode(p, BRW_ALIGN_16);
      brw_ADD(p, dst, negate(src0), src1);
      brw_pop_insn_state(p);
   }
}

/* The negate_value boolean is used to negate the derivative computation for
 * FBOs, since they place the origin at the upper left instead of the lower
 * left.
 */
void
fs_generator::generate_ddy(const fs_inst *inst,
                           struct brw_reg dst, struct brw_reg src)
{
   const uint32_t type_size = type_sz(src.type);

   if (inst->opcode == FS_OPCODE_DDY_FINE) {
      /* produce accurate derivatives.
       *
       * From the Broadwell PRM, Volume 7 (3D-Media-GPGPU)
       * "Register Region Restrictions", Section "1. Special Restrictions":
       *
       *    "In Align16 mode, the channel selects and channel enables apply to
       *     a pair of half-floats, because these parameters are defined for
       *     DWord elements ONLY.  This is applicable when both source and
       *     destination are half-floats."
       *
       * So for half-float operations we use the Gfx11+ Align1 path.  CHV
       * inherits its FP16 hardware from SKL, so it is not affected.
       */
      if (devinfo->ver >= 11 ||
          (devinfo->platform == INTEL_PLATFORM_BDW && src.type == BRW_REGISTER_TYPE_HF)) {
         src = stride(src, 0, 2, 1);

         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_4);
         for (uint32_t g = 0; g < inst->exec_size; g += 4) {
            brw_set_default_group(p, inst->group + g);
            brw_ADD(p, byte_offset(dst, g * type_size),
                       negate(byte_offset(src, g * type_size)),
                       byte_offset(src, (g + 2) * type_size));
            brw_set_default_swsb(p, tgl_swsb_null());
         }
         brw_pop_insn_state(p);
      } else {
         struct brw_reg src0 = stride(src, 4, 4, 1);
         struct brw_reg src1 = stride(src, 4, 4, 1);
         src0.swizzle = BRW_SWIZZLE_XYXY;
         src1.swizzle = BRW_SWIZZLE_ZWZW;

         brw_push_insn_state(p);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_ADD(p, dst, negate(src0), src1);
         brw_pop_insn_state(p);
      }
   } else {
      /* replicate the derivative at the top-left pixel to other pixels */
      if (devinfo->ver >= 8) {
         struct brw_reg src0 = byte_offset(stride(src, 4, 4, 0), 0 * type_size);
         struct brw_reg src1 = byte_offset(stride(src, 4, 4, 0), 2 * type_size);

         brw_ADD(p, dst, negate(src0), src1);
      } else {
         /* On Haswell and earlier, the region used above appears to not work
          * correctly for compressed instructions.  At least on Haswell and
          * Iron Lake, compressed ALIGN16 instructions do work.  Since we
          * would have to split to SIMD8 no matter which method we choose, we
          * may as well use ALIGN16 on all platforms gfx7 and earlier.
          */
         struct brw_reg src0 = stride(src, 4, 4, 1);
         struct brw_reg src1 = stride(src, 4, 4, 1);
         src0.swizzle = BRW_SWIZZLE_XXXX;
         src1.swizzle = BRW_SWIZZLE_ZZZZ;

         brw_push_insn_state(p);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_ADD(p, dst, negate(src0), src1);
         brw_pop_insn_state(p);
      }
   }
}

void
fs_generator::generate_halt(fs_inst *)
{
   /* This HALT will be patched up at FB write time to point UIP at the end of
    * the program, and at brw_uip_jip() JIP will be set to the end of the
    * current block (or the program).
    */
   this->discard_halt_patches.push_tail(new(mem_ctx) ip_record(p->nr_insn));
   brw_HALT(p);
}

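/* Emit one or more OWord block writes to scratch space, first copying the
 * source into the message payload MRF.  The write is split into chunks of
 * at most 16 channels because the 32-wide messages only respect the first
 * 16-wide half of the channel enables.
 */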
void
fs_generator::generate_scratch_write(fs_inst *inst, struct brw_reg src)
{
   /* The 32-wide messages only respect the first 16-wide half of the channel
    * enable signals which are replicated identically for the second group of
    * 16 channels, so we cannot use them unless the write is marked
    * force_writemask_all.
    */
   const unsigned lower_size = inst->force_writemask_all ? inst->exec_size :
                               MIN2(16, inst->exec_size);
   const unsigned block_size = 4 * lower_size / REG_SIZE;
   const tgl_swsb swsb = brw_get_default_swsb(p);
   assert(inst->mlen != 0);

   brw_push_insn_state(p);
   brw_set_default_exec_size(p, cvt(lower_size) - 1);
   brw_set_default_compression(p, lower_size > 8);

   for (unsigned i = 0; i < inst->exec_size / lower_size; i++) {
      brw_set_default_group(p, inst->group + lower_size * i);

      if (i > 0) {
         assert(swsb.mode & TGL_SBID_SET);
         brw_set_default_swsb(p, tgl_swsb_sbid(TGL_SBID_SRC, swsb.sbid));
      } else {
         brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
      }

      brw_MOV(p, brw_uvec_mrf(lower_size, inst->base_mrf + 1, 0),
              retype(offset(src, block_size * i), BRW_REGISTER_TYPE_UD));

      brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
      brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf),
                                    block_size,
                                    inst->offset + block_size * REG_SIZE * i);
   }

   brw_pop_insn_state(p);
}

void
fs_generator::generate_scratch_read(fs_inst *inst, struct brw_reg dst)
{
   assert(inst->exec_size <= 16 || inst->force_writemask_all);
   assert(inst->mlen != 0);

   brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf),
                                inst->exec_size / 8, inst->offset);
}

void
fs_generator::generate_scratch_read_gfx7(fs_inst *inst, struct brw_reg dst)
{
   assert(inst->exec_size <= 16 || inst->force_writemask_all);

   gfx7_block_read_scratch(p, dst, inst->exec_size / 8, inst->offset);
}

/* The A32 messages take a buffer base address in header.5:[31:0] (See
 * MH1_A32_PSM for typed messages or MH_A32_GO for byte/dword scattered
 * and OWord block messages in the SKL PRM Vol. 2d for more details.)
 * Unfortunately, there are a number of subtle differences:
 *
 * For the block read/write messages:
 *
 *  - We always stomp header.2 to fill in the actual scratch address (in
 *    units of OWORDs) so we don't care what's in there.
 *
 *  - They rely on the per-thread scratch space value in header.3[3:0] to do
 *    bounds checking so that needs to be valid.  The upper bits of
 *    header.3 are ignored, though, so we can copy all of g0.3.
 *
 *  - They ignore header.5[9:0] and assume the address is 1KB aligned.
 *
 *
 * For the byte/dword scattered read/write messages:
 *
 *  - We want header.2 to be zero because that gets added to the per-channel
 *    offset in the non-header portion of the message.
 *
 *  - Contrary to what the docs claim, they don't do any bounds checking so
 *    the value of header.3[3:0] doesn't matter.
 *
 *  - They consider all of header.5 for the base address and header.5[9:0]
 *    are not ignored.  This means that we can't copy g0.5 verbatim because
 *    g0.5[9:0] contains the FFTID on most platforms.  Instead, we have to
 *    use an AND to mask off the bottom 10 bits.
 *
 *
 * For block messages, just copying g0 gives a valid header because all the
 * garbage gets ignored except for header.2 which we stomp as part of message
 * setup.  For byte/dword scattered messages, we can just zero out the header
 * and copy over the bits we need from g0.5.  This opcode, however, tries to
 * satisfy the requirements of both by starting with 0 and filling out the
 * information required by either set of opcodes.
 */
void
fs_generator::generate_scratch_header(fs_inst *inst, struct brw_reg dst)
{
   assert(inst->exec_size == 8 && inst->force_writemask_all);
   assert(dst.file == BRW_GENERAL_REGISTER_FILE);

   dst.type = BRW_REGISTER_TYPE_UD;

   brw_inst *insn = brw_MOV(p, dst, brw_imm_ud(0));
   if (devinfo->ver >= 12)
      brw_set_default_swsb(p, tgl_swsb_null());
   else
      brw_inst_set_no_dd_clear(p->devinfo, insn, true);

   /* Copy the per-thread scratch space size from g0.3[3:0] */
   brw_set_default_exec_size(p, BRW_EXECUTE_1);
   insn = brw_AND(p, suboffset(dst, 3),
                     retype(brw_vec1_grf(0, 3), BRW_REGISTER_TYPE_UD),
                     brw_imm_ud(INTEL_MASK(3, 0)));
   if (devinfo->ver < 12) {
      brw_inst_set_no_dd_clear(p->devinfo, insn, true);
      brw_inst_set_no_dd_check(p->devinfo, insn, true);
   }

   /* Copy the scratch base address from g0.5[31:10] */
   insn = brw_AND(p, suboffset(dst, 5),
                     retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
                     brw_imm_ud(INTEL_MASK(31, 10)));
   if (devinfo->ver < 12)
      brw_inst_set_no_dd_check(p->devinfo, insn, true);
}

1565 void
1566 fs_generator::generate_uniform_pull_constant_load(fs_inst *inst,
1567 struct brw_reg dst,
1568 struct brw_reg index,
1569 struct brw_reg offset)
1570 {
1571 assert(type_sz(dst.type) == 4);
1572 assert(inst->mlen != 0);
1573
1574 assert(index.file == BRW_IMMEDIATE_VALUE &&
1575 index.type == BRW_REGISTER_TYPE_UD);
1576 uint32_t surf_index = index.ud;
1577
1578 assert(offset.file == BRW_IMMEDIATE_VALUE &&
1579 offset.type == BRW_REGISTER_TYPE_UD);
1580 uint32_t read_offset = offset.ud;
1581
1582 brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
1583 read_offset, surf_index);
1584 }
1585
1586 void
1587 fs_generator::generate_uniform_pull_constant_load_gfx7(fs_inst *inst,
1588 struct brw_reg dst,
1589 struct brw_reg index,
1590 struct brw_reg payload)
1591 {
1592 assert(index.type == BRW_REGISTER_TYPE_UD);
1593 assert(payload.file == BRW_GENERAL_REGISTER_FILE);
1594 assert(type_sz(dst.type) == 4);
1595 assert(!devinfo->has_lsc);
1596
1597 if (index.file == BRW_IMMEDIATE_VALUE) {
1598 const uint32_t surf_index = index.ud;
1599
1600 brw_push_insn_state(p);
1601 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1602 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1603 brw_pop_insn_state(p);
1604
1605 brw_inst_set_sfid(devinfo, send, GFX6_SFID_DATAPORT_CONSTANT_CACHE);
1606 brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UD));
1607 brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
1608 brw_set_desc(p, send,
1609 brw_message_desc(devinfo, 1, DIV_ROUND_UP(inst->size_written,
1610 REG_SIZE), true) |
1611 brw_dp_desc(devinfo, surf_index,
1612 GFX7_DATAPORT_DC_OWORD_BLOCK_READ,
1613 BRW_DATAPORT_OWORD_BLOCK_DWORDS(inst->exec_size)));
1614
1615 } else {
1616 const tgl_swsb swsb = brw_get_default_swsb(p);
1617 struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
1618
1619 brw_push_insn_state(p);
1620 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1621
1622 /* a0.0 = surf_index & 0xff */
1623 brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
1624 brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
1625 brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
1626 brw_set_dest(p, insn_and, addr);
1627 brw_set_src0(p, insn_and, vec1(retype(index, BRW_REGISTER_TYPE_UD)));
1628 brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));
1629
1630 /* dst = send(payload, a0.0 | <descriptor>) */
1631 brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
1632 brw_send_indirect_message(
1633 p, GFX6_SFID_DATAPORT_CONSTANT_CACHE,
1634 retype(dst, BRW_REGISTER_TYPE_UD),
1635 retype(payload, BRW_REGISTER_TYPE_UD), addr,
1636 brw_message_desc(devinfo, 1,
1637 DIV_ROUND_UP(inst->size_written, REG_SIZE), true) |
1638 brw_dp_desc(devinfo, 0 /* surface */,
1639 GFX7_DATAPORT_DC_OWORD_BLOCK_READ,
1640 BRW_DATAPORT_OWORD_BLOCK_DWORDS(inst->exec_size)),
1641 false /* EOT */);
1642
1643 brw_pop_insn_state(p);
1644 }
1645 }
1646
1647 void
1648 fs_generator::generate_varying_pull_constant_load_gfx4(fs_inst *inst,
1649 struct brw_reg dst,
1650 struct brw_reg index)
1651 {
1652 assert(devinfo->ver < 7); /* Should use the gfx7 variant. */
1653 assert(inst->header_size != 0);
1654 assert(inst->mlen);
1655
1656 assert(index.file == BRW_IMMEDIATE_VALUE &&
1657 index.type == BRW_REGISTER_TYPE_UD);
1658 uint32_t surf_index = index.ud;
1659
1660 uint32_t simd_mode, rlen, msg_type;
1661 if (inst->exec_size == 16) {
1662 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
1663 rlen = 8;
1664 } else {
1665 assert(inst->exec_size == 8);
1666 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
1667 rlen = 4;
1668 }
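   /* The sampler LD message returns all four texel components, so the
    * response lengths above presumably correspond to one GRF per component
    * at SIMD8 (rlen 4) and two per component at SIMD16 (rlen 8).
    */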
1669
1670 if (devinfo->ver >= 5)
1671 msg_type = GFX5_SAMPLER_MESSAGE_SAMPLE_LD;
1672 else {
1673 /* We always use the SIMD16 message so that we only have to load U, and
1674 * not V or R.
1675 */
1676 msg_type = BRW_SAMPLER_MESSAGE_SIMD16_LD;
1677 assert(inst->mlen == 3);
1678 assert(inst->size_written == 8 * REG_SIZE);
1679 rlen = 8;
1680 simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
1681 }
1682
1683 struct brw_reg header = brw_vec8_grf(0, 0);
1684 gfx6_resolve_implied_move(p, &header, inst->base_mrf);
1685
1686 brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
1687 brw_inst_set_compression(devinfo, send, false);
1688 brw_inst_set_sfid(devinfo, send, BRW_SFID_SAMPLER);
1689 brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
1690 brw_set_src0(p, send, header);
1691 if (devinfo->ver < 6)
1692 brw_inst_set_base_mrf(p->devinfo, send, inst->base_mrf);
1693
1694 /* Our surface is set up as floats, regardless of what actual data is
1695 * stored in it.
1696 */
1697 uint32_t return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
1698 brw_set_desc(p, send,
1699 brw_message_desc(devinfo, inst->mlen, rlen, inst->header_size) |
1700 brw_sampler_desc(devinfo, surf_index,
1701 0, /* sampler (unused) */
1702 msg_type, simd_mode, return_format));
1703 }
1704
1705 void
1706 fs_generator::generate_pixel_interpolator_query(fs_inst *inst,
1707 struct brw_reg dst,
1708 struct brw_reg src,
1709 struct brw_reg msg_data,
1710 unsigned msg_type)
1711 {
1712 const bool has_payload = inst->src[0].file != BAD_FILE;
1713 assert(msg_data.type == BRW_REGISTER_TYPE_UD);
1714 assert(inst->size_written % REG_SIZE == 0);
1715
1716 struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
1717
1718 brw_pixel_interpolator_query(p,
1719 retype(dst, BRW_REGISTER_TYPE_UW),
1720 /* If we don't have a payload, what we send doesn't matter */
1721 has_payload ? src : brw_vec8_grf(0, 0),
1722 inst->pi_noperspective,
1723 prog_data->per_coarse_pixel_dispatch,
1724 msg_type,
1725 msg_data,
1726 has_payload ? 2 * inst->exec_size / 8 : 1,
1727 inst->size_written / REG_SIZE);
1728 }
1729
1730 /* Applies a <1;4,0> region (vstride=1, width=4, hstride=0) to register
1731 * src1 for the ADD instruction.
1732 */
1733 void
1734 fs_generator::generate_set_sample_id(fs_inst *inst,
1735 struct brw_reg dst,
1736 struct brw_reg src0,
1737 struct brw_reg src1)
1738 {
1739 assert(dst.type == BRW_REGISTER_TYPE_D ||
1740 dst.type == BRW_REGISTER_TYPE_UD);
1741 assert(src0.type == BRW_REGISTER_TYPE_D ||
1742 src0.type == BRW_REGISTER_TYPE_UD);
1743
1744 const struct brw_reg reg = stride(src1, 1, 4, 0);
1745 const unsigned lower_size = MIN2(inst->exec_size,
1746 devinfo->ver >= 8 ? 16 : 8);
1747
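   /* A worked example of the source-offset arithmetic below (assumed
    * values): for a <8;8,1> float src0 (encoded vstride = 4, width = 3)
    * with exec_size = 32 and lower_size = 16, iteration i = 1 skips
    * (1 << 3) * (16 / (1 << 3)) = 16 elements, i.e. 16 * 4 / 32 = 2 GRFs,
    * so the second half of the ADD reads from offset(src0, 2).
    */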
1748 for (unsigned i = 0; i < inst->exec_size / lower_size; i++) {
1749 brw_inst *insn = brw_ADD(p, offset(dst, i * lower_size / 8),
1750 offset(src0, (src0.vstride == 0 ? 0 : (1 << (src0.vstride - 1)) *
1751 (i * lower_size / (1 << src0.width))) *
1752 type_sz(src0.type) / REG_SIZE),
1753 suboffset(reg, i * lower_size / 4));
1754 brw_inst_set_exec_size(devinfo, insn, cvt(lower_size) - 1);
1755 brw_inst_set_group(devinfo, insn, inst->group + lower_size * i);
1756 brw_inst_set_compression(devinfo, insn, lower_size > 8);
1757 brw_set_default_swsb(p, tgl_swsb_null());
1758 }
1759 }
1760
1761 void
1762 fs_generator::generate_pack_half_2x16_split(fs_inst *,
1763 struct brw_reg dst,
1764 struct brw_reg x,
1765 struct brw_reg y)
1766 {
1767 assert(devinfo->ver >= 7);
1768 assert(dst.type == BRW_REGISTER_TYPE_UD);
1769 assert(x.type == BRW_REGISTER_TYPE_F);
1770 assert(y.type == BRW_REGISTER_TYPE_F);
1771
1772 /* From the Ivybridge PRM, Vol4, Part3, Section 6.27 f32to16:
1773 *
1774 * Because this instruction does not have a 16-bit floating-point type,
1775 * the destination data type must be Word (W).
1776 *
1777 * The destination must be DWord-aligned and specify a horizontal stride
1778 * (HorzStride) of 2. The 16-bit result is stored in the lower word of
1779 * each destination channel and the upper word is not modified.
1780 */
1781 const enum brw_reg_type t = devinfo->ver > 7
1782 ? BRW_REGISTER_TYPE_HF : BRW_REGISTER_TYPE_W;
1783 struct brw_reg dst_w = spread(retype(dst, t), 2);
1784
1785 /* Give each 32-bit channel of dst the form below, where "." means
1786 * unchanged.
1787 * 0x....hhhh
1788 */
1789 brw_F32TO16(p, dst_w, y);
1790
1791 /* Now the form:
1792 * 0xhhhh0000
1793 */
1794 brw_set_default_swsb(p, tgl_swsb_regdist(1));
1795 brw_SHL(p, dst, dst, brw_imm_ud(16u));
1796
1797 /* And, finally the form of packHalf2x16's output:
1798 * 0xhhhhllll
1799 */
1800 brw_F32TO16(p, dst_w, x);
1801 }
1802
1803 void
1804 fs_generator::enable_debug(const char *shader_name)
1805 {
1806 debug_flag = true;
1807 this->shader_name = shader_name;
1808 }
1809
1810 int
1811 fs_generator::generate_code(const cfg_t *cfg, int dispatch_width,
1812 struct shader_stats shader_stats,
1813 const brw::performance &perf,
1814 struct brw_compile_stats *stats)
1815 {
1816 /* Align to a 64-byte boundary. */
1817 brw_realign(p, 64);
1818
1819 this->dispatch_width = dispatch_width;
1820
1821 int start_offset = p->next_insn_offset;
1822
1823 /* `send_count` explicitly does not include spills or fills, as we'd
1824 * like to use it as a metric for intentional memory access or other
1825 * shared function use. Otherwise, subtle changes to scheduling or
1826 * register allocation could cause it to fluctuate wildly - and that
1827 * effect is already counted in spill/fill counts.
1828 */
1829 int spill_count = 0, fill_count = 0;
1830 int loop_count = 0, send_count = 0, nop_count = 0;
1831 bool is_accum_used = false;
1832
1833 struct disasm_info *disasm_info = disasm_initialize(devinfo, cfg);
1834
1835 foreach_block_and_inst (block, fs_inst, inst, cfg) {
1836 if (inst->opcode == SHADER_OPCODE_UNDEF)
1837 continue;
1838
1839 struct brw_reg src[4], dst;
1840 unsigned int last_insn_offset = p->next_insn_offset;
1841 bool multiple_instructions_emitted = false;
1842 tgl_swsb swsb = inst->sched;
1843
1844 /* From the Broadwell PRM, Volume 7, "3D-Media-GPGPU", in the
1845 * "Register Region Restrictions" section: for BDW, SKL:
1846 *
1847 * "A POW/FDIV operation must not be followed by an instruction
1848 * that requires two destination registers."
1849 *
1850 * The documentation is often lacking annotations for Atom parts,
1851 * and empirically this affects CHV as well.
1852 */
1853 if (devinfo->ver >= 8 &&
1854 devinfo->ver <= 9 &&
1855 p->nr_insn > 1 &&
1856 brw_inst_opcode(devinfo, brw_last_inst) == BRW_OPCODE_MATH &&
1857 brw_inst_math_function(devinfo, brw_last_inst) == BRW_MATH_FUNCTION_POW &&
1858 inst->dst.component_size(inst->exec_size) > REG_SIZE) {
1859 brw_NOP(p);
1860 last_insn_offset = p->next_insn_offset;
1861
1862 /* In order to avoid spurious instruction count differences when the
1863 * instruction schedule changes, keep track of the number of inserted
1864 * NOPs.
1865 */
1866 nop_count++;
1867 }
1868
1869 /* Wa_14010017096:
1870 *
1871 * Clear accumulator register before end of thread.
1872 */
1873 if (inst->eot && is_accum_used && devinfo->ver >= 12) {
1874 brw_set_default_exec_size(p, BRW_EXECUTE_16);
1875 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1876 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
1877 brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
1878 brw_MOV(p, brw_acc_reg(8), brw_imm_f(0.0f));
1879 last_insn_offset = p->next_insn_offset;
1880 swsb = tgl_swsb_dst_dep(swsb, 1);
1881 }
1882
1883 if (!is_accum_used && !inst->eot) {
1884 is_accum_used = inst->writes_accumulator_implicitly(devinfo) ||
1885 inst->dst.is_accumulator();
1886 }
1887
1888 /* Wa_14013745556:
1889 *
1890 * Always use @1 SWSB for EOT.
1891 */
1892 if (inst->eot && devinfo->ver >= 12) {
1893 if (tgl_swsb_src_dep(swsb).mode) {
1894 brw_set_default_exec_size(p, BRW_EXECUTE_1);
1895 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
1896 brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
1897 brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
1898 brw_SYNC(p, TGL_SYNC_NOP);
1899 last_insn_offset = p->next_insn_offset;
1900 }
1901
1902 swsb = tgl_swsb_dst_dep(swsb, 1);
1903 }
1904
1905 if (unlikely(debug_flag))
1906 disasm_annotate(disasm_info, inst, p->next_insn_offset);
1907
1908 /* If the instruction writes to more than one register, it needs to be
1909 * explicitly marked as compressed on Gen <= 5. On Gen >= 6 the
1910 * hardware figures out by itself what the right compression mode is,
1911 * but we still need to know whether the instruction is compressed to
1912 * set up the source register regions appropriately.
1913 *
1914 * XXX - This is wrong for instructions that write a single register but
1915 * read more than one which should strictly speaking be treated as
1916 * compressed. For instructions that don't write any registers it
1917 * relies on the destination being a null register of the correct
1918 * type and regioning so the instruction is considered compressed
1919 * or not accordingly.
1920 */
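      /* For example (an assumed case): a SIMD16 MOV with a 4-byte
       * destination type writes 16 * 4 = 64 bytes, two full 32-byte GRFs,
       * and is therefore treated as compressed here.
       */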
1921 const bool compressed =
1922 inst->dst.component_size(inst->exec_size) > REG_SIZE;
1923 brw_set_default_compression(p, compressed);
1924 brw_set_default_group(p, inst->group);
1925
1926 for (unsigned int i = 0; i < inst->sources; i++) {
1927 src[i] = brw_reg_from_fs_reg(devinfo, inst,
1928 &inst->src[i], compressed);
1929 /* The accumulator result appears to get used for the
1930 * conditional modifier generation. When negating a UD
1931 * value, there is a 33rd bit generated for the sign in the
1932 * accumulator value, so now you can't check, for example,
1933 * equality with a 32-bit value. See piglit fs-op-neg-uvec4.
1934 */
1935 assert(!inst->conditional_mod ||
1936 inst->src[i].type != BRW_REGISTER_TYPE_UD ||
1937 !inst->src[i].negate);
1938 }
1939 dst = brw_reg_from_fs_reg(devinfo, inst,
1940 &inst->dst, compressed);
1941
1942 brw_set_default_access_mode(p, BRW_ALIGN_1);
1943 brw_set_default_predicate_control(p, inst->predicate);
1944 brw_set_default_predicate_inverse(p, inst->predicate_inverse);
1945 /* On gfx7 and above, hardware automatically adds the group onto the
1946 * flag subregister number. On Sandy Bridge and older, we have to do it
1947 * ourselves.
1948 */
1949 const unsigned flag_subreg = inst->flag_subreg +
1950 (devinfo->ver >= 7 ? 0 : inst->group / 16);
1951 brw_set_default_flag_reg(p, flag_subreg / 2, flag_subreg % 2);
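      /* E.g. (an assumed case) on Sandy Bridge, the second SIMD16 half
       * (group == 16) of an instruction using flag_subreg 0 lands on f0.1:
       * flag_subreg becomes 0 + 16/16 = 1, i.e. flag register 0,
       * subregister 1.
       */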
1952 brw_set_default_saturate(p, inst->saturate);
1953 brw_set_default_mask_control(p, inst->force_writemask_all);
1954 brw_set_default_acc_write_control(p, inst->writes_accumulator);
1955 brw_set_default_swsb(p, swsb);
1956
1957 unsigned exec_size = inst->exec_size;
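      /* On Ivybridge/Baytrail (verx10 == 70), the encoded execution size of
       * instructions operating on 64-bit types counts each DF channel as
       * two 32-bit channels, which is presumably why the width is doubled
       * in the check below.
       */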
1958 if (devinfo->verx10 == 70 &&
1959 (get_exec_type_size(inst) == 8 || type_sz(inst->dst.type) == 8)) {
1960 exec_size *= 2;
1961 }
1962
1963 brw_set_default_exec_size(p, cvt(exec_size) - 1);
1964
1965 assert(inst->force_writemask_all || inst->exec_size >= 4);
1966 assert(inst->force_writemask_all || inst->group % inst->exec_size == 0);
1967 assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->ver));
1968 assert(inst->mlen <= BRW_MAX_MSG_LENGTH);
1969
1970 switch (inst->opcode) {
1971 case BRW_OPCODE_SYNC:
1972 assert(src[0].file == BRW_IMMEDIATE_VALUE);
1973 brw_SYNC(p, tgl_sync_function(src[0].ud));
1974 break;
1975 case BRW_OPCODE_MOV:
1976 brw_MOV(p, dst, src[0]);
1977 break;
1978 case BRW_OPCODE_ADD:
1979 brw_ADD(p, dst, src[0], src[1]);
1980 break;
1981 case BRW_OPCODE_MUL:
1982 brw_MUL(p, dst, src[0], src[1]);
1983 break;
1984 case BRW_OPCODE_AVG:
1985 brw_AVG(p, dst, src[0], src[1]);
1986 break;
1987 case BRW_OPCODE_MACH:
1988 brw_MACH(p, dst, src[0], src[1]);
1989 break;
1990
1991 case BRW_OPCODE_DP4A:
1992 assert(devinfo->ver >= 12);
1993 brw_DP4A(p, dst, src[0], src[1], src[2]);
1994 break;
1995
1996 case BRW_OPCODE_LINE:
1997 brw_LINE(p, dst, src[0], src[1]);
1998 break;
1999
2000 case BRW_OPCODE_MAD:
2001 assert(devinfo->ver >= 6);
2002 if (devinfo->ver < 10)
2003 brw_set_default_access_mode(p, BRW_ALIGN_16);
2004 brw_MAD(p, dst, src[0], src[1], src[2]);
2005 break;
2006
2007 case BRW_OPCODE_LRP:
2008 assert(devinfo->ver >= 6 && devinfo->ver <= 10);
2009 if (devinfo->ver < 10)
2010 brw_set_default_access_mode(p, BRW_ALIGN_16);
2011 brw_LRP(p, dst, src[0], src[1], src[2]);
2012 break;
2013
2014 case BRW_OPCODE_ADD3:
2015 assert(devinfo->verx10 >= 125);
2016 brw_ADD3(p, dst, src[0], src[1], src[2]);
2017 break;
2018
2019 case BRW_OPCODE_FRC:
2020 brw_FRC(p, dst, src[0]);
2021 break;
2022 case BRW_OPCODE_RNDD:
2023 brw_RNDD(p, dst, src[0]);
2024 break;
2025 case BRW_OPCODE_RNDE:
2026 brw_RNDE(p, dst, src[0]);
2027 break;
2028 case BRW_OPCODE_RNDZ:
2029 brw_RNDZ(p, dst, src[0]);
2030 break;
2031
2032 case BRW_OPCODE_AND:
2033 brw_AND(p, dst, src[0], src[1]);
2034 break;
2035 case BRW_OPCODE_OR:
2036 brw_OR(p, dst, src[0], src[1]);
2037 break;
2038 case BRW_OPCODE_XOR:
2039 brw_XOR(p, dst, src[0], src[1]);
2040 break;
2041 case BRW_OPCODE_NOT:
2042 brw_NOT(p, dst, src[0]);
2043 break;
2044 case BRW_OPCODE_ASR:
2045 brw_ASR(p, dst, src[0], src[1]);
2046 break;
2047 case BRW_OPCODE_SHR:
2048 brw_SHR(p, dst, src[0], src[1]);
2049 break;
2050 case BRW_OPCODE_SHL:
2051 brw_SHL(p, dst, src[0], src[1]);
2052 break;
2053 case BRW_OPCODE_ROL:
2054 assert(devinfo->ver >= 11);
2055 assert(src[0].type == dst.type);
2056 brw_ROL(p, dst, src[0], src[1]);
2057 break;
2058 case BRW_OPCODE_ROR:
2059 assert(devinfo->ver >= 11);
2060 assert(src[0].type == dst.type);
2061 brw_ROR(p, dst, src[0], src[1]);
2062 break;
2063 case BRW_OPCODE_F32TO16:
2064 assert(devinfo->ver >= 7);
2065 brw_F32TO16(p, dst, src[0]);
2066 break;
2067 case BRW_OPCODE_F16TO32:
2068 assert(devinfo->ver >= 7);
2069 brw_F16TO32(p, dst, src[0]);
2070 break;
2071 case BRW_OPCODE_CMP:
2072 if (inst->exec_size >= 16 && devinfo->verx10 == 70 &&
2073 dst.file == BRW_ARCHITECTURE_REGISTER_FILE) {
2074 /* For unknown reasons the WaCMPInstFlagDepClearedEarly workaround
2075 * implemented in the compiler is not sufficient. Overriding the
2076 * type when the destination is the null register is necessary but
2077 * not sufficient by itself.
2078 */
2079 dst.type = BRW_REGISTER_TYPE_D;
2080 }
2081 brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
2082 break;
2083 case BRW_OPCODE_CMPN:
2084 if (inst->exec_size >= 16 && devinfo->verx10 == 70 &&
2085 dst.file == BRW_ARCHITECTURE_REGISTER_FILE) {
2086 /* For unknown reasons the WaCMPInstFlagDepClearedEarly workaround
2087 * implemented in the compiler is not sufficient. Overriding the
2088 * type when the destination is the null register is necessary but
2089 * not sufficient by itself.
2090 */
2091 dst.type = BRW_REGISTER_TYPE_D;
2092 }
2093 brw_CMPN(p, dst, inst->conditional_mod, src[0], src[1]);
2094 break;
2095 case BRW_OPCODE_SEL:
2096 brw_SEL(p, dst, src[0], src[1]);
2097 break;
2098 case BRW_OPCODE_CSEL:
2099 assert(devinfo->ver >= 8);
2100 if (devinfo->ver < 10)
2101 brw_set_default_access_mode(p, BRW_ALIGN_16);
2102 brw_CSEL(p, dst, src[0], src[1], src[2]);
2103 break;
2104 case BRW_OPCODE_BFREV:
2105 assert(devinfo->ver >= 7);
2106 brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
2107 retype(src[0], BRW_REGISTER_TYPE_UD));
2108 break;
2109 case BRW_OPCODE_FBH:
2110 assert(devinfo->ver >= 7);
2111 brw_FBH(p, retype(dst, src[0].type), src[0]);
2112 break;
2113 case BRW_OPCODE_FBL:
2114 assert(devinfo->ver >= 7);
2115 brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD),
2116 retype(src[0], BRW_REGISTER_TYPE_UD));
2117 break;
2118 case BRW_OPCODE_LZD:
2119 brw_LZD(p, dst, src[0]);
2120 break;
2121 case BRW_OPCODE_CBIT:
2122 assert(devinfo->ver >= 7);
2123 brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD),
2124 retype(src[0], BRW_REGISTER_TYPE_UD));
2125 break;
2126 case BRW_OPCODE_ADDC:
2127 assert(devinfo->ver >= 7);
2128 brw_ADDC(p, dst, src[0], src[1]);
2129 break;
2130 case BRW_OPCODE_SUBB:
2131 assert(devinfo->ver >= 7);
2132 brw_SUBB(p, dst, src[0], src[1]);
2133 break;
2134 case BRW_OPCODE_MAC:
2135 brw_MAC(p, dst, src[0], src[1]);
2136 break;
2137
2138 case BRW_OPCODE_BFE:
2139 assert(devinfo->ver >= 7);
2140 if (devinfo->ver < 10)
2141 brw_set_default_access_mode(p, BRW_ALIGN_16);
2142 brw_BFE(p, dst, src[0], src[1], src[2]);
2143 break;
2144
2145 case BRW_OPCODE_BFI1:
2146 assert(devinfo->ver >= 7);
2147 brw_BFI1(p, dst, src[0], src[1]);
2148 break;
2149 case BRW_OPCODE_BFI2:
2150 assert(devinfo->ver >= 7);
2151 if (devinfo->ver < 10)
2152 brw_set_default_access_mode(p, BRW_ALIGN_16);
2153 brw_BFI2(p, dst, src[0], src[1], src[2]);
2154 break;
2155
2156 case BRW_OPCODE_IF:
2157 if (inst->src[0].file != BAD_FILE) {
2158 /* The instruction has an embedded compare (only allowed on gfx6) */
2159 assert(devinfo->ver == 6);
2160 gfx6_IF(p, inst->conditional_mod, src[0], src[1]);
2161 } else {
2162 brw_IF(p, brw_get_default_exec_size(p));
2163 }
2164 break;
2165
2166 case BRW_OPCODE_ELSE:
2167 brw_ELSE(p);
2168 break;
2169 case BRW_OPCODE_ENDIF:
2170 brw_ENDIF(p);
2171 break;
2172
2173 case BRW_OPCODE_DO:
2174 brw_DO(p, brw_get_default_exec_size(p));
2175 break;
2176
2177 case BRW_OPCODE_BREAK:
2178 brw_BREAK(p);
2179 break;
2180 case BRW_OPCODE_CONTINUE:
2181 brw_CONT(p);
2182 break;
2183
2184 case BRW_OPCODE_WHILE:
2185 brw_WHILE(p);
2186 loop_count++;
2187 break;
2188
2189 case SHADER_OPCODE_RCP:
2190 case SHADER_OPCODE_RSQ:
2191 case SHADER_OPCODE_SQRT:
2192 case SHADER_OPCODE_EXP2:
2193 case SHADER_OPCODE_LOG2:
2194 case SHADER_OPCODE_SIN:
2195 case SHADER_OPCODE_COS:
2196 assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
2197 if (devinfo->ver >= 6) {
2198 assert(inst->mlen == 0);
2199 assert(devinfo->ver >= 7 || inst->exec_size == 8);
2200 gfx6_math(p, dst, brw_math_function(inst->opcode),
2201 src[0], brw_null_reg());
2202 } else {
2203 assert(inst->mlen >= 1);
2204 assert(devinfo->ver == 5 || devinfo->platform == INTEL_PLATFORM_G4X || inst->exec_size == 8);
2205 gfx4_math(p, dst,
2206 brw_math_function(inst->opcode),
2207 inst->base_mrf, src[0],
2208 BRW_MATH_PRECISION_FULL);
2209 send_count++;
2210 }
2211 break;
2212 case SHADER_OPCODE_INT_QUOTIENT:
2213 case SHADER_OPCODE_INT_REMAINDER:
2214 case SHADER_OPCODE_POW:
2215 assert(devinfo->verx10 < 125);
2216 assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
2217 if (devinfo->ver >= 6) {
2218 assert(inst->mlen == 0);
2219 assert((devinfo->ver >= 7 && inst->opcode == SHADER_OPCODE_POW) ||
2220 inst->exec_size == 8);
2221 gfx6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
2222 } else {
2223 assert(inst->mlen >= 1);
2224 assert(inst->exec_size == 8);
2225 gfx4_math(p, dst, brw_math_function(inst->opcode),
2226 inst->base_mrf, src[0],
2227 BRW_MATH_PRECISION_FULL);
2228 send_count++;
2229 }
2230 break;
2231 case FS_OPCODE_LINTERP:
2232 multiple_instructions_emitted = generate_linterp(inst, dst, src);
2233 break;
2234 case FS_OPCODE_PIXEL_X:
2235 assert(src[0].type == BRW_REGISTER_TYPE_UW);
2236 assert(src[1].type == BRW_REGISTER_TYPE_UW);
2237 src[0].subnr = 0 * type_sz(src[0].type);
2238 if (src[1].file == BRW_IMMEDIATE_VALUE) {
2239 assert(src[1].ud == 0);
2240 brw_MOV(p, dst, stride(src[0], 8, 4, 1));
2241 } else {
2242 /* Coarse pixel case */
2243 brw_ADD(p, dst, stride(src[0], 8, 4, 1), src[1]);
2244 }
2245 break;
2246 case FS_OPCODE_PIXEL_Y:
2247 assert(src[0].type == BRW_REGISTER_TYPE_UW);
2248 assert(src[1].type == BRW_REGISTER_TYPE_UW);
2249 src[0].subnr = 4 * type_sz(src[0].type);
2250 if (src[1].file == BRW_IMMEDIATE_VALUE) {
2251 assert(src[1].ud == 0);
2252 brw_MOV(p, dst, stride(src[0], 8, 4, 1));
2253 } else {
2254 /* Coarse pixel case */
2255 brw_ADD(p, dst, stride(src[0], 8, 4, 1), src[1]);
2256 }
2257 break;
2258
2259 case SHADER_OPCODE_SEND:
2260 generate_send(inst, dst, src[0], src[1], src[2],
2261 inst->ex_mlen > 0 ? src[3] : brw_null_reg());
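         /* Stateless messages are assumed here to be scratch accesses:
          * ones that write the destination count as fills, the rest as
          * spills, keeping them out of send_count as explained above.
          */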
2262 if ((inst->desc & 0xff) == BRW_BTI_STATELESS ||
2263 (inst->desc & 0xff) == GFX8_BTI_STATELESS_NON_COHERENT) {
2264 if (inst->size_written)
2265 fill_count++;
2266 else
2267 spill_count++;
2268 } else {
2269 send_count++;
2270 }
2271 break;
2272
2273 case SHADER_OPCODE_GET_BUFFER_SIZE:
2274 generate_get_buffer_size(inst, dst, src[0], src[1]);
2275 send_count++;
2276 break;
2277 case SHADER_OPCODE_TEX:
2278 case FS_OPCODE_TXB:
2279 case SHADER_OPCODE_TXD:
2280 case SHADER_OPCODE_TXF:
2281 case SHADER_OPCODE_TXF_CMS:
2282 case SHADER_OPCODE_TXL:
2283 case SHADER_OPCODE_TXS:
2284 case SHADER_OPCODE_LOD:
2285 case SHADER_OPCODE_TG4:
2286 case SHADER_OPCODE_SAMPLEINFO:
2287 assert(inst->src[0].file == BAD_FILE);
2288 generate_tex(inst, dst, src[1], src[2]);
2289 send_count++;
2290 break;
2291
2292 case FS_OPCODE_DDX_COARSE:
2293 case FS_OPCODE_DDX_FINE:
2294 generate_ddx(inst, dst, src[0]);
2295 break;
2296 case FS_OPCODE_DDY_COARSE:
2297 case FS_OPCODE_DDY_FINE:
2298 generate_ddy(inst, dst, src[0]);
2299 break;
2300
2301 case SHADER_OPCODE_GFX4_SCRATCH_WRITE:
2302 generate_scratch_write(inst, src[0]);
2303 spill_count++;
2304 break;
2305
2306 case SHADER_OPCODE_GFX4_SCRATCH_READ:
2307 generate_scratch_read(inst, dst);
2308 fill_count++;
2309 break;
2310
2311 case SHADER_OPCODE_GFX7_SCRATCH_READ:
2312 generate_scratch_read_gfx7(inst, dst);
2313 fill_count++;
2314 break;
2315
2316 case SHADER_OPCODE_SCRATCH_HEADER:
2317 generate_scratch_header(inst, dst);
2318 break;
2319
2320 case SHADER_OPCODE_MOV_INDIRECT:
2321 generate_mov_indirect(inst, dst, src[0], src[1]);
2322 break;
2323
2324 case SHADER_OPCODE_MOV_RELOC_IMM:
2325 assert(src[0].file == BRW_IMMEDIATE_VALUE);
2326 brw_MOV_reloc_imm(p, dst, dst.type, src[0].ud);
2327 break;
2328
2329 case SHADER_OPCODE_URB_READ_SIMD8:
2330 case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
2331 generate_urb_read(inst, dst, src[0]);
2332 send_count++;
2333 break;
2334
2335 case SHADER_OPCODE_URB_WRITE_SIMD8:
2336 case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
2337 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
2338 case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
2339 generate_urb_write(inst, src[0]);
2340 send_count++;
2341 break;
2342
2343 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
2344 assert(inst->force_writemask_all);
2345 generate_uniform_pull_constant_load(inst, dst, src[0], src[1]);
2346 send_count++;
2347 break;
2348
2349 case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GFX7:
2350 assert(inst->force_writemask_all);
2351 generate_uniform_pull_constant_load_gfx7(inst, dst, src[0], src[1]);
2352 send_count++;
2353 break;
2354
2355 case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GFX4:
2356 generate_varying_pull_constant_load_gfx4(inst, dst, src[0]);
2357 send_count++;
2358 break;
2359
2360 case FS_OPCODE_REP_FB_WRITE:
2361 case FS_OPCODE_FB_WRITE:
2362 generate_fb_write(inst, src[0]);
2363 send_count++;
2364 break;
2365
2366 case FS_OPCODE_FB_READ:
2367 generate_fb_read(inst, dst, src[0]);
2368 send_count++;
2369 break;
2370
2371 case BRW_OPCODE_HALT:
2372 generate_halt(inst);
2373 break;
2374
2375 case SHADER_OPCODE_INTERLOCK:
2376 case SHADER_OPCODE_MEMORY_FENCE: {
2377 assert(src[1].file == BRW_IMMEDIATE_VALUE);
2378 assert(src[2].file == BRW_IMMEDIATE_VALUE);
2379
2380 const enum opcode send_op = inst->opcode == SHADER_OPCODE_INTERLOCK ?
2381 BRW_OPCODE_SENDC : BRW_OPCODE_SEND;
2382
2383 brw_memory_fence(p, dst, src[0], send_op,
2384 brw_message_target(inst->sfid),
2385 inst->desc,
2386 /* commit_enable */ src[1].ud,
2387 /* bti */ src[2].ud);
2388 send_count++;
2389 break;
2390 }
2391
2392 case FS_OPCODE_SCHEDULING_FENCE:
2393 if (inst->sources == 0 && swsb.regdist == 0 &&
2394 swsb.mode == TGL_SBID_NULL) {
2395 if (unlikely(debug_flag))
2396 disasm_info->use_tail = true;
2397 break;
2398 }
2399
2400 if (devinfo->ver >= 12) {
2401 /* Use the available SWSB information to stall. A single SYNC is
2402 * sufficient since if there were multiple dependencies, the
2403 * scoreboard algorithm already injected other SYNCs before this
2404 * instruction.
2405 */
2406 brw_SYNC(p, TGL_SYNC_NOP);
2407 } else {
2408 for (unsigned i = 0; i < inst->sources; i++) {
2409 /* Emit a MOV to force a stall until the instruction producing the
2410 * registers finishes.
2411 */
2412 brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW),
2413 retype(src[i], BRW_REGISTER_TYPE_UW));
2414 }
2415
2416 if (inst->sources > 1)
2417 multiple_instructions_emitted = true;
2418 }
2419
2420 break;
2421
2422 case SHADER_OPCODE_FIND_LIVE_CHANNEL: {
2423 const struct brw_reg mask =
2424 brw_stage_has_packed_dispatch(devinfo, stage,
2425 prog_data) ? brw_imm_ud(~0u) :
2426 stage == MESA_SHADER_FRAGMENT ? brw_vmask_reg() :
2427 brw_dmask_reg();
2428
2429 brw_find_live_channel(p, dst, mask, false);
2430 break;
2431 }
2432 case SHADER_OPCODE_FIND_LAST_LIVE_CHANNEL: {
2433 /* ce0 doesn't consider the thread dispatch mask, so if we want
2434 * to find the true last enabled channel, we need to apply that too.
2435 */
2436 const struct brw_reg mask =
2437 stage == MESA_SHADER_FRAGMENT ? brw_vmask_reg() : brw_dmask_reg();
2438
2439 brw_find_live_channel(p, dst, mask, true);
2440 break;
2441 }
2442
2443 case FS_OPCODE_LOAD_LIVE_CHANNELS: {
2444 assert(devinfo->ver >= 8);
2445 assert(inst->force_writemask_all && inst->group == 0);
2446 assert(inst->dst.file == BAD_FILE);
2447 brw_set_default_exec_size(p, BRW_EXECUTE_1);
2448 brw_MOV(p, retype(brw_flag_subreg(inst->flag_subreg),
2449 BRW_REGISTER_TYPE_UD),
2450 retype(brw_mask_reg(0), BRW_REGISTER_TYPE_UD));
2451 break;
2452 }
2453 case SHADER_OPCODE_BROADCAST:
2454 assert(inst->force_writemask_all);
2455 brw_broadcast(p, dst, src[0], src[1]);
2456 break;
2457
2458 case SHADER_OPCODE_SHUFFLE:
2459 generate_shuffle(inst, dst, src[0], src[1]);
2460 break;
2461
2462 case SHADER_OPCODE_SEL_EXEC:
2463 assert(inst->force_writemask_all);
2464 assert(devinfo->has_64bit_float || type_sz(dst.type) <= 4);
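         /* Write src[1] to every channel with the mask disabled, then
          * overwrite just the enabled channels with src[0]: a per-channel
          * select on the execution mask.
          */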
2465 brw_set_default_mask_control(p, BRW_MASK_DISABLE);
2466 brw_MOV(p, dst, src[1]);
2467 brw_set_default_mask_control(p, BRW_MASK_ENABLE);
2468 brw_set_default_swsb(p, tgl_swsb_null());
2469 brw_MOV(p, dst, src[0]);
2470 break;
2471
2472 case SHADER_OPCODE_QUAD_SWIZZLE:
2473 assert(src[1].file == BRW_IMMEDIATE_VALUE);
2474 assert(src[1].type == BRW_REGISTER_TYPE_UD);
2475 generate_quad_swizzle(inst, dst, src[0], src[1].ud);
2476 break;
2477
2478 case SHADER_OPCODE_CLUSTER_BROADCAST: {
2479 assert((devinfo->platform != INTEL_PLATFORM_CHV &&
2480 !intel_device_info_is_9lp(devinfo) &&
2481 devinfo->has_64bit_float) || type_sz(src[0].type) <= 4);
2482 assert(!src[0].negate && !src[0].abs);
2483 assert(src[1].file == BRW_IMMEDIATE_VALUE);
2484 assert(src[1].type == BRW_REGISTER_TYPE_UD);
2485 assert(src[2].file == BRW_IMMEDIATE_VALUE);
2486 assert(src[2].type == BRW_REGISTER_TYPE_UD);
2487 const unsigned component = src[1].ud;
2488 const unsigned cluster_size = src[2].ud;
2489 assert(inst->src[0].file != ARF && inst->src[0].file != FIXED_GRF);
2490 const unsigned s = inst->src[0].stride;
2491 unsigned vstride = cluster_size * s;
2492 unsigned width = cluster_size;
2493
2494 /* The maximum exec_size is 32, but the maximum width is only 16. */
2495 if (inst->exec_size == width) {
2496 vstride = 0;
2497 width = 1;
2498 }
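         /* A worked example (assumed values): exec_size = 16,
          * cluster_size = 4, stride s = 1 and component = 2 yield the
          * region src0.2<4;4,0>, so every channel in a cluster of four
          * reads channel 2 of its own cluster.
          */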
2499
2500 struct brw_reg strided = stride(suboffset(src[0], component * s),
2501 vstride, width, 0);
2502 brw_MOV(p, dst, strided);
2503 break;
2504 }
2505
2506 case FS_OPCODE_SET_SAMPLE_ID:
2507 generate_set_sample_id(inst, dst, src[0], src[1]);
2508 break;
2509
2510 case FS_OPCODE_PACK_HALF_2x16_SPLIT:
2511 generate_pack_half_2x16_split(inst, dst, src[0], src[1]);
2512 break;
2513
2514 case SHADER_OPCODE_HALT_TARGET:
2515 /* This is the place where the final HALT needs to be inserted if
2516 * we've emitted any discards. If not, this will emit no code.
2517 */
2518 if (!patch_halt_jumps()) {
2519 if (unlikely(debug_flag)) {
2520 disasm_info->use_tail = true;
2521 }
2522 }
2523 break;
2524
2525 case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
2526 generate_pixel_interpolator_query(inst, dst, src[0], src[1],
2527 GFX7_PIXEL_INTERPOLATOR_LOC_SAMPLE);
2528 send_count++;
2529 break;
2530
2531 case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
2532 generate_pixel_interpolator_query(inst, dst, src[0], src[1],
2533 GFX7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET);
2534 send_count++;
2535 break;
2536
2537 case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
2538 generate_pixel_interpolator_query(inst, dst, src[0], src[1],
2539 GFX7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET);
2540 send_count++;
2541 break;
2542
2543 case CS_OPCODE_CS_TERMINATE:
2544 generate_cs_terminate(inst, src[0]);
2545 send_count++;
2546 break;
2547
2548 case SHADER_OPCODE_BARRIER:
2549 generate_barrier(inst, src[0]);
2550 send_count++;
2551 break;
2552
2553 case BRW_OPCODE_DIM:
2554 assert(devinfo->platform == INTEL_PLATFORM_HSW);
2555 assert(src[0].type == BRW_REGISTER_TYPE_DF);
2556 assert(dst.type == BRW_REGISTER_TYPE_DF);
2557 brw_DIM(p, dst, retype(src[0], BRW_REGISTER_TYPE_F));
2558 break;
2559
2560 case SHADER_OPCODE_RND_MODE: {
2561 assert(src[0].file == BRW_IMMEDIATE_VALUE);
2562 /*
2563 * Changes the floating-point rounding mode by updating the control
2564 * register field at cr0.0 bits [5-6].
2565 */
2566 enum brw_rnd_mode mode =
2567 (enum brw_rnd_mode) (src[0].d << BRW_CR0_RND_MODE_SHIFT);
2568 brw_float_controls_mode(p, mode, BRW_CR0_RND_MODE_MASK);
2569 }
2570 break;
2571
2572 case SHADER_OPCODE_FLOAT_CONTROL_MODE:
2573 assert(src[0].file == BRW_IMMEDIATE_VALUE);
2574 assert(src[1].file == BRW_IMMEDIATE_VALUE);
2575 brw_float_controls_mode(p, src[0].d, src[1].d);
2576 break;
2577
2578 case SHADER_OPCODE_READ_SR_REG:
2579 if (devinfo->ver >= 12) {
2580 /* There is a SWSB restriction that requires that any time sr0 is
2581 * accessed both the instruction doing the access and the next one
2582 * have SWSB set to RegDist(1).
2583 */
2584 if (brw_get_default_swsb(p).mode != TGL_SBID_NULL)
2585 brw_SYNC(p, TGL_SYNC_NOP);
2586 assert(src[0].file == BRW_IMMEDIATE_VALUE);
2587 brw_set_default_swsb(p, tgl_swsb_regdist(1));
2588 brw_MOV(p, dst, brw_sr0_reg(src[0].ud));
2589 brw_set_default_swsb(p, tgl_swsb_regdist(1));
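            /* The AND with ~0 below leaves dst unchanged; it exists only so
             * that the instruction following the sr0 read also carries the
             * required RegDist(1) annotation.
             */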
2590 brw_AND(p, dst, dst, brw_imm_ud(0xffffffff));
2591 } else {
2592 brw_MOV(p, dst, brw_sr0_reg(src[0].ud));
2593 }
2594 break;
2595
2596 default:
2597 unreachable("Unsupported opcode");
2598
2599 case SHADER_OPCODE_LOAD_PAYLOAD:
2600 unreachable("Should be lowered by lower_load_payload()");
2601 }
2602
2603 if (multiple_instructions_emitted)
2604 continue;
2605
2606 if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
2607 assert(p->next_insn_offset == last_insn_offset + 16 ||
2608 !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
2609 "emitting more than 1 instruction");
2610
2611 brw_inst *last = &p->store[last_insn_offset / 16];
2612
2613 if (inst->conditional_mod)
2614 brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
2615 if (devinfo->ver < 12) {
2616 brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
2617 brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
2618 }
2619 }
2620 }
2621
2622 brw_set_uip_jip(p, start_offset);
2623
2624 /* end of program sentinel */
2625 disasm_new_inst_group(disasm_info, p->next_insn_offset);
2626
2627 #ifndef NDEBUG
2628 bool validated =
2629 #else
2630 if (unlikely(debug_flag))
2631 #endif
2632 brw_validate_instructions(devinfo, p->store,
2633 start_offset,
2634 p->next_insn_offset,
2635 disasm_info);
2636
2637 int before_size = p->next_insn_offset - start_offset;
2638 brw_compact_instructions(p, start_offset, disasm_info);
2639 int after_size = p->next_insn_offset - start_offset;
2640
2641 if (unlikely(debug_flag)) {
2642 unsigned char sha1[21];
2643 char sha1buf[41];
2644
2645 _mesa_sha1_compute(p->store + start_offset / sizeof(brw_inst),
2646 after_size, sha1);
2647 _mesa_sha1_format(sha1buf, sha1);
2648
2649 fprintf(stderr, "Native code for %s (sha1 %s)\n"
2650 "SIMD%d shader: %d instructions. %d loops. %u cycles. "
2651 "%d:%d spills:fills, %u sends, "
2652 "scheduled with mode %s. "
2653 "Promoted %u constants. "
2654 "Compacted %d to %d bytes (%.0f%%)\n",
2655 shader_name, sha1buf,
2656 dispatch_width, before_size / 16,
2657 loop_count, perf.latency,
2658 spill_count, fill_count, send_count,
2659 shader_stats.scheduler_mode,
2660 shader_stats.promoted_constants,
2661 before_size, after_size,
2662 100.0f * (before_size - after_size) / before_size);
2663
2664 /* overriding the shader makes disasm_info invalid */
2665 if (!brw_try_override_assembly(p, start_offset, sha1buf)) {
2666 dump_assembly(p->store, start_offset, p->next_insn_offset,
2667 disasm_info, perf.block_latency);
2668 } else {
2669 fprintf(stderr, "Successfully overrode shader with sha1 %s\n\n", sha1buf);
2670 }
2671 }
2672 ralloc_free(disasm_info);
2673 #ifndef NDEBUG
2674 if (!validated && !debug_flag) {
2675 fprintf(stderr,
2676 "Validation failed. Rerun with INTEL_DEBUG=shaders to get more information.\n");
2677 }
2678 #endif
2679 assert(validated);
2680
2681 brw_shader_debug_log(compiler, log_data,
2682 "%s SIMD%d shader: %d inst, %d loops, %u cycles, "
2683 "%d:%d spills:fills, %u sends, "
2684 "scheduled with mode %s, "
2685 "Promoted %u constants, "
2686 "compacted %d to %d bytes.\n",
2687 _mesa_shader_stage_to_abbrev(stage),
2688 dispatch_width, before_size / 16 - nop_count,
2689 loop_count, perf.latency,
2690 spill_count, fill_count, send_count,
2691 shader_stats.scheduler_mode,
2692 shader_stats.promoted_constants,
2693 before_size, after_size);
2694 if (stats) {
2695 stats->dispatch_width = dispatch_width;
2696 stats->instructions = before_size / 16 - nop_count;
2697 stats->sends = send_count;
2698 stats->loops = loop_count;
2699 stats->cycles = perf.latency;
2700 stats->spills = spill_count;
2701 stats->fills = fill_count;
2702 }
2703
2704 return start_offset;
2705 }
2706
2707 void
2708 fs_generator::add_const_data(void *data, unsigned size)
2709 {
2710 assert(prog_data->const_data_size == 0);
2711 if (size > 0) {
2712 prog_data->const_data_size = size;
2713 prog_data->const_data_offset = brw_append_data(p, data, size, 32);
2714 }
2715 }
2716
2717 void
2718 fs_generator::add_resume_sbt(unsigned num_resume_shaders, uint64_t *sbt)
2719 {
2720 assert(brw_shader_stage_is_bindless(stage));
2721 struct brw_bs_prog_data *bs_prog_data = brw_bs_prog_data(prog_data);
2722 if (num_resume_shaders > 0) {
2723 bs_prog_data->resume_sbt_offset =
2724 brw_append_data(p, sbt, num_resume_shaders * sizeof(uint64_t), 32);
2725 for (unsigned i = 0; i < num_resume_shaders; i++) {
2726 size_t offset = bs_prog_data->resume_sbt_offset + i * sizeof(*sbt);
2727 assert(offset <= UINT32_MAX);
2728 brw_add_reloc(p, BRW_SHADER_RELOC_SHADER_START_OFFSET,
2729 BRW_SHADER_RELOC_TYPE_U32,
2730 (uint32_t)offset, (uint32_t)sbt[i]);
2731 }
2732 }
2733 }
2734
2735 const unsigned *
2736 fs_generator::get_assembly()
2737 {
2738 prog_data->relocs = brw_get_shader_relocs(p, &prog_data->num_relocs);
2739
2740 return brw_get_program(p, &prog_data->program_size);
2741 }
2742