/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/register_allocate.h"
#include "brw_vec4.h"
#include "brw_cfg.h"

using namespace brw;

namespace brw {

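/* Rewrite a VGRF reference to its assigned hardware GRF: reg_hw_locations[]
 * gives the base hardware register for each VGRF, and any offset beyond a
 * full register advances the hardware register number.
 *
 * Worked example (a full GRF, REG_SIZE, is 32 bytes): a reference to VGRF 5
 * at byte offset 40, with reg_hw_locations[5] == 20, becomes hardware GRF 21
 * at byte offset 8.
 */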
static void
assign(unsigned int *reg_hw_locations, backend_reg *reg)
{
   if (reg->file == VGRF) {
      reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;
      reg->offset %= REG_SIZE;
   }
}

bool
vec4_visitor::reg_allocate_trivial()
{
   unsigned int hw_reg_mapping[this->alloc.count];
   bool virtual_grf_used[this->alloc.count];
   int next;

   /* Calculate which virtual GRFs are actually in use after whatever
    * optimization passes have occurred.
    */
   for (unsigned i = 0; i < this->alloc.count; i++) {
      virtual_grf_used[i] = false;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF)
         virtual_grf_used[inst->dst.nr] = true;

      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF)
            virtual_grf_used[inst->src[i].nr] = true;
      }
   }

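   /* Pack the used VGRFs into consecutive hardware GRFs, starting right after
    * the payload.  VGRF 0 always gets the first slot; unused VGRFs keep an
    * undefined mapping, but nothing references them after this point.
    */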
   hw_reg_mapping[0] = this->first_non_payload_grf;
   next = hw_reg_mapping[0] + this->alloc.sizes[0];
   for (unsigned i = 1; i < this->alloc.count; i++) {
      if (virtual_grf_used[i]) {
         hw_reg_mapping[i] = next;
         next += this->alloc.sizes[i];
      }
   }
   prog_data->total_grf = next;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   if (prog_data->total_grf > max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           prog_data->total_grf, max_grf);
      return false;
   }

   return true;
}

extern "C" void
brw_vec4_alloc_reg_set(struct brw_compiler *compiler)
{
   int base_reg_count =
      compiler->devinfo->ver >= 7 ? GFX7_MRF_HACK_START : BRW_MAX_GRF;
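   /* On Gfx7+ there are no MRF registers; message payloads are built in the
    * GRFs from GFX7_MRF_HACK_START upward instead, so those GRFs must stay
    * out of the allocatable set.
    */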

   /* After running split_virtual_grfs(), almost all VGRFs will be of size 1.
    * SEND-from-GRF sources cannot be split, so we also need classes for each
    * potential message length.
    */
   const int class_count = MAX_VGRF_SIZE;
   int class_sizes[MAX_VGRF_SIZE];

   for (int i = 0; i < class_count; i++)
      class_sizes[i] = i + 1;

   ralloc_free(compiler->vec4_reg_set.regs);
   compiler->vec4_reg_set.regs = ra_alloc_reg_set(compiler, base_reg_count, false);
   if (compiler->devinfo->ver >= 6)
      ra_set_allocate_round_robin(compiler->vec4_reg_set.regs);
   ralloc_free(compiler->vec4_reg_set.classes);
   compiler->vec4_reg_set.classes = ralloc_array(compiler, struct ra_class *, class_count);

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   for (int i = 0; i < class_count; i++) {
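      /* A contiguous run of class_sizes[i] GRFs can start at any of the first
       * base_reg_count - (class_sizes[i] - 1) registers; any later start
       * would run past the end of the register file.
       */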
      int class_reg_count = base_reg_count - (class_sizes[i] - 1);
      compiler->vec4_reg_set.classes[i] =
         ra_alloc_contig_reg_class(compiler->vec4_reg_set.regs, class_sizes[i]);

      for (int j = 0; j < class_reg_count; j++)
         ra_class_add_reg(compiler->vec4_reg_set.classes[i], j);
   }

   ra_set_finalize(compiler->vec4_reg_set.regs, NULL);
}

void
vec4_visitor::setup_payload_interference(struct ra_graph *g,
                                         int first_payload_node,
                                         int reg_node_count)
{
   int payload_node_count = this->first_non_payload_grf;

   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload reg node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical register classes, which
       * would just be silly.
       */
      ra_set_node_reg(g, first_payload_node + i, i);

      /* For now, just mark each payload node as interfering with every other
       * node to be allocated.
       */
      for (int j = 0; j < reg_node_count; j++) {
         ra_add_node_interference(g, first_payload_node + i, j);
      }
   }
}

bool
vec4_visitor::reg_allocate()
{
   unsigned int hw_reg_mapping[alloc.count];
   int payload_reg_count = this->first_non_payload_grf;

   /* Using the trivial allocator can be useful in debugging undefined
    * register access as a result of broken optimization passes.
    */
   if (0)
      return reg_allocate_trivial();

   const vec4_live_variables &live = live_analysis.require();
   int node_count = alloc.count;
   int first_payload_node = node_count;
   node_count += payload_reg_count;
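
   /* Node layout: nodes [0, alloc.count) are the VGRFs; nodes
    * [first_payload_node, node_count) represent the fixed payload registers.
    */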
   struct ra_graph *g =
      ra_alloc_interference_graph(compiler->vec4_reg_set.regs, node_count);

   for (unsigned i = 0; i < alloc.count; i++) {
      int size = this->alloc.sizes[i];
      assert(size >= 1 && size <= MAX_VGRF_SIZE);
      ra_set_node_class(g, i, compiler->vec4_reg_set.classes[size - 1]);

      for (unsigned j = 0; j < i; j++) {
         if (live.vgrfs_interfere(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   /* Certain instructions can't safely use the same register for their
    * sources and destination.  Add interference.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) {
         for (unsigned i = 0; i < 3; i++) {
            if (inst->src[i].file == VGRF) {
               ra_add_node_interference(g, inst->dst.nr, inst->src[i].nr);
            }
         }
      }
   }

   setup_payload_interference(g, first_payload_node, node_count);

   if (!ra_allocate(g)) {
      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);
      if (this->no_spills) {
         fail("Failure to register allocate.  Reduce number of live "
              "values to avoid this.");
      } else if (reg == -1) {
         fail("no register to spill\n");
      } else {
         spill_reg(reg);
      }
      ralloc_free(g);
      return false;
   }
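   /* A minimal sketch of the retry loop a caller is expected to run around
    * this function (hypothetical; the real loop lives in the visitor's run
    * path):
    *
    *    while (!reg_allocate()) {
    *       if (failed)
    *          break;   // fail() was called: spilling could not proceed
    *    }
    */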

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   prog_data->total_grf = payload_reg_count;
   for (unsigned i = 0; i < alloc.count; i++) {
      hw_reg_mapping[i] = ra_get_node_reg(g, i);
      prog_data->total_grf = MAX2(prog_data->total_grf,
                                  hw_reg_mapping[i] + alloc.sizes[i]);
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   ralloc_free(g);

   return true;
}

/**
 * When we decide to spill a register, instead of blindly spilling every use,
 * we save unspills when the spilled register is read by consecutive
 * instructions. This can avoid a bunch of unspills that would have had very
 * little impact on register allocation anyway.
 *
 * Note that we need to account for this behavior both when spilling a
 * register and when evaluating spill costs. This function is designed so it
 * can be called from both places without duplicating the logic.
 *
 *  - When we call this function from spill_reg(), we pass in scratch_reg the
 *    actual unspill/spill register that we want to reuse in the current
 *    instruction.
 *
 *  - When we call this from evaluate_spill_costs(), we pass the register for
 *    which we are evaluating spilling costs.
 *
 * In either case, we scan backward through the previous instructions,
 * checking whether they read scratch_reg, until we find one that writes it
 * with a compatible mask or neither reads nor writes scratch_reg.
 */
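/* Illustrative example (hypothetical IR):
 *
 *    mov vgrf5.xyzw, ...     <- full unconditional write
 *    add ..., vgrf5.xyzw     <- read: the scratch copy from the write is valid
 *    mul ..., vgrf5.xy       <- consecutive read: the same scratch copy can
 *                               be reused, saving an unspill
 */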
static bool
can_use_scratch_for_source(const vec4_instruction *inst, unsigned i,
                           unsigned scratch_reg)
{
   assert(inst->src[i].file == VGRF);
   bool prev_inst_read_scratch_reg = false;

   /* See if any previous source in the same instruction reads scratch_reg */
   for (unsigned n = 0; n < i; n++) {
      if (inst->src[n].file == VGRF && inst->src[n].nr == scratch_reg)
         prev_inst_read_scratch_reg = true;
   }

   /* Now check if previous instructions read/write scratch_reg */
   for (vec4_instruction *prev_inst = (vec4_instruction *) inst->prev;
        !prev_inst->is_head_sentinel();
        prev_inst = (vec4_instruction *) prev_inst->prev) {

      /* If the previous instruction writes to scratch_reg then we can reuse
       * it if the write is not conditional and the channels it writes are
       * compatible with our read mask
       */
      if (prev_inst->dst.file == VGRF && prev_inst->dst.nr == scratch_reg) {
         return (!prev_inst->predicate || prev_inst->opcode == BRW_OPCODE_SEL) &&
                (brw_mask_for_swizzle(inst->src[i].swizzle) &
                 ~prev_inst->dst.writemask) == 0;
      }

      /* Skip scratch read/writes so that instructions generated by spilling
       * other registers (that won't read/write scratch_reg) do not stop us from
       * reusing scratch_reg for this instruction.
       */
      if (prev_inst->opcode == SHADER_OPCODE_GFX4_SCRATCH_WRITE ||
          prev_inst->opcode == SHADER_OPCODE_GFX4_SCRATCH_READ)
         continue;

      /* If the previous instruction does not write to scratch_reg, then check
       * if it reads it
       */
      int n;
      for (n = 0; n < 3; n++) {
         if (prev_inst->src[n].file == VGRF &&
             prev_inst->src[n].nr == scratch_reg) {
            prev_inst_read_scratch_reg = true;
            break;
         }
      }
      if (n == 3) {
         /* The previous instruction does not read scratch_reg. At this point,
          * if no previous instruction has read scratch_reg it means that we
          * will need to unspill it here and we can't reuse it (so we return
          * false). Otherwise, if we found at least one consecutive instruction
          * that read scratch_reg, then we know that we got here from
          * evaluate_spill_costs (since for the spill_reg path any block of
          * consecutive instructions using scratch_reg must start with a write
          * to that register, so we would've exited the loop in the check for
          * the write that we have at the start of this loop), and in that case
          * it means that we found the point at which the scratch_reg would be
          * unspilled. Since we always unspill a full vec4, it means that we
          * have all the channels available and we can just return true to
          * signal that we can reuse the register in the current instruction
          * too.
          */
         return prev_inst_read_scratch_reg;
      }
   }

   return prev_inst_read_scratch_reg;
}

static inline float
spill_cost_for_type(enum brw_reg_type type)
{
   /* Spilling of a 64-bit register involves emitting two 32-bit scratch
    * messages plus the 64b/32b shuffling code.
    */
   return type_sz(type) == 8 ? 2.25f : 1.0f;
}

void
vec4_visitor::evaluate_spill_costs(float *spill_costs, bool *no_spill)
{
   float loop_scale = 1.0;

   unsigned *reg_type_size = (unsigned *)
      ralloc_size(NULL, this->alloc.count * sizeof(unsigned));

   for (unsigned i = 0; i < this->alloc.count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = alloc.sizes[i] != 1 && alloc.sizes[i] != 2;
      reg_type_size[i] = 0;
   }

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
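   /* With the 10x heuristic, an access nested two loop levels deep is
    * weighted 10 * 10 = 100 times a top-level access.
    */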
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && !no_spill[inst->src[i].nr]) {
            /* We will only unspill src[i] if it wasn't unspilled for the
             * previous instruction, in which case we'll just reuse the scratch
             * reg for this instruction.
             */
            if (!can_use_scratch_for_source(inst, i, inst->src[i].nr)) {
               spill_costs[inst->src[i].nr] +=
                  loop_scale * spill_cost_for_type(inst->src[i].type);
               if (inst->src[i].reladdr ||
                   inst->src[i].offset >= REG_SIZE)
                  no_spill[inst->src[i].nr] = true;

               /* We don't support unspills of partial DF reads.
                *
                * Our 64-bit unspills are implemented with two 32-bit scratch
                * messages, each one reading data for both SIMD4x2 threads,
                * which we then shuffle into the correct 64-bit layout. Ensure
                * that we are reading data for both threads.
                */
               if (type_sz(inst->src[i].type) == 8 && inst->exec_size != 8)
                  no_spill[inst->src[i].nr] = true;
            }

            /* We can't spill registers that mix 32-bit and 64-bit access (that
             * contain 64-bit data that is operated on via 32-bit instructions)
             */
            unsigned type_size = type_sz(inst->src[i].type);
            if (reg_type_size[inst->src[i].nr] == 0)
               reg_type_size[inst->src[i].nr] = type_size;
            else if (reg_type_size[inst->src[i].nr] != type_size)
               no_spill[inst->src[i].nr] = true;
         }
      }

      if (inst->dst.file == VGRF && !no_spill[inst->dst.nr]) {
         spill_costs[inst->dst.nr] +=
            loop_scale * spill_cost_for_type(inst->dst.type);
         if (inst->dst.reladdr || inst->dst.offset >= REG_SIZE)
            no_spill[inst->dst.nr] = true;

         /* We don't support spills of partial DF writes.
          *
          * Our 64-bit spills are implemented with two 32-bit scratch messages,
          * each one writing data for both SIMD4x2 threads. Ensure that we
          * are writing data for both threads.
          */
         if (type_sz(inst->dst.type) == 8 && inst->exec_size != 8)
            no_spill[inst->dst.nr] = true;

         /* We can't spill registers that mix 32-bit and 64-bit access (that
          * contain 64-bit data that is operated on via 32-bit instructions)
          */
         unsigned type_size = type_sz(inst->dst.type);
         if (reg_type_size[inst->dst.nr] == 0)
            reg_type_size[inst->dst.nr] = type_size;
         else if (reg_type_size[inst->dst.nr] != type_size)
            no_spill[inst->dst.nr] = true;
      }

      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;

      case SHADER_OPCODE_GFX4_SCRATCH_READ:
      case SHADER_OPCODE_GFX4_SCRATCH_WRITE:
      case VEC4_OPCODE_MOV_FOR_SCRATCH:
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].file == VGRF)
               no_spill[inst->src[i].nr] = true;
         }
         if (inst->dst.file == VGRF)
            no_spill[inst->dst.nr] = true;
         break;

      default:
         break;
      }
   }

   ralloc_free(reg_type_size);
}

int
vec4_visitor::choose_spill_reg(struct ra_graph *g)
{
   float spill_costs[this->alloc.count];
   bool no_spill[this->alloc.count];

   evaluate_spill_costs(spill_costs, no_spill);

   for (unsigned i = 0; i < this->alloc.count; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}

void
vec4_visitor::spill_reg(unsigned spill_reg_nr)
{
   assert(alloc.sizes[spill_reg_nr] == 1 || alloc.sizes[spill_reg_nr] == 2);
   unsigned spill_offset = last_scratch;
   last_scratch += alloc.sizes[spill_reg_nr];
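   /* spill_offset is in REG_SIZE units, like last_scratch; it is passed to
    * every scratch read/write emitted below for this register.
    */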

   /* Generate spill/unspill instructions for the objects being spilled. */
   unsigned scratch_reg = ~0u;
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && inst->src[i].nr == spill_reg_nr) {
            if (scratch_reg == ~0u ||
                !can_use_scratch_for_source(inst, i, scratch_reg)) {
               /* We need to unspill anyway so make sure we read the full vec4
                * in any case. This way, the cached register can be reused
                * for consecutive instructions that read different channels of
                * the same vec4.
                */
               scratch_reg = alloc.allocate(alloc.sizes[spill_reg_nr]);
               src_reg temp = inst->src[i];
               temp.nr = scratch_reg;
               temp.offset = 0;
               temp.swizzle = BRW_SWIZZLE_XYZW;
               emit_scratch_read(block, inst,
                                 dst_reg(temp), inst->src[i], spill_offset);
               temp.offset = inst->src[i].offset;
            }
            assert(scratch_reg != ~0u);
            inst->src[i].nr = scratch_reg;
         }
      }

      if (inst->dst.file == VGRF && inst->dst.nr == spill_reg_nr) {
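         /* emit_scratch_write() retargets inst->dst to a fresh temporary and
          * emits the scratch write after inst, so the temporary holds the
          * register's current value; cache it as scratch_reg for reuse by
          * subsequent reads.
          */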
         emit_scratch_write(block, inst, spill_offset);
         scratch_reg = inst->dst.nr;
      }
   }

   invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);
}

} /* namespace brw */