/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/** @file register_allocate.c
 *
 * Graph-coloring register allocator.
 *
 * The basic idea of graph coloring is to make a node in a graph for
 * everything that needs a register (color) number assigned, and make
 * edges in the graph between nodes that interfere (can't be allocated
 * to the same register at the same time).
 *
 * During the "simplify" process, any node with fewer edges than there
 * are registers can be assigned a register regardless of what its
 * neighbors choose, so that node is pushed on a stack and removed
 * (with its edges) from the graph.  That likely causes other nodes to
 * become trivially colorable as well.
 *
 * Then during the "select" process, nodes are popped off of that
 * stack, their edges restored, and assigned a color different from
 * their neighbors.  Because they were pushed on the stack only when
 * they were trivially colorable, any color chosen won't interfere
 * with the nodes to be popped later.
 *
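 * As a small worked example (illustrative only): with two registers and
 * three nodes a, b, c where a--b and b--c interfere, a and c each have one
 * neighbor and are pushed first, after which b has no remaining neighbors
 * and is pushed as well.  Select then pops b (assigning, say, r0), then c
 * (r1, avoiding its neighbor b), then a (r1, likewise avoiding b).
 *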
 * The downside to most graph coloring is that real hardware often has
 * limitations, like registers that need to be allocated to a node in
 * pairs, or aligned on some boundary.  This implementation follows
 * the paper "Retargetable Graph-Coloring Register Allocation for
 * Irregular Architectures" by Johan Runeson and Sven-Olof Nyström.
 *
 * In this system, there are register classes each containing various
 * registers, and registers may interfere with other registers.  For
 * example, one might have a class of base registers, and a class of
 * aligned register pairs that would each interfere with their pair of
 * the base registers.  Each node has a register class it needs to be
 * assigned to.  Define p(B) to be the size of register class B, and
 * q(B,C) to be the number of registers in B that the worst choice
 * register in C could conflict with.  Then, this system replaces the
 * basic graph coloring test of "fewer edges from this node than there
 * are registers" with "for this node of class B, the sum of q(B,C)
 * over each neighbor node of class C is less than p(B)".
 *
 * A nice feature of the pq test is that q(B,C) can be computed once
 * up front and stored in a 2-dimensional array, so that the cost of
 * coloring a node is constant with the number of registers.  We do
 * this during ra_set_finalize().
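 *
 * As a concrete (illustrative) example: with four base registers b0-b3 in
 * class B and two aligned pairs (b0,b1) and (b2,b3) in class C, each pair
 * conflicts with the two base registers it aliases.  Then p(B) = 4,
 * p(C) = 2, q(B,B) = 1, q(B,C) = 2 (a pair overlaps two base registers),
 * q(C,B) = 1 and q(C,C) = 1, so a class-B node with one class-B neighbor
 * and one class-C neighbor passes the test because 1 + 2 < 4.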
 */

#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include "blob.h"
#include "ralloc.h"
#include "main/macros.h"
#include "util/bitset.h"
#include "util/u_dynarray.h"
#include "u_math.h"
#include "register_allocate.h"

/* Sentinel for ra_node::reg / ra_node::forced_reg when unassigned. */
#define NO_REG ~0U

struct ra_reg {
   BITSET_WORD *conflicts;
   struct util_dynarray conflict_list;
};

struct ra_regs {
   struct ra_reg *regs;
   unsigned int count;

   struct ra_class **classes;
   unsigned int class_count;

   bool round_robin;
};

struct ra_class {
   /**
    * Bitset indicating which registers belong to this class.
    *
    * (If bit N is set, then register N belongs to this class.)
    */
   BITSET_WORD *regs;

   /**
    * p(B) in Runeson/Nyström paper.
    *
    * This is "how many regs are in the set."
    */
   unsigned int p;

   /**
    * q(B,C) (indexed by C, B is this register class) in
    * Runeson/Nyström paper.  This is "how many registers of B could
    * the worst choice register from C conflict with".
    */
   unsigned int *q;
};

struct ra_node {
   /** @{
    *
    * List of which nodes this node interferes with.  This should be
    * symmetric with the other node.
    */
   BITSET_WORD *adjacency;

   struct util_dynarray adjacency_list;
   /** @} */

   unsigned int class;

   /* Client-assigned register, if assigned, or NO_REG. */
   unsigned int forced_reg;

   /* Register, if assigned, or NO_REG. */
   unsigned int reg;

   /**
    * The q total, as defined in the Runeson/Nyström paper, for all the
    * interfering nodes not in the stack.
    */
   unsigned int q_total;

   /* For an implementation that needs register spilling, this is the
    * approximate cost of spilling this node.
    */
   float spill_cost;

   /* Temporary data for the algorithm to scratch around in */
   struct {
      /**
       * Temporary version of q_total which we decrement as things are placed
       * into the stack.
       */
      unsigned int q_total;
   } tmp;
};

struct ra_graph {
   struct ra_regs *regs;
   /**
    * The variables that need register allocation.
    */
   struct ra_node *nodes;
   unsigned int count; /**< count of nodes. */

   unsigned int alloc; /**< count of nodes allocated. */

   ra_select_reg_callback select_reg_callback;
   void *select_reg_callback_data;

   /* Temporary data for the algorithm to scratch around in */
   struct {
      unsigned int *stack;
      unsigned int stack_count;

      /** Bit-set indicating, for each node, if it's in the stack */
      BITSET_WORD *in_stack;

      /** Bit-set indicating, for each node, if it is pre-assigned */
      BITSET_WORD *reg_assigned;

      /** Bit-set indicating, for each node, the value of the pq test */
      BITSET_WORD *pq_test;

      /** For each BITSET_WORD, the minimum q_total, or ~0 if unknown */
      unsigned int *min_q_total;

      /**
       * For each BITSET_WORD, the node with the minimum q_total, if
       * min_q_total[i] != ~0.
       */
      unsigned int *min_q_node;

      /**
       * Tracks the start of the set of optimistically-colored nodes in the
       * stack.
       */
      unsigned int stack_optimistic_start;
   } tmp;
};

/**
 * Creates a set of registers for the allocator.
 *
 * mem_ctx is a ralloc context for the allocator.  The reg set may be freed
 * using ralloc_free().
 */
struct ra_regs *
ra_alloc_reg_set(void *mem_ctx, unsigned int count, bool need_conflict_lists)
{
   unsigned int i;
   struct ra_regs *regs;

   regs = rzalloc(mem_ctx, struct ra_regs);
   regs->count = count;
   regs->regs = rzalloc_array(regs, struct ra_reg, count);

   for (i = 0; i < count; i++) {
      regs->regs[i].conflicts = rzalloc_array(regs->regs, BITSET_WORD,
                                              BITSET_WORDS(count));
      BITSET_SET(regs->regs[i].conflicts, i);

      util_dynarray_init(&regs->regs[i].conflict_list,
                         need_conflict_lists ? regs->regs : NULL);
      if (need_conflict_lists)
         util_dynarray_append(&regs->regs[i].conflict_list, unsigned int, i);
   }

   return regs;
}
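
/* A minimal usage sketch for building a register set (illustrative only;
 * assumes a hypothetical 6-register file where registers 0-3 are singles
 * and registers 4-5 are aligned pairs aliasing them):
 *
 *    struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, 6, true);
 *
 *    unsigned int singles = ra_alloc_reg_class(regs);
 *    for (unsigned int r = 0; r < 4; r++)
 *       ra_class_add_reg(regs, singles, r);
 *
 *    unsigned int pairs = ra_alloc_reg_class(regs);
 *    ra_class_add_reg(regs, pairs, 4);
 *    ra_class_add_reg(regs, pairs, 5);
 *
 *    ra_add_transitive_reg_conflict(regs, 0, 4);
 *    ra_add_transitive_reg_conflict(regs, 1, 4);
 *    ra_add_transitive_reg_conflict(regs, 2, 5);
 *    ra_add_transitive_reg_conflict(regs, 3, 5);
 *
 *    ra_set_finalize(regs, NULL);
 */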

/**
 * The register allocator by default prefers to allocate low register numbers,
 * since it was written for hardware (gen4/5 Intel) that is limited in its
 * multithreadedness by the number of registers used in a given shader.
 *
 * However, for hardware without that restriction, densely packed register
 * allocation can put serious constraints on instruction scheduling.  This
 * function tells the allocator to rotate around the registers if possible as
 * it allocates the nodes.
 */
void
ra_set_allocate_round_robin(struct ra_regs *regs)
{
   regs->round_robin = true;
}

static void
ra_add_conflict_list(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   struct ra_reg *reg1 = &regs->regs[r1];

   if (reg1->conflict_list.mem_ctx) {
      util_dynarray_append(&reg1->conflict_list, unsigned int, r2);
   }
   BITSET_SET(reg1->conflicts, r2);
}

void
ra_add_reg_conflict(struct ra_regs *regs, unsigned int r1, unsigned int r2)
{
   if (!BITSET_TEST(regs->regs[r1].conflicts, r2)) {
      ra_add_conflict_list(regs, r1, r2);
      ra_add_conflict_list(regs, r2, r1);
   }
}

/**
 * Adds a conflict between base_reg and reg, and also between reg and
 * anything that base_reg conflicts with.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_add_transitive_reg_conflict(struct ra_regs *regs,
                               unsigned int base_reg, unsigned int reg)
{
   ra_add_reg_conflict(regs, reg, base_reg);

   util_dynarray_foreach(&regs->regs[base_reg].conflict_list, unsigned int,
                         r2p) {
      ra_add_reg_conflict(regs, reg, *r2p);
   }
}

/**
 * Sets up conflicts between base_reg and its two half registers reg0 and
 * reg1, but takes care not to add conflicts between reg0 and reg1.
 *
 * This is useful for architectures where full-size registers are aliased by
 * two half-size registers (e.g. 32-bit float and 16-bit float registers).
 */
void
ra_add_transitive_reg_pair_conflict(struct ra_regs *regs,
                                    unsigned int base_reg, unsigned int reg0, unsigned int reg1)
{
   ra_add_reg_conflict(regs, reg0, base_reg);
   ra_add_reg_conflict(regs, reg1, base_reg);

   util_dynarray_foreach(&regs->regs[base_reg].conflict_list, unsigned int, i) {
      unsigned int conflict = *i;
      if (conflict != reg1)
         ra_add_reg_conflict(regs, reg0, conflict);
      if (conflict != reg0)
         ra_add_reg_conflict(regs, reg1, conflict);
   }
}

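/* For example (illustrative numbering), if full register 0 is aliased by
 * half registers 1 and 2:
 *
 *    ra_add_transitive_reg_pair_conflict(regs, 0, 1, 2);
 *
 * makes 1 and 2 each conflict with 0 (and with everything else 0 conflicts
 * with), while 1 and 2 remain allocatable at the same time.
 */
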
/**
 * Makes every conflict on the given register transitive.  In other words,
 * every register that conflicts with r will now conflict with every other
 * register conflicting with r.
 *
 * This can simplify code for setting up multiple register classes
 * which are aggregates of some base hardware registers, compared to
 * explicitly using ra_add_reg_conflict.
 */
void
ra_make_reg_conflicts_transitive(struct ra_regs *regs, unsigned int r)
{
   struct ra_reg *reg = &regs->regs[r];
   int c;

   BITSET_FOREACH_SET(c, reg->conflicts, regs->count) {
      struct ra_reg *other = &regs->regs[c];
      unsigned i;
      for (i = 0; i < BITSET_WORDS(regs->count); i++)
         other->conflicts[i] |= reg->conflicts[i];
   }
}

unsigned int
ra_alloc_reg_class(struct ra_regs *regs)
{
   struct ra_class *class;

   regs->classes = reralloc(regs->regs, regs->classes, struct ra_class *,
                            regs->class_count + 1);

   class = rzalloc(regs, struct ra_class);
   regs->classes[regs->class_count] = class;

   class->regs = rzalloc_array(class, BITSET_WORD, BITSET_WORDS(regs->count));

   return regs->class_count++;
}

void
ra_class_add_reg(struct ra_regs *regs, unsigned int c, unsigned int r)
{
   struct ra_class *class = regs->classes[c];

   assert(r < regs->count);

   BITSET_SET(class->regs, r);
   class->p++;
}

/**
 * Returns true if the register belongs to the given class.
 */
static bool
reg_belongs_to_class(unsigned int r, struct ra_class *c)
{
   return BITSET_TEST(c->regs, r);
}

/**
 * Must be called after all conflicts and register classes have been
 * set up and before the register set is used for allocation.
 * To avoid costly q value computation, use the q_values parameter
 * to pass precomputed q values to this function.
 */
void
ra_set_finalize(struct ra_regs *regs, unsigned int **q_values)
{
   unsigned int b, c;

   for (b = 0; b < regs->class_count; b++) {
      regs->classes[b]->q = ralloc_array(regs, unsigned int, regs->class_count);
   }

   if (q_values) {
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            regs->classes[b]->q[c] = q_values[b][c];
         }
      }
   } else {
      /* Compute, for each class B and C, how many regs of B an
       * allocation to C could conflict with.
       */
      for (b = 0; b < regs->class_count; b++) {
         for (c = 0; c < regs->class_count; c++) {
            unsigned int rc;
            int max_conflicts = 0;

            BITSET_FOREACH_SET(rc, regs->classes[c]->regs, regs->count) {
               int conflicts = 0;

               util_dynarray_foreach(&regs->regs[rc].conflict_list,
                                     unsigned int, rbp) {
                  unsigned int rb = *rbp;
                  if (reg_belongs_to_class(rb, regs->classes[b]))
                     conflicts++;
               }
               max_conflicts = MAX2(max_conflicts, conflicts);
            }
            regs->classes[b]->q[c] = max_conflicts;
         }
      }
   }

   for (b = 0; b < regs->count; b++) {
      util_dynarray_fini(&regs->regs[b].conflict_list);
   }
}

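/* When the q values are already known (e.g. precomputed offline for a fixed
 * register file), they can be passed in to skip the computation above.  A
 * sketch for the two-class example from the file header (values
 * illustrative):
 *
 *    unsigned int q_singles[2] = { 1, 2 };   // q(B,B), q(B,C)
 *    unsigned int q_pairs[2]   = { 1, 1 };   // q(C,B), q(C,C)
 *    unsigned int *q_values[2] = { q_singles, q_pairs };
 *
 *    ra_set_finalize(regs, q_values);
 */
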
void
ra_set_serialize(const struct ra_regs *regs, struct blob *blob)
{
   blob_write_uint32(blob, regs->count);
   blob_write_uint32(blob, regs->class_count);

   for (unsigned int r = 0; r < regs->count; r++) {
      struct ra_reg *reg = &regs->regs[r];
      blob_write_bytes(blob, reg->conflicts, BITSET_WORDS(regs->count) *
                                             sizeof(BITSET_WORD));
      assert(util_dynarray_num_elements(&reg->conflict_list, unsigned int) == 0);
   }

   for (unsigned int c = 0; c < regs->class_count; c++) {
      struct ra_class *class = regs->classes[c];
      blob_write_bytes(blob, class->regs, BITSET_WORDS(regs->count) *
                                          sizeof(BITSET_WORD));
      blob_write_uint32(blob, class->p);
      blob_write_bytes(blob, class->q, regs->class_count * sizeof(*class->q));
   }

   blob_write_uint32(blob, regs->round_robin);
}

struct ra_regs *
ra_set_deserialize(void *mem_ctx, struct blob_reader *blob)
{
   unsigned int reg_count = blob_read_uint32(blob);
   unsigned int class_count = blob_read_uint32(blob);

   struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, reg_count, false);
   assert(regs->count == reg_count);

   for (unsigned int r = 0; r < reg_count; r++) {
      struct ra_reg *reg = &regs->regs[r];
      blob_copy_bytes(blob, reg->conflicts, BITSET_WORDS(reg_count) *
                                            sizeof(BITSET_WORD));
   }

   assert(regs->classes == NULL);
   regs->classes = ralloc_array(regs->regs, struct ra_class *, class_count);
   regs->class_count = class_count;

   for (unsigned int c = 0; c < class_count; c++) {
      struct ra_class *class = rzalloc(regs, struct ra_class);
      regs->classes[c] = class;

      class->regs = ralloc_array(class, BITSET_WORD, BITSET_WORDS(reg_count));
      blob_copy_bytes(blob, class->regs, BITSET_WORDS(reg_count) *
                                         sizeof(BITSET_WORD));

      class->p = blob_read_uint32(blob);

      class->q = ralloc_array(regs->classes[c], unsigned int, class_count);
      blob_copy_bytes(blob, class->q, class_count * sizeof(*class->q));
   }

   regs->round_robin = blob_read_uint32(blob);

   return regs;
}

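/* A sketch of caching a finalized set through a blob (assuming the usual
 * blob.h entry points; error handling omitted):
 *
 *    struct blob blob;
 *    blob_init(&blob);
 *    ra_set_serialize(regs, &blob);
 *    // ... persist blob.data / blob.size ...
 *
 *    struct blob_reader reader;
 *    blob_reader_init(&reader, blob.data, blob.size);
 *    struct ra_regs *copy = ra_set_deserialize(mem_ctx, &reader);
 */
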
static void
ra_add_node_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_SET(g->nodes[n1].adjacency, n2);

   assert(n1 != n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total += g->regs->classes[n1_class]->q[n2_class];

   util_dynarray_append(&g->nodes[n1].adjacency_list, unsigned int, n2);
}

static void
ra_node_remove_adjacency(struct ra_graph *g, unsigned int n1, unsigned int n2)
{
   BITSET_CLEAR(g->nodes[n1].adjacency, n2);

   assert(n1 != n2);

   int n1_class = g->nodes[n1].class;
   int n2_class = g->nodes[n2].class;
   g->nodes[n1].q_total -= g->regs->classes[n1_class]->q[n2_class];

   util_dynarray_delete_unordered(&g->nodes[n1].adjacency_list, unsigned int,
                                  n2);
}

static void
ra_realloc_interference_graph(struct ra_graph *g, unsigned int alloc)
{
   if (alloc <= g->alloc)
      return;

   /* If we always have a whole number of BITSET_WORDs, it makes it much
    * easier to memset the top of the growing bitsets.
    */
   assert(g->alloc % BITSET_WORDBITS == 0);
   alloc = align64(alloc, BITSET_WORDBITS);

   g->nodes = reralloc(g, g->nodes, struct ra_node, alloc);

   unsigned g_bitset_count = BITSET_WORDS(g->alloc);
   unsigned bitset_count = BITSET_WORDS(alloc);
   /* For nodes already in the graph, we just have to grow the adjacency set */
   for (unsigned i = 0; i < g->alloc; i++) {
      assert(g->nodes[i].adjacency != NULL);
      g->nodes[i].adjacency = rerzalloc(g, g->nodes[i].adjacency, BITSET_WORD,
                                        g_bitset_count, bitset_count);
   }

   /* For new nodes, we have to fully initialize them */
   for (unsigned i = g->alloc; i < alloc; i++) {
      memset(&g->nodes[i], 0, sizeof(g->nodes[i]));
      g->nodes[i].adjacency = rzalloc_array(g, BITSET_WORD, bitset_count);
      util_dynarray_init(&g->nodes[i].adjacency_list, g);
      g->nodes[i].q_total = 0;

      g->nodes[i].forced_reg = NO_REG;
      g->nodes[i].reg = NO_REG;
   }

   /* These are scratch values and don't need to be zeroed.  We'll clear them
    * as part of ra_simplify() setup.
    */
   g->tmp.stack = reralloc(g, g->tmp.stack, unsigned int, alloc);
   g->tmp.in_stack = reralloc(g, g->tmp.in_stack, BITSET_WORD, bitset_count);

   g->tmp.reg_assigned = reralloc(g, g->tmp.reg_assigned, BITSET_WORD,
                                  bitset_count);
   g->tmp.pq_test = reralloc(g, g->tmp.pq_test, BITSET_WORD, bitset_count);
   g->tmp.min_q_total = reralloc(g, g->tmp.min_q_total, unsigned int,
                                 bitset_count);
   g->tmp.min_q_node = reralloc(g, g->tmp.min_q_node, unsigned int,
                                bitset_count);

   g->alloc = alloc;
}

struct ra_graph *
ra_alloc_interference_graph(struct ra_regs *regs, unsigned int count)
{
   struct ra_graph *g;

   g = rzalloc(NULL, struct ra_graph);
   g->regs = regs;
   g->count = count;
   ra_realloc_interference_graph(g, count);

   return g;
}

void
ra_resize_interference_graph(struct ra_graph *g, unsigned int count)
{
   g->count = count;
   if (count > g->alloc) {
      /* Grow by at least 2x to amortize, but never below the requested size. */
      ra_realloc_interference_graph(g, MAX2(count, g->alloc * 2));
   }
}

void ra_set_select_reg_callback(struct ra_graph *g,
                                ra_select_reg_callback callback,
                                void *data)
{
   g->select_reg_callback = callback;
   g->select_reg_callback_data = data;
}

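/* A sketch of a driver-provided selection callback; the signature matches
 * the call in ra_select() below ("reg_count" here is illustrative and would
 * come from the driver).  This one just picks the lowest available register:
 *
 *    static unsigned int
 *    pick_lowest(unsigned int n, BITSET_WORD *regs, void *data)
 *    {
 *       unsigned int r;
 *       BITSET_FOREACH_SET(r, regs, reg_count)
 *          return r;
 *       unreachable("ra_select() only calls this with a non-empty set");
 *    }
 *
 *    ra_set_select_reg_callback(g, pick_lowest, NULL);
 */
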
void
ra_set_node_class(struct ra_graph *g,
                  unsigned int n, unsigned int class)
{
   g->nodes[n].class = class;
}

unsigned int
ra_get_node_class(struct ra_graph *g,
                  unsigned int n)
{
   return g->nodes[n].class;
}

unsigned int
ra_add_node(struct ra_graph *g, unsigned int class)
{
   unsigned int n = g->count;
   ra_resize_interference_graph(g, g->count + 1);

   ra_set_node_class(g, n, class);

   return n;
}

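/* For example (illustrative), a value created late in compilation can be
 * added to an existing graph without rebuilding it:
 *
 *    unsigned int tmp = ra_add_node(g, singles);
 *    ra_add_node_interference(g, tmp, some_live_node);
 */
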
void
ra_add_node_interference(struct ra_graph *g,
                         unsigned int n1, unsigned int n2)
{
   assert(n1 < g->count && n2 < g->count);
   if (n1 != n2 && !BITSET_TEST(g->nodes[n1].adjacency, n2)) {
      ra_add_node_adjacency(g, n1, n2);
      ra_add_node_adjacency(g, n2, n1);
   }
}

void
ra_reset_node_interference(struct ra_graph *g, unsigned int n)
{
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      ra_node_remove_adjacency(g, *n2p, n);
   }

   memset(g->nodes[n].adjacency, 0,
          BITSET_WORDS(g->count) * sizeof(BITSET_WORD));
   util_dynarray_clear(&g->nodes[n].adjacency_list);
}

static void
update_pq_info(struct ra_graph *g, unsigned int n)
{
   int i = n / BITSET_WORDBITS;
   int n_class = g->nodes[n].class;
   if (g->nodes[n].tmp.q_total < g->regs->classes[n_class]->p) {
      BITSET_SET(g->tmp.pq_test, n);
   } else if (g->tmp.min_q_total[i] != UINT_MAX) {
      /* Only update min_q_total and min_q_node if min_q_total != UINT_MAX so
       * that we don't update while we have stale data and accidentally mark
       * it as non-stale.  Also, in order to remain consistent with the old
       * naive implementation of the algorithm, we do a lexicographical sort
       * to ensure that we always choose the node with the highest node index.
       */
      if (g->nodes[n].tmp.q_total < g->tmp.min_q_total[i] ||
          (g->nodes[n].tmp.q_total == g->tmp.min_q_total[i] &&
           n > g->tmp.min_q_node[i])) {
         g->tmp.min_q_total[i] = g->nodes[n].tmp.q_total;
         g->tmp.min_q_node[i] = n;
      }
   }
}

static void
add_node_to_stack(struct ra_graph *g, unsigned int n)
{
   int n_class = g->nodes[n].class;

   assert(!BITSET_TEST(g->tmp.in_stack, n));

   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int n2_class = g->nodes[n2].class;

      if (!BITSET_TEST(g->tmp.in_stack, n2) &&
          !BITSET_TEST(g->tmp.reg_assigned, n2)) {
         assert(g->nodes[n2].tmp.q_total >= g->regs->classes[n2_class]->q[n_class]);
         g->nodes[n2].tmp.q_total -= g->regs->classes[n2_class]->q[n_class];
         update_pq_info(g, n2);
      }
   }

   g->tmp.stack[g->tmp.stack_count] = n;
   g->tmp.stack_count++;
   BITSET_SET(g->tmp.in_stack, n);

   /* Flag the min_q_total for n's block as dirty so it gets recalculated */
   g->tmp.min_q_total[n / BITSET_WORDBITS] = UINT_MAX;
}

/**
 * Simplifies the interference graph by pushing all
 * trivially-colorable nodes into a stack of nodes to be colored,
 * removing them from the graph, and rinsing and repeating.
 *
 * If we encounter a case where we can't push any nodes on the stack, then
 * we optimistically choose a node and push it on the stack. We heuristically
 * push the node with the lowest total q value, since it has the fewest
 * neighbors and therefore is most likely to be allocated.
 */
static void
ra_simplify(struct ra_graph *g)
{
   bool progress = true;
   unsigned int stack_optimistic_start = UINT_MAX;

   /* Figure out the high bit and bit mask for the first iteration of a loop
    * over BITSET_WORDs.
    */
   const unsigned int top_word_high_bit = (g->count - 1) % BITSET_WORDBITS;

   /* Do a quick pre-pass to set things up */
   g->tmp.stack_count = 0;
   for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
        i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
      g->tmp.in_stack[i] = 0;
      g->tmp.reg_assigned[i] = 0;
      g->tmp.pq_test[i] = 0;
      g->tmp.min_q_total[i] = UINT_MAX;
      g->tmp.min_q_node[i] = UINT_MAX;
      for (int j = high_bit; j >= 0; j--) {
         unsigned int n = i * BITSET_WORDBITS + j;
         g->nodes[n].reg = g->nodes[n].forced_reg;
         g->nodes[n].tmp.q_total = g->nodes[n].q_total;
         if (g->nodes[n].reg != NO_REG)
            g->tmp.reg_assigned[i] |= BITSET_BIT(j);
         update_pq_info(g, n);
      }
   }

   while (progress) {
      unsigned int min_q_total = UINT_MAX;
      unsigned int min_q_node = UINT_MAX;

      progress = false;

      for (int i = BITSET_WORDS(g->count) - 1, high_bit = top_word_high_bit;
           i >= 0; i--, high_bit = BITSET_WORDBITS - 1) {
         BITSET_WORD mask = ~(BITSET_WORD)0 >> (BITSET_WORDBITS - 1 - high_bit);

         BITSET_WORD skip = g->tmp.in_stack[i] | g->tmp.reg_assigned[i];
         if (skip == mask)
            continue;

         BITSET_WORD pq = g->tmp.pq_test[i] & ~skip;
         if (pq) {
            /* In this case, we have nodes we can immediately push onto the
             * stack.  This also means that we're guaranteed to make progress
             * and we don't need to bother updating min_q_total because we
             * know we're going to loop again before attempting to do anything
             * optimistic.
             */
            for (int j = high_bit; j >= 0; j--) {
               if (pq & BITSET_BIT(j)) {
                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  add_node_to_stack(g, n);
                  /* add_node_to_stack() may update pq_test for this word so
                   * we need to update our local copy.
                   */
                  pq = g->tmp.pq_test[i] & ~skip;
                  progress = true;
               }
            }
         } else if (!progress) {
            if (g->tmp.min_q_total[i] == UINT_MAX) {
               /* The min_q_total and min_q_node are dirty because we added
                * one of these nodes to the stack.  It needs to be
                * recalculated.
                */
               for (int j = high_bit; j >= 0; j--) {
                  if (skip & BITSET_BIT(j))
                     continue;

                  unsigned int n = i * BITSET_WORDBITS + j;
                  assert(n < g->count);
                  if (g->nodes[n].tmp.q_total < g->tmp.min_q_total[i]) {
                     g->tmp.min_q_total[i] = g->nodes[n].tmp.q_total;
                     g->tmp.min_q_node[i] = n;
                  }
               }
            }
            if (g->tmp.min_q_total[i] < min_q_total) {
               min_q_node = g->tmp.min_q_node[i];
               min_q_total = g->tmp.min_q_total[i];
            }
         }
      }

      if (!progress && min_q_total != UINT_MAX) {
         if (stack_optimistic_start == UINT_MAX)
            stack_optimistic_start = g->tmp.stack_count;

         add_node_to_stack(g, min_q_node);
         progress = true;
      }
   }

   g->tmp.stack_optimistic_start = stack_optimistic_start;
}

static bool
ra_any_neighbors_conflict(struct ra_graph *g, unsigned int n, unsigned int r)
{
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;

      if (!BITSET_TEST(g->tmp.in_stack, n2) &&
          BITSET_TEST(g->regs->regs[r].conflicts, g->nodes[n2].reg)) {
         return true;
      }
   }

   return false;
}

/* Computes a bitfield of which regs are available for a given register
 * selection.
 *
 * This lets drivers implement a more complicated policy than our simple
 * first-fit or round-robin policies (which don't require knowing the whole
 * bitset).
 */
static bool
ra_compute_available_regs(struct ra_graph *g, unsigned int n, BITSET_WORD *regs)
{
   struct ra_class *c = g->regs->classes[g->nodes[n].class];

   /* Populate with the set of regs that are in the node's class. */
   memcpy(regs, c->regs, BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   /* Remove any regs that conflict with nodes that we're adjacent to and have
    * already colored.
    */
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int r = g->nodes[n2].reg;

      if (!BITSET_TEST(g->tmp.in_stack, n2)) {
         for (int j = 0; j < BITSET_WORDS(g->regs->count); j++)
            regs[j] &= ~g->regs->regs[r].conflicts[j];
      }
   }

   for (int i = 0; i < BITSET_WORDS(g->regs->count); i++) {
      if (regs[i])
         return true;
   }

   return false;
}

/**
 * Pops nodes from the stack back into the graph, coloring them with
 * registers as they go.
 *
 * If all nodes were trivially colorable, then this must succeed.  If
 * not (optimistic coloring), then it may return false.
 */
static bool
ra_select(struct ra_graph *g)
{
   int start_search_reg = 0;
   BITSET_WORD *select_regs = NULL;

   if (g->select_reg_callback)
      select_regs = malloc(BITSET_WORDS(g->regs->count) * sizeof(BITSET_WORD));

   while (g->tmp.stack_count != 0) {
      unsigned int ri;
      unsigned int r = -1;
      int n = g->tmp.stack[g->tmp.stack_count - 1];
      struct ra_class *c = g->regs->classes[g->nodes[n].class];

      /* Set this to false even if we return here so that
       * ra_get_best_spill_node() considers this node later.
       */
      BITSET_CLEAR(g->tmp.in_stack, n);

      if (g->select_reg_callback) {
         if (!ra_compute_available_regs(g, n, select_regs)) {
            free(select_regs);
            return false;
         }

         r = g->select_reg_callback(n, select_regs, g->select_reg_callback_data);
         assert(r < g->regs->count);
      } else {
         /* Find the lowest-numbered reg which is not used by a member
          * of the graph adjacent to us.
          */
         for (ri = 0; ri < g->regs->count; ri++) {
            r = (start_search_reg + ri) % g->regs->count;
            if (!reg_belongs_to_class(r, c))
               continue;

            if (!ra_any_neighbors_conflict(g, n, r))
               break;
         }

         if (ri >= g->regs->count)
            return false;
      }

      g->nodes[n].reg = r;
      g->tmp.stack_count--;

      /* Rotate the starting point except for any nodes above the lowest
       * optimistically colorable node.  The likelihood that we will succeed
       * at allocating optimistically colorable nodes is highly dependent on
       * the way that the previous nodes popped off the stack are laid out.
       * The round-robin strategy increases the fragmentation of the register
       * file and decreases the number of nearby nodes assigned to the same
       * color, which increases the likelihood of spilling with respect to the
       * dense packing strategy.
       */
      if (g->regs->round_robin &&
          g->tmp.stack_count - 1 <= g->tmp.stack_optimistic_start)
         start_search_reg = r + 1;
   }

   free(select_regs);

   return true;
}

bool
ra_allocate(struct ra_graph *g)
{
   ra_simplify(g);
   return ra_select(g);
}

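/* A minimal end-to-end sketch (illustrative; assumes "regs" was built and
 * finalized as in the earlier sketch, and that interferences come from the
 * driver's liveness analysis; emit_with_hw_reg() is hypothetical):
 *
 *    struct ra_graph *g = ra_alloc_interference_graph(regs, node_count);
 *
 *    for (unsigned int n = 0; n < node_count; n++)
 *       ra_set_node_class(g, n, singles);
 *    // for each pair of simultaneously-live values a, b:
 *    //    ra_add_node_interference(g, a, b);
 *
 *    if (ra_allocate(g)) {
 *       for (unsigned int n = 0; n < node_count; n++)
 *          emit_with_hw_reg(n, ra_get_node_reg(g, n));
 *    }
 *    ralloc_free(g);
 */
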
unsigned int
ra_get_node_reg(struct ra_graph *g, unsigned int n)
{
   if (g->nodes[n].forced_reg != NO_REG)
      return g->nodes[n].forced_reg;
   else
      return g->nodes[n].reg;
}

/**
 * Forces a node to a specific register.  This can be used to avoid
 * creating a register class containing one node when handling data
 * that must live in a fixed location and is known not to conflict
 * with other forced register assignments (as is common with shader
 * input data).  These nodes do not end up in the stack during
 * ra_simplify(), and thus at ra_select() time it is as if they were
 * the first popped off the stack and assigned their fixed locations.
 * Nodes that use this function do not need to be assigned a register
 * class.
 *
 * Must be called before ra_simplify().
 */
void
ra_set_node_reg(struct ra_graph *g, unsigned int n, unsigned int reg)
{
   g->nodes[n].forced_reg = reg;
}

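/* For example (illustrative), a shader payload value known to arrive in
 * hardware register 2 can be pinned before allocation:
 *
 *    ra_set_node_reg(g, payload_node, 2);
 */
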
static float
ra_get_spill_benefit(struct ra_graph *g, unsigned int n)
{
   float benefit = 0;
   int n_class = g->nodes[n].class;

   /* Define the benefit of eliminating an interference between n and n2
    * through spilling as q(B,C) / p(B), where B is n's class and C is n2's
    * class.  This is similar to the "count number of edges" approach of
    * traditional graph coloring, but takes classes into account.
    */
   util_dynarray_foreach(&g->nodes[n].adjacency_list, unsigned int, n2p) {
      unsigned int n2 = *n2p;
      unsigned int n2_class = g->nodes[n2].class;
      benefit += ((float)g->regs->classes[n_class]->q[n2_class] /
                  g->regs->classes[n_class]->p);
   }

   return benefit;
}

/**
 * Returns a node number to be spilled according to the cost/benefit using
 * the pq test, or -1 if there are no spillable nodes.
 */
int
ra_get_best_spill_node(struct ra_graph *g)
{
   unsigned int best_node = -1;
   float best_benefit = 0.0;
   unsigned int n;

   /* Consider any nodes that we colored successfully or the node we failed to
    * color for spilling. When we failed to color a node in ra_select(), we
    * only considered these nodes, so spilling any other ones would not result
    * in us making progress.
    */
   for (n = 0; n < g->count; n++) {
      float cost = g->nodes[n].spill_cost;
      float benefit;

      if (cost <= 0.0f)
         continue;

      if (BITSET_TEST(g->tmp.in_stack, n))
         continue;

      benefit = ra_get_spill_benefit(g, n);

      if (benefit / cost > best_benefit) {
         best_benefit = benefit / cost;
         best_node = n;
      }
   }

   return best_node;
}

/**
 * Only nodes with a spill cost set (cost != 0.0) will be considered
 * for register spilling.
 */
void
ra_set_node_spill_cost(struct ra_graph *g, unsigned int n, float cost)
{
   g->nodes[n].spill_cost = cost;
}
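
/* The usual spill loop lives in driver code, but a sketch looks like this
 * (estimate_spill_cost(), spill_value() and rebuild_graph() stand in for
 * driver-specific work):
 *
 *    for (unsigned int n = 0; n < node_count; n++)
 *       ra_set_node_spill_cost(g, n, estimate_spill_cost(n));
 *
 *    while (!ra_allocate(g)) {
 *       int n = ra_get_best_spill_node(g);
 *       if (n == -1)
 *          fail_compilation();   // nothing spillable
 *       spill_value(n);
 *       g = rebuild_graph();     // re-run liveness and interference
 *    }
 */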