/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"
#include "midgard_quirks.h"

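/* Integer sources don't carry float abs/neg modifiers; instead a small field
 * selects how the source is treated: midgard_int_normal for full-width
 * sources, while half-width sources pick sign-extend, zero-extend, or shift. */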
static midgard_int_mod
mir_get_imod(bool shift, nir_alu_type T, bool half, bool scalar)
{
        if (!half) {
                assert(!shift);
                /* Sign-extension, really... */
                return scalar ? 0 : midgard_int_normal;
        }

        if (shift)
                return midgard_int_shift;

        if (nir_alu_type_get_base_type(T) == nir_type_int)
                return midgard_int_sign_extend;
        else
                return midgard_int_zero_extend;
}

unsigned
mir_pack_mod(midgard_instruction *ins, unsigned i, bool scalar)
{
        bool integer = midgard_is_integer_op(ins->op);
        unsigned base_size = max_bitsize_for_alu(ins);
        unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
        bool half = (sz == (base_size >> 1));

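        /* Floats get a 2-bit modifier: bit 0 is absolute value, bit 1 is
         * negate. Integer ops use the extension modifier from mir_get_imod
         * instead. */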
        return integer ?
                mir_get_imod(ins->src_shift[i], ins->src_types[i], half, scalar) :
                ((ins->src_abs[i] << 0) |
                 ((ins->src_neg[i] << 1)));
}

/* Midgard IR only knows vector ALU types, but we sometimes need to actually
 * use scalar ALU instructions, for functional or performance reasons. To do
 * this, we just demote vector ALU payloads to scalar. */

static int
component_from_mask(unsigned mask)
{
        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        return c;
        }

        assert(0);
        return 0;
}

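/* Scalar ALU sources are a 6-bit field. Components appear to be addressed at
 * 16-bit granularity, so a full (32-bit) source doubles its component index,
 * mirroring the output component handling below. */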
static unsigned
mir_pack_scalar_source(unsigned mod, bool is_full, unsigned component)
{
        midgard_scalar_alu_src s = {
                .mod = mod,
                .full = is_full,
                .component = component << (is_full ? 1 : 0)
        };

        unsigned o;
        memcpy(&o, &s, sizeof(s));

        return o & ((1 << 6) - 1);
}

static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        bool is_full = nir_alu_type_get_type_size(ins->dest_type) == 32;

        bool half_0 = nir_alu_type_get_type_size(ins->src_types[0]) == 16;
        bool half_1 = nir_alu_type_get_type_size(ins->src_types[1]) == 16;
        unsigned comp = component_from_mask(ins->mask);

        unsigned packed_src[2] = {
                mir_pack_scalar_source(mir_pack_mod(ins, 0, true), !half_0, ins->swizzle[0][comp]),
                mir_pack_scalar_source(mir_pack_mod(ins, 1, true), !half_1, ins->swizzle[1][comp])
        };

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = packed_src[0],
                .src2 = packed_src[1],
                .unknown = 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = comp
        };

        /* Full components are physically spaced out */
        if (is_full) {
                assert(s.output_component < 4);
                s.output_component <<= 1;
        }

        /* Inline constant is passed along rather than trying to extract it
         * from v */

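        /* The scalar encoding scatters the constant's bits differently than
         * the vector encoding does, hence the reshuffle below. */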
        if (ins->has_inline_constant) {
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}

/* 64-bit swizzles are super easy since there are 2 components of 2 components
 * in an 8-bit field ... lots of duplication to go around!
 *
 * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
 * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
 * flag upper. For instance, xy would become 64-bit XY but that's just xyzw
 * native. Likewise, zz would become 64-bit XX with rep* so it would be xyxy
 * with rep. Pretty nifty, huh? */

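/* Worked example of the above: a 32-bit zz swizzle packs each 64-bit lane as
 * xy (packed = 0x44) and relies on a rep_* flag to select the upper half. */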
static unsigned
mir_pack_swizzle_64(unsigned *swizzle, unsigned max_component)
{
        unsigned packed = 0;

        for (unsigned i = 0; i < 2; ++i) {
                assert(swizzle[i] <= max_component);

                unsigned a = (swizzle[i] & 1) ?
                        (COMPONENT_W << 2) | COMPONENT_Z :
                        (COMPONENT_Y << 2) | COMPONENT_X;

                packed |= a << (i * 4);
        }

        return packed;
}

static void
mir_pack_mask_alu(midgard_instruction *ins, midgard_vector_alu *alu)
{
        unsigned effective = ins->mask;

        /* If we have a destination override, we need to figure out whether to
         * override to the lower or upper half, shifting the effective mask in
         * the latter, so AAAA.... becomes AAAA */

        unsigned inst_size = max_bitsize_for_alu(ins);
        signed upper_shift = mir_upper_override(ins, inst_size);

        if (upper_shift >= 0) {
                effective >>= upper_shift;
                alu->dest_override = upper_shift ?
                        midgard_dest_override_upper :
                        midgard_dest_override_lower;
        } else {
                alu->dest_override = midgard_dest_override_none;
        }

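        /* The hardware mask is 8 bits at 16-bit granularity, so each 32-bit
         * component expands to two mask bits and each 64-bit component to
         * four; smaller sizes pass through unchanged. */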
        if (inst_size == 32)
                alu->mask = expand_writemask(effective, 2);
        else if (inst_size == 64)
                alu->mask = expand_writemask(effective, 1);
        else
                alu->mask = effective;
}

static unsigned
mir_pack_swizzle(unsigned mask, unsigned *swizzle,
                nir_alu_type T, midgard_reg_mode reg_mode,
                bool op_channeled, bool *rep_low, bool *rep_high)
{
        unsigned packed = 0;
        unsigned sz = nir_alu_type_get_type_size(T);

        if (reg_mode == midgard_reg_mode_64) {
                assert(sz == 64 || sz == 32);
                unsigned components = (sz == 32) ? 4 : 2;

                packed = mir_pack_swizzle_64(swizzle, components);

                if (sz == 32) {
                        bool lo = swizzle[0] >= COMPONENT_Z;
                        bool hi = swizzle[1] >= COMPONENT_Z;

                        if (mask & 0x1) {
                                /* We can't mix halves... */
                                if (mask & 2)
                                        assert(lo == hi);

                                *rep_low = lo;
                        } else {
                                *rep_low = hi;
                        }
                } else if (sz < 32) {
                        unreachable("Cannot encode 8/16 swizzle in 64-bit");
                }
        } else {
                /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
                 * the strategy is to check whether the nibble we're on is
                 * upper or lower. We need all components to be on the same
                 * "side"; that much is enforced by the ISA and should have
                 * been lowered. TODO: 8-bit packing. TODO: vec8 */

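                /* The 2-bit swizzle fields can only address within one half of
                 * the register, so we strip the upper bit from each component
                 * and use rep_low/rep_high below to pick which half is read. */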
                unsigned first = mask ? ffs(mask) - 1 : 0;
                bool upper = swizzle[first] > 3;

                if (upper && mask)
                        assert(sz <= 16);

                bool dest_up = !op_channeled && (first >= 4);

                for (unsigned c = (dest_up ? 4 : 0); c < (dest_up ? 8 : 4); ++c) {
                        unsigned v = swizzle[c];

                        bool t_upper = v > 3;

                        /* Ensure we're doing something sane */

                        if (mask & (1 << c)) {
                                assert(t_upper == upper);
                                assert(v <= 7);
                        }

                        /* Use the non upper part */
                        v &= 0x3;

                        packed |= v << (2 * (c % 4));
                }

                /* Replicate for now.. should really pick a side for
                 * dot products */

                if (reg_mode == midgard_reg_mode_16 && sz == 16) {
                        *rep_low = !upper;
                        *rep_high = upper;
                } else if (reg_mode == midgard_reg_mode_16 && sz == 8) {
                        *rep_low = upper;
                        *rep_high = upper;
                } else if (reg_mode == midgard_reg_mode_32) {
                        *rep_low = upper;
                } else {
                        unreachable("Unhandled reg mode");
                }
        }

        return packed;
}

static void
mir_pack_vector_srcs(midgard_instruction *ins, midgard_vector_alu *alu)
{
        bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props);

        unsigned base_size = max_bitsize_for_alu(ins);

        for (unsigned i = 0; i < 2; ++i) {
                if (ins->has_inline_constant && (i == 1))
                        continue;

                if (ins->src[i] == ~0)
                        continue;

                bool rep_lo = false, rep_hi = false;
                unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
                bool half = (sz == (base_size >> 1));

                assert((sz == base_size) || half);

                unsigned swizzle = mir_pack_swizzle(ins->mask, ins->swizzle[i],
                                ins->src_types[i], reg_mode_for_bitsize(base_size),
                                channeled, &rep_lo, &rep_hi);

                midgard_vector_alu_src pack = {
                        .mod = mir_pack_mod(ins, i, false),
                        .rep_low = rep_lo,
                        .rep_high = rep_hi,
                        .half = half,
                        .swizzle = swizzle
                };

                unsigned p = vector_alu_srco_unsigned(pack);

                if (i == 0)
                        alu->src1 = p;
                else
                        alu->src2 = p;
        }
}

static void
mir_pack_swizzle_ldst(midgard_instruction *ins)
{
        /* TODO: non-32-bit, non-vec4 */
        for (unsigned c = 0; c < 4; ++c) {
                unsigned v = ins->swizzle[0][c];

                /* Check vec4 */
                assert(v <= 3);

                ins->load_store.swizzle |= v << (2 * c);
        }

        /* TODO: arg_1/2 */
}

static void
mir_pack_swizzle_tex(midgard_instruction *ins)
{
        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                for (unsigned c = 0; c < 4; ++c) {
                        unsigned v = ins->swizzle[i][c];

                        /* Check vec4 */
                        assert(v <= 3);

                        packed |= v << (2 * c);
                }

                if (i == 0)
                        ins->texture.swizzle = packed;
                else
                        ins->texture.in_reg_swizzle = packed;
        }

        /* TODO: bias component */
}

/* Up to 3 { ALU, LDST } bundles can execute in parallel with a texture op.
 * Given a texture op, lookahead to see how many such bundles we can flag for
 * OoO execution */

static bool
mir_can_run_ooo(midgard_block *block, midgard_bundle *bundle,
                unsigned dependency)
{
        /* Don't read out of bounds */
        if (bundle >= (midgard_bundle *) ((char *) block->bundles.data + block->bundles.size))
                return false;

        /* Texture ops can't execute with other texture ops */
        if (!IS_ALU(bundle->tag) && bundle->tag != TAG_LOAD_STORE_4)
                return false;

        /* Ensure there is no read-after-write dependency */

        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                mir_foreach_src(ins, s) {
                        if (ins->src[s] == dependency)
                                return false;
                }
        }

        /* Otherwise, we're okay */
        return true;
}

static void
mir_pack_tex_ooo(midgard_block *block, midgard_bundle *bundle, midgard_instruction *ins)
{
        unsigned count = 0;

        for (count = 0; count < 3; ++count) {
                if (!mir_can_run_ooo(block, bundle + count + 1, ins->dest))
                        break;
        }

        ins->texture.out_of_order = count;
}

/* Load/store masks are 4 bits wide, so load/store ops must pack down to that.
 * vec4 is the natural mask width; vec8 is constrained to matched pairs, and
 * vec2 (64-bit) is duplicated. TODO: 8-bit?
 */
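/* For example, a 64-bit op with MIR mask 0x3 (both 64-bit components written)
 * packs to 0xF, while a 16-bit op must have its 8-bit mask in matched pairs
 * before it can collapse to a nibble. */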

static void
mir_pack_ldst_mask(midgard_instruction *ins)
{
        unsigned sz = nir_alu_type_get_type_size(ins->dest_type);
        unsigned packed = ins->mask;

        if (sz == 64) {
                packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
                         ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
        } else if (sz == 16) {
                packed = 0;

                for (unsigned i = 0; i < 4; ++i) {
                        /* Make sure we're duplicated */
                        bool u = (ins->mask & (1 << (2*i + 0))) != 0;
                        bool v = (ins->mask & (1 << (2*i + 1))) != 0;
                        assert(u == v);

                        packed |= (u << i);
                }
        } else {
                assert(sz == 32);
        }

        ins->load_store.mask = packed;
}

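/* Fold source inversions into the opcode where the ISA has a fused form: an
 * inverted second source maps to the *not variants, inverting both sources
 * maps to the De Morgan dual, and XOR absorbs a single inversion as inxor. */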
static void
mir_lower_inverts(midgard_instruction *ins)
{
        bool inv[3] = {
                ins->src_invert[0],
                ins->src_invert[1],
                ins->src_invert[2]
        };

        switch (ins->op) {
        case midgard_alu_op_iand:
                /* a & ~b = iandnot(a, b) */
                /* ~a & ~b = ~(a | b) = inor(a, b) */

                if (inv[0] && inv[1])
                        ins->op = midgard_alu_op_inor;
                else if (inv[1])
                        ins->op = midgard_alu_op_iandnot;

                break;
        case midgard_alu_op_ior:
                /*  a | ~b = iornot(a, b) */
                /* ~a | ~b = ~(a & b) = inand(a, b) */

                if (inv[0] && inv[1])
                        ins->op = midgard_alu_op_inand;
                else if (inv[1])
                        ins->op = midgard_alu_op_iornot;

                break;

        case midgard_alu_op_ixor:
                /* ~a ^ b = a ^ ~b = ~(a ^ b) = inxor(a, b) */
                /* ~a ^ ~b = a ^ b */

                if (inv[0] ^ inv[1])
                        ins->op = midgard_alu_op_inxor;

                break;

        default:
                break;
        }
}

/* Opcodes with ROUNDS are the base (rte/0) type so we can just add */

static void
mir_lower_roundmode(midgard_instruction *ins)
{
        if (alu_opcode_props[ins->op].props & MIDGARD_ROUNDS) {
                assert(ins->roundmode <= 0x3);
                ins->op += ins->roundmode;
        }
}

static midgard_load_store_word
load_store_from_instr(midgard_instruction *ins)
{
        midgard_load_store_word ldst = ins->load_store;
        ldst.op = ins->op;

        if (OP_IS_STORE(ldst.op)) {
                ldst.reg = SSA_REG_FROM_FIXED(ins->src[0]) & 1;
        } else {
                ldst.reg = SSA_REG_FROM_FIXED(ins->dest);
        }

        if (ins->src[1] != ~0) {
                unsigned src = SSA_REG_FROM_FIXED(ins->src[1]);
                ldst.arg_1 |= midgard_ldst_reg(src, ins->swizzle[1][0]);
        }

        if (ins->src[2] != ~0) {
                unsigned src = SSA_REG_FROM_FIXED(ins->src[2]);
                ldst.arg_2 |= midgard_ldst_reg(src, ins->swizzle[2][0]);
        }

        return ldst;
}

static midgard_texture_word
texture_word_from_instr(midgard_instruction *ins)
{
        midgard_texture_word tex = ins->texture;
        tex.op = ins->op;

        unsigned src1 = ins->src[1] == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->src[1]);
        tex.in_reg_select = src1 & 1;

        unsigned dest = ins->dest == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->dest);
        tex.out_reg_select = dest & 1;

        if (ins->src[2] != ~0) {
                midgard_tex_register_select sel = {
                        .select = SSA_REG_FROM_FIXED(ins->src[2]) & 1,
                        .full = 1,
                        .component = ins->swizzle[2][0]
                };
                uint8_t packed;
                memcpy(&packed, &sel, sizeof(packed));
                tex.bias = packed;
        }

        if (ins->src[3] != ~0) {
                unsigned x = ins->swizzle[3][0];
                unsigned y = x + 1;
                unsigned z = x + 2;

                /* Check range, TODO: half-registers */
                assert(z < 4);

                unsigned offset_reg = SSA_REG_FROM_FIXED(ins->src[3]);
                tex.offset =
                        (1)                   | /* full */
                        (offset_reg & 1) << 1 | /* select */
                        (0 << 2)              | /* upper */
                        (x << 3)              | /* swizzle */
                        (y << 5)              | /* swizzle */
                        (z << 7);               /* swizzle */
        }

        return tex;
}

static midgard_vector_alu
vector_alu_from_instr(midgard_instruction *ins)
{
        midgard_vector_alu alu = {
                .op = ins->op,
                .outmod = ins->outmod,
                .reg_mode = reg_mode_for_bitsize(max_bitsize_for_alu(ins))
        };

        if (ins->has_inline_constant) {
                /* Encode inline 16-bit constant. See disassembler for
                 * where the algorithm is from */

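                /* Bits 7:0 of the constant land in imm[10:3] and bits 10:8
                 * land in imm[2:0]; the whole field then sits two bits up
                 * within src2. */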
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                uint16_t imm = ((lower_11 >> 8) & 0x7) |
                               ((lower_11 & 0xFF) << 3);

                alu.src2 = imm << 2;
        }

        return alu;
}

static midgard_branch_extended
midgard_create_branch_extended( midgard_condition cond,
                                midgard_jmp_writeout_op op,
                                unsigned dest_tag,
                                signed quadword_offset)
{
        /* The condition code is actually a LUT describing a function to
         * combine multiple condition codes. However, we only support a single
         * condition code at the moment, so we just duplicate it a bunch of
         * times. */

        uint16_t duplicated_cond =
                (cond << 14) |
                (cond << 12) |
                (cond << 10) |
                (cond << 8) |
                (cond << 6) |
                (cond << 4) |
                (cond << 2) |
                (cond << 0);

        midgard_branch_extended branch = {
                .op = op,
                .dest_tag = dest_tag,
                .offset = quadword_offset,
                .cond = duplicated_cond
        };

        return branch;
}

static void
emit_branch(midgard_instruction *ins,
            compiler_context *ctx,
            midgard_block *block,
            midgard_bundle *bundle,
            struct util_dynarray *emission)
{
        /* Parse some basic branch info */
        bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT;
        bool is_conditional = ins->branch.conditional;
        bool is_inverted = ins->branch.invert_conditional;
        bool is_discard = ins->branch.target_type == TARGET_DISCARD;
        bool is_tilebuf_wait = ins->branch.target_type == TARGET_TILEBUF_WAIT;
        bool is_special = is_discard || is_tilebuf_wait;
        bool is_writeout = ins->writeout;

        /* Determine the block we're jumping to */
        int target_number = ins->branch.target_block;

        /* Report the destination tag */
        int dest_tag = is_discard ? 0 :
                is_tilebuf_wait ? bundle->tag :
                midgard_get_first_tag_from_block(ctx, target_number);

        /* Count up the number of quadwords we're
         * jumping over = number of quadwords until
         * (br_block_idx, target_number) */

        int quadword_offset = 0;

        if (is_discard) {
                /* Fixed encoding, not actually an offset */
                quadword_offset = 0x2;
        } else if (is_tilebuf_wait) {
                quadword_offset = -1;
        } else if (target_number > block->base.name) {
                /* Jump forward */

                for (int idx = block->base.name+1; idx < target_number; ++idx) {
                        midgard_block *blk = mir_get_block(ctx, idx);
                        assert(blk);

                        quadword_offset += blk->quadword_count;
                }
        } else {
                /* Jump backwards */

                for (int idx = block->base.name; idx >= target_number; --idx) {
                        midgard_block *blk = mir_get_block(ctx, idx);
                        assert(blk);

                        quadword_offset -= blk->quadword_count;
                }
        }

        /* Unconditional extended branches (far jumps)
         * have issues, so we always use a conditional
         * branch, setting the condition to always for
         * unconditional. For compact unconditional
         * branches, cond isn't used so it doesn't
         * matter what we pick. */

        midgard_condition cond =
                !is_conditional ? midgard_condition_always :
                is_inverted ? midgard_condition_false :
                midgard_condition_true;

        midgard_jmp_writeout_op op =
                is_discard ? midgard_jmp_writeout_op_discard :
                is_tilebuf_wait ? midgard_jmp_writeout_op_tilebuffer_pending :
                is_writeout ? midgard_jmp_writeout_op_writeout :
                (is_compact && !is_conditional) ?
                midgard_jmp_writeout_op_branch_uncond :
                midgard_jmp_writeout_op_branch_cond;

        if (is_compact) {
                unsigned size = sizeof(midgard_branch_cond);

                if (is_conditional || is_special) {
                        midgard_branch_cond branch = {
                                .op = op,
                                .dest_tag = dest_tag,
                                .offset = quadword_offset,
                                .cond = cond
                        };
                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
                } else {
                        assert(op == midgard_jmp_writeout_op_branch_uncond);
                        midgard_branch_uncond branch = {
                                .op = op,
                                .dest_tag = dest_tag,
                                .offset = quadword_offset,
                                .unknown = 1
                        };
                        assert(branch.offset == quadword_offset);
                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
                }
        } else { /* `ins->compact_branch`, misnomer */
                unsigned size = sizeof(midgard_branch_extended);

                midgard_branch_extended branch =
                        midgard_create_branch_extended(
                                        cond, op,
                                        dest_tag,
                                        quadword_offset);

                memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
        }
}

static void
emit_alu_bundle(compiler_context *ctx,
                midgard_block *block,
                midgard_bundle *bundle,
                struct util_dynarray *emission,
                unsigned lookahead)
{
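        /* An ALU bundle is laid out as: a 32-bit control word, one 16-bit
         * register word per instruction, the instruction bodies themselves,
         * zero padding up to the bundle size, and finally any embedded
         * constants. */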
        /* Emit the control word */
        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);

        /* Next up, emit register words */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Check if this instruction has registers */
                if (ins->compact_branch) continue;

                unsigned src2_reg = REGISTER_UNUSED;
                if (ins->has_inline_constant)
                        src2_reg = ins->inline_constant >> 11;
                else if (ins->src[1] != ~0)
                        src2_reg = SSA_REG_FROM_FIXED(ins->src[1]);

                /* Otherwise, just emit the registers */
                uint16_t reg_word = 0;
                midgard_reg_info registers = {
                        .src1_reg = (ins->src[0] == ~0 ?
                                        REGISTER_UNUSED :
                                        SSA_REG_FROM_FIXED(ins->src[0])),
                        .src2_reg = src2_reg,
                        .src2_imm = ins->has_inline_constant,
                        .out_reg = (ins->dest == ~0 ?
                                        REGISTER_UNUSED :
                                        SSA_REG_FROM_FIXED(ins->dest)),
                };
                memcpy(&reg_word, &registers, sizeof(uint16_t));
                util_dynarray_append(emission, uint16_t, reg_word);
        }

        /* Now, we emit the body itself */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                if (!ins->compact_branch) {
                        mir_lower_inverts(ins);
                        mir_lower_roundmode(ins);
                }

                if (midgard_is_branch_unit(ins->unit)) {
                        emit_branch(ins, ctx, block, bundle, emission);
                } else if (ins->unit & UNITS_ANY_VECTOR) {
                        midgard_vector_alu source = vector_alu_from_instr(ins);
                        mir_pack_mask_alu(ins, &source);
                        mir_pack_vector_srcs(ins, &source);
                        unsigned size = sizeof(source);
                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &source, size);
                } else {
                        midgard_scalar_alu source = vector_to_scalar_alu(vector_alu_from_instr(ins), ins);
                        unsigned size = sizeof(source);
                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &source, size);
                }
        }

        /* Emit padding (all zero) */
        memset(util_dynarray_grow_bytes(emission, bundle->padding, 1), 0, bundle->padding);

        /* Tack on constants */

        if (bundle->has_embedded_constants)
                util_dynarray_append(emission, midgard_constants, bundle->constants);
}

/* Shift applied to the immediate used as an offset. Probably this is papering
 * over some other semantic distinction as well, but it unifies things in the
 * compiler so I don't mind. */

static unsigned
mir_ldst_imm_shift(midgard_load_store_op op)
{
        if (OP_IS_UBO_READ(op))
                return 3;
        else
                return 1;
}

static enum mali_sampler_type
midgard_sampler_type(nir_alu_type t)
{
        switch (nir_alu_type_get_base_type(t)) {
        case nir_type_float:
                return MALI_SAMPLER_FLOAT;
        case nir_type_int:
                return MALI_SAMPLER_SIGNED;
        case nir_type_uint:
                return MALI_SAMPLER_UNSIGNED;
        default:
                unreachable("Unknown sampler type");
        }
}

/* After everything is scheduled, emit whole bundles at a time */

void
emit_binary_bundle(compiler_context *ctx,
                   midgard_block *block,
                   midgard_bundle *bundle,
                   struct util_dynarray *emission,
                   int next_tag)
{
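        /* Each bundle advertises the tag of the bundle that follows it; for
         * ALU bundles that tag is folded into the control word above the tag
         * bits, presumably so the hardware can prefetch the next bundle. */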
        int lookahead = next_tag << 4;

        switch (bundle->tag) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16:
        case TAG_ALU_4 + 4:
        case TAG_ALU_8 + 4:
        case TAG_ALU_12 + 4:
        case TAG_ALU_16 + 4:
                emit_alu_bundle(ctx, block, bundle, emission, lookahead);
                break;

        case TAG_LOAD_STORE_4: {
                /* One or two composing instructions */

                uint64_t current64, next64 = LDST_NOP;

                /* Copy masks */

                for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                        mir_pack_ldst_mask(bundle->instructions[i]);

                        mir_pack_swizzle_ldst(bundle->instructions[i]);

                        /* Apply a constant offset */
                        unsigned offset = bundle->instructions[i]->constants.u32[0];

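                        /* The low bits of the offset ride in varying_parameters
                         * (pre-shifted per op); whatever doesn't fit spills
                         * into the address field. */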
                        if (offset) {
                                unsigned shift = mir_ldst_imm_shift(bundle->instructions[i]->op);
                                unsigned upper_shift = 10 - shift;

                                bundle->instructions[i]->load_store.varying_parameters |= (offset & ((1 << upper_shift) - 1)) << shift;
                                bundle->instructions[i]->load_store.address |= (offset >> upper_shift);
                        }
                }

                midgard_load_store_word ldst0 =
                        load_store_from_instr(bundle->instructions[0]);
                memcpy(&current64, &ldst0, sizeof(current64));

                if (bundle->instruction_count == 2) {
                        midgard_load_store_word ldst1 =
                                load_store_from_instr(bundle->instructions[1]);
                        memcpy(&next64, &ldst1, sizeof(next64));
                }

                midgard_load_store instruction = {
                        .type = bundle->tag,
                        .next_type = next_tag,
                        .word1 = current64,
                        .word2 = next64
                };

                util_dynarray_append(emission, midgard_load_store, instruction);

                break;
        }

        case TAG_TEXTURE_4:
        case TAG_TEXTURE_4_VTX:
        case TAG_TEXTURE_4_BARRIER: {
                /* Texture instructions are easy, since there is no pipelining
                 * nor VLIW to worry about. We may need to set .cont/.last
                 * flags. */

                midgard_instruction *ins = bundle->instructions[0];

                ins->texture.type = bundle->tag;
                ins->texture.next_type = next_tag;

                /* Nothing else to pack for barriers */
                if (ins->op == TEXTURE_OP_BARRIER) {
                        ins->texture.cont = ins->texture.last = 1;
                        ins->texture.op = ins->op;
                        util_dynarray_append(emission, midgard_texture_word, ins->texture);
                        return;
                }

                signed override = mir_upper_override(ins, 32);

                ins->texture.mask = override > 0 ?
                        ins->mask >> override :
                        ins->mask;

                mir_pack_swizzle_tex(ins);

                if (!(ctx->quirks & MIDGARD_NO_OOO))
                        mir_pack_tex_ooo(block, bundle, ins);

                unsigned osz = nir_alu_type_get_type_size(ins->dest_type);
                unsigned isz = nir_alu_type_get_type_size(ins->src_types[1]);

                assert(osz == 32 || osz == 16);
                assert(isz == 32 || isz == 16);

                ins->texture.out_full = (osz == 32);
                ins->texture.out_upper = override > 0;
                ins->texture.in_reg_full = (isz == 32);
                ins->texture.sampler_type = midgard_sampler_type(ins->dest_type);
                ins->texture.outmod = ins->outmod;

                if (mir_op_computes_derivatives(ctx->stage, ins->op)) {
                        ins->texture.cont = !ins->helper_terminate;
                        ins->texture.last = ins->helper_terminate || ins->helper_execute;
                } else {
                        ins->texture.cont = ins->texture.last = 1;
                }

                midgard_texture_word texture = texture_word_from_instr(ins);
                util_dynarray_append(emission, midgard_texture_word, texture);
                break;
        }

        default:
                unreachable("Unknown midgard instruction type\n");
        }
}