1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the
6  * "Software"), to deal in the Software without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sub license, and/or sell copies of the Software, and to
9  * permit persons to whom the Software is furnished to do so, subject to
10  * the following conditions:
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18  * USE OR OTHER DEALINGS IN THE SOFTWARE.
19  *
20  * The above copyright notice and this permission notice (including the
21  * next paragraph) shall be included in all copies or substantial portions
22  * of the Software.
23  *
24  */
25 /* based on pieces from si_pipe.c and radeon_llvm_emit.c */
26 #include "ac_llvm_build.h"
27 
28 #include "ac_nir.h"
29 #include "ac_llvm_util.h"
30 #include "ac_shader_util.h"
31 #include "c11/threads.h"
32 #include "shader_enums.h"
33 #include "sid.h"
34 #include "util/bitscan.h"
35 #include "util/macros.h"
36 #include "util/u_atomic.h"
37 #include "util/u_math.h"
38 #include <llvm-c/Core.h>
39 #include <llvm/Config/llvm-config.h>
40 
41 #include <assert.h>
42 #include <stdio.h>
43 
44 #define AC_LLVM_INITIAL_CF_DEPTH 4
45 
46 /* Data for if/else/endif and bgnloop/endloop control flow structures.
47  */
48 struct ac_llvm_flow {
49    /* Loop exit or next part of if/else/endif. */
50    LLVMBasicBlockRef next_block;
51    LLVMBasicBlockRef loop_entry_block;
52 };
53 
54 static void ac_build_tbuffer_store(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
55                                    LLVMValueRef vdata, LLVMValueRef vindex, LLVMValueRef voffset,
56                                    LLVMValueRef soffset, LLVMValueRef immoffset,
57                                    unsigned num_channels, unsigned dfmt, unsigned nfmt,
58                                    unsigned cache_policy);
59 
60 /* Initialize module-independent parts of the context.
61  *
62  * The caller is responsible for initializing ctx::module and ctx::builder.
63  */
64 void ac_llvm_context_init(struct ac_llvm_context *ctx, struct ac_llvm_compiler *compiler,
65                           enum chip_class chip_class, enum radeon_family family,
66                           const struct radeon_info *info,
67                           enum ac_float_mode float_mode, unsigned wave_size,
68                           unsigned ballot_mask_bits)
69 {
70    ctx->context = LLVMContextCreate();
71 
72    ctx->chip_class = chip_class;
73    ctx->family = family;
74    ctx->info = info;
75    ctx->wave_size = wave_size;
76    ctx->ballot_mask_bits = ballot_mask_bits;
77    ctx->float_mode = float_mode;
78    ctx->module = ac_create_module(compiler->tm, ctx->context);
79    ctx->builder = ac_create_builder(ctx->context, float_mode);
80 
81    ctx->voidt = LLVMVoidTypeInContext(ctx->context);
82    ctx->i1 = LLVMInt1TypeInContext(ctx->context);
83    ctx->i8 = LLVMInt8TypeInContext(ctx->context);
84    ctx->i16 = LLVMIntTypeInContext(ctx->context, 16);
85    ctx->i32 = LLVMIntTypeInContext(ctx->context, 32);
86    ctx->i64 = LLVMIntTypeInContext(ctx->context, 64);
87    ctx->i128 = LLVMIntTypeInContext(ctx->context, 128);
88    ctx->intptr = ctx->i32;
89    ctx->f16 = LLVMHalfTypeInContext(ctx->context);
90    ctx->f32 = LLVMFloatTypeInContext(ctx->context);
91    ctx->f64 = LLVMDoubleTypeInContext(ctx->context);
92    ctx->v2i16 = LLVMVectorType(ctx->i16, 2);
93    ctx->v4i16 = LLVMVectorType(ctx->i16, 4);
94    ctx->v2f16 = LLVMVectorType(ctx->f16, 2);
95    ctx->v4f16 = LLVMVectorType(ctx->f16, 4);
96    ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
97    ctx->v3i32 = LLVMVectorType(ctx->i32, 3);
98    ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
99    ctx->v2f32 = LLVMVectorType(ctx->f32, 2);
100    ctx->v3f32 = LLVMVectorType(ctx->f32, 3);
101    ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
102    ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
103    ctx->iN_wavemask = LLVMIntTypeInContext(ctx->context, ctx->wave_size);
104    ctx->iN_ballotmask = LLVMIntTypeInContext(ctx->context, ballot_mask_bits);
105 
106    ctx->i8_0 = LLVMConstInt(ctx->i8, 0, false);
107    ctx->i8_1 = LLVMConstInt(ctx->i8, 1, false);
108    ctx->i16_0 = LLVMConstInt(ctx->i16, 0, false);
109    ctx->i16_1 = LLVMConstInt(ctx->i16, 1, false);
110    ctx->i32_0 = LLVMConstInt(ctx->i32, 0, false);
111    ctx->i32_1 = LLVMConstInt(ctx->i32, 1, false);
112    ctx->i64_0 = LLVMConstInt(ctx->i64, 0, false);
113    ctx->i64_1 = LLVMConstInt(ctx->i64, 1, false);
114    ctx->i128_0 = LLVMConstInt(ctx->i128, 0, false);
115    ctx->i128_1 = LLVMConstInt(ctx->i128, 1, false);
116    ctx->f16_0 = LLVMConstReal(ctx->f16, 0.0);
117    ctx->f16_1 = LLVMConstReal(ctx->f16, 1.0);
118    ctx->f32_0 = LLVMConstReal(ctx->f32, 0.0);
119    ctx->f32_1 = LLVMConstReal(ctx->f32, 1.0);
120    ctx->f64_0 = LLVMConstReal(ctx->f64, 0.0);
121    ctx->f64_1 = LLVMConstReal(ctx->f64, 1.0);
122 
123    ctx->i1false = LLVMConstInt(ctx->i1, 0, false);
124    ctx->i1true = LLVMConstInt(ctx->i1, 1, false);
125 
126    ctx->range_md_kind = LLVMGetMDKindIDInContext(ctx->context, "range", 5);
127 
128    ctx->invariant_load_md_kind = LLVMGetMDKindIDInContext(ctx->context, "invariant.load", 14);
129 
130    ctx->uniform_md_kind = LLVMGetMDKindIDInContext(ctx->context, "amdgpu.uniform", 14);
131 
132    ctx->empty_md = LLVMMDNodeInContext(ctx->context, NULL, 0);
133    ctx->flow = calloc(1, sizeof(*ctx->flow));
134 }
135 
136 void ac_llvm_context_dispose(struct ac_llvm_context *ctx)
137 {
138    free(ctx->flow->stack);
139    free(ctx->flow);
140    ctx->flow = NULL;
141 }
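
/* Typical lifetime of the context -- an illustrative sketch only; the chip,
 * family, float mode and wave size values below are hypothetical and error
 * handling is omitted:
 *
 *    struct ac_llvm_context ctx;
 *    ac_llvm_context_init(&ctx, compiler, GFX10, CHIP_NAVI10, info,
 *                         AC_FLOAT_MODE_DEFAULT, 64, 64);
 *    ...emit IR through ctx.builder into ctx.module...
 *    ac_llvm_context_dispose(&ctx);
 */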
142 
143 int ac_get_llvm_num_components(LLVMValueRef value)
144 {
145    LLVMTypeRef type = LLVMTypeOf(value);
146    unsigned num_components =
147       LLVMGetTypeKind(type) == LLVMVectorTypeKind ? LLVMGetVectorSize(type) : 1;
148    return num_components;
149 }
150 
151 LLVMValueRef ac_llvm_extract_elem(struct ac_llvm_context *ac, LLVMValueRef value, int index)
152 {
153    if (LLVMGetTypeKind(LLVMTypeOf(value)) != LLVMVectorTypeKind) {
154       assert(index == 0);
155       return value;
156    }
157 
158    return LLVMBuildExtractElement(ac->builder, value, LLVMConstInt(ac->i32, index, false), "");
159 }
160 
161 int ac_get_elem_bits(struct ac_llvm_context *ctx, LLVMTypeRef type)
162 {
163    if (LLVMGetTypeKind(type) == LLVMVectorTypeKind)
164       type = LLVMGetElementType(type);
165 
166    if (LLVMGetTypeKind(type) == LLVMIntegerTypeKind)
167       return LLVMGetIntTypeWidth(type);
168 
169    if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
170       if (LLVMGetPointerAddressSpace(type) == AC_ADDR_SPACE_LDS)
171          return 32;
172    }
173 
174    if (type == ctx->f16)
175       return 16;
176    if (type == ctx->f32)
177       return 32;
178    if (type == ctx->f64)
179       return 64;
180 
181    unreachable("Unhandled type kind in get_elem_bits");
182 }
183 
184 unsigned ac_get_type_size(LLVMTypeRef type)
185 {
186    LLVMTypeKind kind = LLVMGetTypeKind(type);
187 
188    switch (kind) {
189    case LLVMIntegerTypeKind:
190       return LLVMGetIntTypeWidth(type) / 8;
191    case LLVMHalfTypeKind:
192       return 2;
193    case LLVMFloatTypeKind:
194       return 4;
195    case LLVMDoubleTypeKind:
196       return 8;
197    case LLVMPointerTypeKind:
198       if (LLVMGetPointerAddressSpace(type) == AC_ADDR_SPACE_CONST_32BIT)
199          return 4;
200       return 8;
201    case LLVMVectorTypeKind:
202       return LLVMGetVectorSize(type) * ac_get_type_size(LLVMGetElementType(type));
203    case LLVMArrayTypeKind:
204       return LLVMGetArrayLength(type) * ac_get_type_size(LLVMGetElementType(type));
205    default:
206       assert(0);
207       return 0;
208    }
209 }
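
/* Examples of the sizes returned above (in bytes):
 *    i32 -> 4, f64 -> 8, <4 x float> -> 16, [8 x i32] -> 32,
 *    CONST_32BIT pointer -> 4, any other pointer -> 8
 */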
210 
211 static LLVMTypeRef to_integer_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
212 {
213    if (t == ctx->i1)
214       return ctx->i1;
215    else if (t == ctx->i8)
216       return ctx->i8;
217    else if (t == ctx->f16 || t == ctx->i16)
218       return ctx->i16;
219    else if (t == ctx->f32 || t == ctx->i32)
220       return ctx->i32;
221    else if (t == ctx->f64 || t == ctx->i64)
222       return ctx->i64;
223    else
224       unreachable("Unhandled integer size");
225 }
226 
227 LLVMTypeRef ac_to_integer_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
228 {
229    if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
230       LLVMTypeRef elem_type = LLVMGetElementType(t);
231       return LLVMVectorType(to_integer_type_scalar(ctx, elem_type), LLVMGetVectorSize(t));
232    }
233    if (LLVMGetTypeKind(t) == LLVMPointerTypeKind) {
234       switch (LLVMGetPointerAddressSpace(t)) {
235       case AC_ADDR_SPACE_GLOBAL:
236          return ctx->i64;
237       case AC_ADDR_SPACE_CONST_32BIT:
238       case AC_ADDR_SPACE_LDS:
239          return ctx->i32;
240       default:
241          unreachable("unhandled address space");
242       }
243    }
244    return to_integer_type_scalar(ctx, t);
245 }
246 
247 LLVMValueRef ac_to_integer(struct ac_llvm_context *ctx, LLVMValueRef v)
248 {
249    LLVMTypeRef type = LLVMTypeOf(v);
250    if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
251       return LLVMBuildPtrToInt(ctx->builder, v, ac_to_integer_type(ctx, type), "");
252    }
253    return LLVMBuildBitCast(ctx->builder, v, ac_to_integer_type(ctx, type), "");
254 }
255 
256 LLVMValueRef ac_to_integer_or_pointer(struct ac_llvm_context *ctx, LLVMValueRef v)
257 {
258    LLVMTypeRef type = LLVMTypeOf(v);
259    if (LLVMGetTypeKind(type) == LLVMPointerTypeKind)
260       return v;
261    return ac_to_integer(ctx, v);
262 }
263 
264 static LLVMTypeRef to_float_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
265 {
266    if (t == ctx->i8)
267       return ctx->i8;
268    else if (t == ctx->i16 || t == ctx->f16)
269       return ctx->f16;
270    else if (t == ctx->i32 || t == ctx->f32)
271       return ctx->f32;
272    else if (t == ctx->i64 || t == ctx->f64)
273       return ctx->f64;
274    else
275       unreachable("Unhandled float size");
276 }
277 
278 LLVMTypeRef ac_to_float_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
279 {
280    if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
281       LLVMTypeRef elem_type = LLVMGetElementType(t);
282       return LLVMVectorType(to_float_type_scalar(ctx, elem_type), LLVMGetVectorSize(t));
283    }
284    return to_float_type_scalar(ctx, t);
285 }
286 
287 LLVMValueRef ac_to_float(struct ac_llvm_context *ctx, LLVMValueRef v)
288 {
289    LLVMTypeRef type = LLVMTypeOf(v);
290    return LLVMBuildBitCast(ctx->builder, v, ac_to_float_type(ctx, type), "");
291 }
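
/* Illustrative use of the two casts above: bitwise operations on float
 * values are usually done by round-tripping through the integer type of
 * the same width, e.g. a manual fabs on an f32 value:
 *
 *    LLVMValueRef bits = ac_to_integer(ctx, f32_val);   // f32 -> i32
 *    bits = LLVMBuildAnd(ctx->builder, bits,
 *                        LLVMConstInt(ctx->i32, 0x7fffffff, 0), "");
 *    LLVMValueRef result = ac_to_float(ctx, bits);      // i32 -> f32
 */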
292 
293 LLVMValueRef ac_build_intrinsic(struct ac_llvm_context *ctx, const char *name,
294                                 LLVMTypeRef return_type, LLVMValueRef *params, unsigned param_count,
295                                 unsigned attrib_mask)
296 {
297    LLVMValueRef function, call;
298    bool set_callsite_attrs = !(attrib_mask & AC_FUNC_ATTR_LEGACY);
299 
300    function = LLVMGetNamedFunction(ctx->module, name);
301    if (!function) {
302       LLVMTypeRef param_types[32], function_type;
303       unsigned i;
304 
305       assert(param_count <= 32);
306 
307       for (i = 0; i < param_count; ++i) {
308          assert(params[i]);
309          param_types[i] = LLVMTypeOf(params[i]);
310       }
311       function_type = LLVMFunctionType(return_type, param_types, param_count, 0);
312       function = LLVMAddFunction(ctx->module, name, function_type);
313 
314       LLVMSetFunctionCallConv(function, LLVMCCallConv);
315       LLVMSetLinkage(function, LLVMExternalLinkage);
316 
317       if (!set_callsite_attrs)
318          ac_add_func_attributes(ctx->context, function, attrib_mask);
319    }
320 
321    call = LLVMBuildCall(ctx->builder, function, params, param_count, "");
322    if (set_callsite_attrs)
323       ac_add_func_attributes(ctx->context, call, attrib_mask);
324    return call;
325 }
326 
327 /**
328  * Given the i32 or vNi32 \p type, generate the textual name (e.g. for use with
329  * intrinsic names).
330  */
331 void ac_build_type_name_for_intr(LLVMTypeRef type, char *buf, unsigned bufsize)
332 {
333    LLVMTypeRef elem_type = type;
334 
335    if (LLVMGetTypeKind(type) == LLVMStructTypeKind) {
336       unsigned count = LLVMCountStructElementTypes(type);
337       int ret = snprintf(buf, bufsize, "sl_");
338       buf += ret;
339       bufsize -= ret;
340 
341       LLVMTypeRef *elems = alloca(count * sizeof(LLVMTypeRef));
342       LLVMGetStructElementTypes(type, elems);
343 
344       for (unsigned i = 0; i < count; i++) {
345          ac_build_type_name_for_intr(elems[i], buf, bufsize);
346          ret = strlen(buf);
347          buf += ret;
348          bufsize -= ret;
349       }
350 
351       snprintf(buf, bufsize, "s");
352       return;
353    }
354 
355    assert(bufsize >= 8);
356    if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
357       int ret = snprintf(buf, bufsize, "v%u", LLVMGetVectorSize(type));
358       if (ret < 0) {
359          char *type_name = LLVMPrintTypeToString(type);
360          fprintf(stderr, "Error building type name for: %s\n", type_name);
361          LLVMDisposeMessage(type_name);
362          return;
363       }
364       elem_type = LLVMGetElementType(type);
365       buf += ret;
366       bufsize -= ret;
367    }
368    switch (LLVMGetTypeKind(elem_type)) {
369    default:
370       break;
371    case LLVMIntegerTypeKind:
372       snprintf(buf, bufsize, "i%d", LLVMGetIntTypeWidth(elem_type));
373       break;
374    case LLVMHalfTypeKind:
375       snprintf(buf, bufsize, "f16");
376       break;
377    case LLVMFloatTypeKind:
378       snprintf(buf, bufsize, "f32");
379       break;
380    case LLVMDoubleTypeKind:
381       snprintf(buf, bufsize, "f64");
382       break;
383    }
384 }
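
/* Examples of the names generated above:
 *    i32                  -> "i32"
 *    <4 x float>          -> "v4f32"
 *    <2 x half>           -> "v2f16"
 *    { i32, <4 x float> } -> "sl_i32v4f32s"
 * These suffixes are appended to overloaded intrinsic names, e.g.
 * "llvm.amdgcn.raw.buffer.load.v4f32".
 */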
385 
386 /**
387  * Helper function that builds an LLVM IR PHI node and immediately adds
388  * incoming edges.
389  */
390 LLVMValueRef ac_build_phi(struct ac_llvm_context *ctx, LLVMTypeRef type, unsigned count_incoming,
391                           LLVMValueRef *values, LLVMBasicBlockRef *blocks)
392 {
393    LLVMValueRef phi = LLVMBuildPhi(ctx->builder, type, "");
394    LLVMAddIncoming(phi, values, blocks, count_incoming);
395    return phi;
396 }
397 
398 void ac_build_s_barrier(struct ac_llvm_context *ctx)
399 {
400    ac_build_intrinsic(ctx, "llvm.amdgcn.s.barrier", ctx->voidt, NULL, 0, AC_FUNC_ATTR_CONVERGENT);
401 }
402 
403 /* Prevent optimizations (at least of memory accesses) across the current
404  * point in the program by emitting empty inline assembly that is marked as
405  * having side effects.
406  *
407  * Optionally, a value can be passed through the inline assembly to prevent
408  * LLVM from hoisting calls to ReadNone functions.
409  */
410 void ac_build_optimization_barrier(struct ac_llvm_context *ctx, LLVMValueRef *pgpr, bool sgpr)
411 {
412    static int counter = 0;
413 
414    LLVMBuilderRef builder = ctx->builder;
415    char code[16];
416    const char *constraint = sgpr ? "=s,0" : "=v,0";
417 
418    snprintf(code, sizeof(code), "; %d", (int)p_atomic_inc_return(&counter));
419 
420    if (!pgpr) {
421       LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
422       LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "", true, false);
423       LLVMBuildCall(builder, inlineasm, NULL, 0, "");
424    } else if (LLVMTypeOf(*pgpr) == ctx->i32) {
425       /* Simple version for i32 that allows the caller to set LLVM metadata on the call
426        * instruction. */
427       LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
428       LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, constraint, true, false);
429 
430       *pgpr = LLVMBuildCall(builder, inlineasm, pgpr, 1, "");
431    } else if (LLVMTypeOf(*pgpr) == ctx->i16) {
432       /* Simple version for i16 that allows the caller to set LLVM metadata on the call
433        * instruction. */
434       LLVMTypeRef ftype = LLVMFunctionType(ctx->i16, &ctx->i16, 1, false);
435       LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, constraint, true, false);
436 
437       *pgpr = LLVMBuildCall(builder, inlineasm, pgpr, 1, "");
438    } else if (LLVMGetTypeKind(LLVMTypeOf(*pgpr)) == LLVMPointerTypeKind) {
439       LLVMTypeRef type = LLVMTypeOf(*pgpr);
440       LLVMTypeRef ftype = LLVMFunctionType(type, &type, 1, false);
441       LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, constraint, true, false);
442 
443       *pgpr = LLVMBuildCall(builder, inlineasm, pgpr, 1, "");
444    } else {
445       LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
446       LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, constraint, true, false);
447       LLVMTypeRef type = LLVMTypeOf(*pgpr);
448       unsigned bitsize = ac_get_elem_bits(ctx, type);
449       LLVMValueRef vgpr = *pgpr;
450       LLVMTypeRef vgpr_type;
451       unsigned vgpr_size;
452       LLVMValueRef vgpr0;
453 
454       if (bitsize < 32)
455          vgpr = LLVMBuildZExt(ctx->builder, vgpr, ctx->i32, "");
456 
457       vgpr_type = LLVMTypeOf(vgpr);
458       vgpr_size = ac_get_type_size(vgpr_type);
459 
460       assert(vgpr_size % 4 == 0);
461 
462       vgpr = LLVMBuildBitCast(builder, vgpr, LLVMVectorType(ctx->i32, vgpr_size / 4), "");
463       vgpr0 = LLVMBuildExtractElement(builder, vgpr, ctx->i32_0, "");
464       vgpr0 = LLVMBuildCall(builder, inlineasm, &vgpr0, 1, "");
465       vgpr = LLVMBuildInsertElement(builder, vgpr, vgpr0, ctx->i32_0, "");
466       vgpr = LLVMBuildBitCast(builder, vgpr, vgpr_type, "");
467 
468       if (bitsize < 32)
469          vgpr = LLVMBuildTrunc(builder, vgpr, type, "");
470 
471       *pgpr = vgpr;
472    }
473 }
474 
475 LLVMValueRef ac_build_shader_clock(struct ac_llvm_context *ctx, nir_scope scope)
476 {
477    const char *subgroup = "llvm.readcyclecounter";
478    const char *name = scope == NIR_SCOPE_DEVICE ? "llvm.amdgcn.s.memrealtime" : subgroup;
479 
480    LLVMValueRef tmp = ac_build_intrinsic(ctx, name, ctx->i64, NULL, 0, 0);
481    return LLVMBuildBitCast(ctx->builder, tmp, ctx->v2i32, "");
482 }
483 
484 LLVMValueRef ac_build_ballot(struct ac_llvm_context *ctx, LLVMValueRef value)
485 {
486    const char *name;
487 
488    if (LLVMTypeOf(value) == ctx->i1)
489       value = LLVMBuildZExt(ctx->builder, value, ctx->i32, "");
490 
491    if (ctx->wave_size == 64)
492       name = "llvm.amdgcn.icmp.i64.i32";
493    else
494       name = "llvm.amdgcn.icmp.i32.i32";
495 
496    LLVMValueRef args[3] = {value, ctx->i32_0, LLVMConstInt(ctx->i32, LLVMIntNE, 0)};
497 
498    /* We currently have no other way to prevent LLVM from lifting the icmp
499     * calls to a dominating basic block.
500     */
501    ac_build_optimization_barrier(ctx, &args[0], false);
502 
503    args[0] = ac_to_integer(ctx, args[0]);
504 
505    return ac_build_intrinsic(
506       ctx, name, ctx->iN_wavemask, args, 3,
507       AC_FUNC_ATTR_NOUNWIND | AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
508 }
509 
510 LLVMValueRef ac_get_i1_sgpr_mask(struct ac_llvm_context *ctx, LLVMValueRef value)
511 {
512    const char *name;
513 
514    if (ctx->wave_size == 64)
515       name = "llvm.amdgcn.icmp.i64.i1";
516    else
517       name = "llvm.amdgcn.icmp.i32.i1";
518 
519    LLVMValueRef args[3] = {
520       value,
521       ctx->i1false,
522       LLVMConstInt(ctx->i32, LLVMIntNE, 0),
523    };
524 
525    return ac_build_intrinsic(
526       ctx, name, ctx->iN_wavemask, args, 3,
527       AC_FUNC_ATTR_NOUNWIND | AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
528 }
529 
530 LLVMValueRef ac_build_vote_all(struct ac_llvm_context *ctx, LLVMValueRef value)
531 {
532    LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
533    LLVMValueRef vote_set = ac_build_ballot(ctx, value);
534    return LLVMBuildICmp(ctx->builder, LLVMIntEQ, vote_set, active_set, "");
535 }
536 
537 LLVMValueRef ac_build_vote_any(struct ac_llvm_context *ctx, LLVMValueRef value)
538 {
539    LLVMValueRef vote_set = ac_build_ballot(ctx, value);
540    return LLVMBuildICmp(ctx->builder, LLVMIntNE, vote_set, LLVMConstInt(ctx->iN_wavemask, 0, 0),
541                         "");
542 }
543 
544 LLVMValueRef ac_build_vote_eq(struct ac_llvm_context *ctx, LLVMValueRef value)
545 {
546    LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
547    LLVMValueRef vote_set = ac_build_ballot(ctx, value);
548 
549    LLVMValueRef all = LLVMBuildICmp(ctx->builder, LLVMIntEQ, vote_set, active_set, "");
550    LLVMValueRef none =
551       LLVMBuildICmp(ctx->builder, LLVMIntEQ, vote_set, LLVMConstInt(ctx->iN_wavemask, 0, 0), "");
552    return LLVMBuildOr(ctx->builder, all, none, "");
553 }
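
/* Worked example for the three vote helpers: with four active lanes whose
 * values are {1, 1, 0, 1} (lane 0 in bit 0):
 *    ballot   -> 0b1011, active set -> 0b1111
 *    vote_all -> false  (vote_set != active_set)
 *    vote_any -> true   (vote_set != 0)
 *    vote_eq  -> false  (neither all lanes nor no lanes agree)
 */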
554 
555 LLVMValueRef ac_build_varying_gather_values(struct ac_llvm_context *ctx, LLVMValueRef *values,
556                                             unsigned value_count, unsigned component)
557 {
558    LLVMValueRef vec = NULL;
559 
560    if (value_count == 1) {
561       return values[component];
562    } else if (!value_count)
563       unreachable("value_count is 0");
564 
565    for (unsigned i = component; i < value_count + component; i++) {
566       LLVMValueRef value = values[i];
567 
568       if (i == component)
569          vec = LLVMGetUndef(LLVMVectorType(LLVMTypeOf(value), value_count));
570       LLVMValueRef index = LLVMConstInt(ctx->i32, i - component, false);
571       vec = LLVMBuildInsertElement(ctx->builder, vec, value, index, "");
572    }
573    return vec;
574 }
575 
576 LLVMValueRef ac_build_gather_values_extended(struct ac_llvm_context *ctx, LLVMValueRef *values,
577                                              unsigned value_count, unsigned value_stride, bool load,
578                                              bool always_vector)
579 {
580    LLVMBuilderRef builder = ctx->builder;
581    LLVMValueRef vec = NULL;
582    unsigned i;
583 
584    if (value_count == 1 && !always_vector) {
585       if (load)
586          return LLVMBuildLoad(builder, values[0], "");
587       return values[0];
588    } else if (!value_count)
589       unreachable("value_count is 0");
590 
591    for (i = 0; i < value_count; i++) {
592       LLVMValueRef value = values[i * value_stride];
593       if (load)
594          value = LLVMBuildLoad(builder, value, "");
595 
596       if (!i)
597          vec = LLVMGetUndef(LLVMVectorType(LLVMTypeOf(value), value_count));
598       LLVMValueRef index = LLVMConstInt(ctx->i32, i, false);
599       vec = LLVMBuildInsertElement(builder, vec, value, index, "");
600    }
601    return vec;
602 }
603 
604 LLVMValueRef ac_build_gather_values(struct ac_llvm_context *ctx, LLVMValueRef *values,
605                                     unsigned value_count)
606 {
607    return ac_build_gather_values_extended(ctx, values, value_count, 1, false, false);
608 }
609 
610 LLVMValueRef ac_build_concat(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
611 {
612    unsigned a_size = ac_get_llvm_num_components(a);
613    unsigned b_size = ac_get_llvm_num_components(b);
614 
615    LLVMValueRef *elems = alloca((a_size + b_size) * sizeof(LLVMValueRef));
616    for (unsigned i = 0; i < a_size; i++)
617       elems[i] = ac_llvm_extract_elem(ctx, a, i);
618    for (unsigned i = 0; i < b_size; i++)
619       elems[a_size + i] = ac_llvm_extract_elem(ctx, b, i);
620 
621    return ac_build_gather_values(ctx, elems, a_size + b_size);
622 }
623 
624 /* Expand a scalar or vector to <dst_channels x type> by filling the remaining
625  * channels with undef. Extract at most src_channels components from the input.
626  */
627 LLVMValueRef ac_build_expand(struct ac_llvm_context *ctx, LLVMValueRef value,
628                              unsigned src_channels, unsigned dst_channels)
629 {
630    LLVMTypeRef elemtype;
631    LLVMValueRef *const chan = alloca(dst_channels * sizeof(LLVMValueRef));
632 
633    if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
634       unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));
635 
636       if (src_channels == dst_channels && vec_size == dst_channels)
637          return value;
638 
639       src_channels = MIN2(src_channels, vec_size);
640 
641       for (unsigned i = 0; i < src_channels; i++)
642          chan[i] = ac_llvm_extract_elem(ctx, value, i);
643 
644       elemtype = LLVMGetElementType(LLVMTypeOf(value));
645    } else {
646       if (src_channels) {
647          assert(src_channels == 1);
648          chan[0] = value;
649       }
650       elemtype = LLVMTypeOf(value);
651    }
652 
653    for (unsigned i = src_channels; i < dst_channels; i++)
654       chan[i] = LLVMGetUndef(elemtype);
655 
656    return ac_build_gather_values(ctx, chan, dst_channels);
657 }
658 
659 /* Extract components [start, start + channels) from a vector.
660  */
661 LLVMValueRef ac_extract_components(struct ac_llvm_context *ctx, LLVMValueRef value, unsigned start,
662                                    unsigned channels)
663 {
664    LLVMValueRef *const chan = alloca(channels * sizeof(LLVMValueRef));
665 
666    for (unsigned i = 0; i < channels; i++)
667       chan[i] = ac_llvm_extract_elem(ctx, value, i + start);
668 
669    return ac_build_gather_values(ctx, chan, channels);
670 }
671 
672 /* Expand a scalar or vector to <4 x type> by filling the remaining channels
673  * with undef. Extract at most num_channels components from the input.
674  */
675 LLVMValueRef ac_build_expand_to_vec4(struct ac_llvm_context *ctx, LLVMValueRef value,
676                                      unsigned num_channels)
677 {
678    return ac_build_expand(ctx, value, num_channels, 4);
679 }
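
/* Example: a 2-channel value expanded for an intrinsic that expects 4
 * channels (remaining channels are undef, not zero):
 *
 *    <2 x float> {s, t}  ->  <4 x float> {s, t, undef, undef}
 */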
680 
681 LLVMValueRef ac_build_round(struct ac_llvm_context *ctx, LLVMValueRef value)
682 {
683    unsigned type_size = ac_get_type_size(LLVMTypeOf(value));
684    const char *name;
685 
686    if (type_size == 2)
687       name = "llvm.rint.f16";
688    else if (type_size == 4)
689       name = "llvm.rint.f32";
690    else
691       name = "llvm.rint.f64";
692 
693    return ac_build_intrinsic(ctx, name, LLVMTypeOf(value), &value, 1, AC_FUNC_ATTR_READNONE);
694 }
695 
696 LLVMValueRef ac_build_fdiv(struct ac_llvm_context *ctx, LLVMValueRef num, LLVMValueRef den)
697 {
698    unsigned type_size = ac_get_type_size(LLVMTypeOf(den));
699    const char *name;
700 
701    /* For doubles, we need precise division to pass GLCTS. */
702    if (ctx->float_mode == AC_FLOAT_MODE_DEFAULT_OPENGL && type_size == 8)
703       return LLVMBuildFDiv(ctx->builder, num, den, "");
704 
705    if (type_size == 2)
706       name = "llvm.amdgcn.rcp.f16";
707    else if (type_size == 4)
708       name = "llvm.amdgcn.rcp.f32";
709    else
710       name = "llvm.amdgcn.rcp.f64";
711 
712    LLVMValueRef rcp =
713       ac_build_intrinsic(ctx, name, LLVMTypeOf(den), &den, 1, AC_FUNC_ATTR_READNONE);
714 
715    return LLVMBuildFMul(ctx->builder, num, rcp, "");
716 }
717 
718 /* See fast_idiv_by_const.h. */
719 /* Set: increment = util_fast_udiv_info::increment ? multiplier : 0; */
720 LLVMValueRef ac_build_fast_udiv(struct ac_llvm_context *ctx, LLVMValueRef num,
721                                 LLVMValueRef multiplier, LLVMValueRef pre_shift,
722                                 LLVMValueRef post_shift, LLVMValueRef increment)
723 {
724    LLVMBuilderRef builder = ctx->builder;
725 
726    num = LLVMBuildLShr(builder, num, pre_shift, "");
727    num = LLVMBuildMul(builder, LLVMBuildZExt(builder, num, ctx->i64, ""),
728                       LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
729    num = LLVMBuildAdd(builder, num, LLVMBuildZExt(builder, increment, ctx->i64, ""), "");
730    num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
731    num = LLVMBuildTrunc(builder, num, ctx->i32, "");
732    return LLVMBuildLShr(builder, num, post_shift, "");
733 }
734 
735 /* See fast_idiv_by_const.h. */
736 /* If num != UINT_MAX, this more efficient version can be used. */
737 /* Set: increment = util_fast_udiv_info::increment; */
738 LLVMValueRef ac_build_fast_udiv_nuw(struct ac_llvm_context *ctx, LLVMValueRef num,
739                                     LLVMValueRef multiplier, LLVMValueRef pre_shift,
740                                     LLVMValueRef post_shift, LLVMValueRef increment)
741 {
742    LLVMBuilderRef builder = ctx->builder;
743 
744    num = LLVMBuildLShr(builder, num, pre_shift, "");
745    num = LLVMBuildNUWAdd(builder, num, increment, "");
746    num = LLVMBuildMul(builder, LLVMBuildZExt(builder, num, ctx->i64, ""),
747                       LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
748    num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
749    num = LLVMBuildTrunc(builder, num, ctx->i32, "");
750    return LLVMBuildLShr(builder, num, post_shift, "");
751 }
752 
753 /* See fast_idiv_by_const.h. */
754 /* Both operands must fit in 31 bits and the divisor must not be 1. */
755 LLVMValueRef ac_build_fast_udiv_u31_d_not_one(struct ac_llvm_context *ctx, LLVMValueRef num,
756                                               LLVMValueRef multiplier, LLVMValueRef post_shift)
757 {
758    LLVMBuilderRef builder = ctx->builder;
759 
760    num = LLVMBuildMul(builder, LLVMBuildZExt(builder, num, ctx->i64, ""),
761                       LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
762    num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
763    num = LLVMBuildTrunc(builder, num, ctx->i32, "");
764    return LLVMBuildLShr(builder, num, post_shift, "");
765 }
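
/* Rough sketch of the identity behind all three variants (see
 * fast_idiv_by_const.h for the precise derivation and preconditions):
 *
 *    num / divisor  ==  ((num * multiplier) >> 32) >> post_shift
 *
 * where multiplier is approximately 2^(32 + post_shift) / divisor and is
 * computed once on the CPU per divisor; pre_shift and increment handle the
 * divisor/numerator ranges for which the simple form is not exact.
 */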
766 
767 /* Coordinates for cube map selection. sc, tc, and ma are as in Table 8.27
768  * of the OpenGL 4.5 (Compatibility Profile) specification, except ma is
769  * already multiplied by two. id is the cube face number.
770  */
771 struct cube_selection_coords {
772    LLVMValueRef stc[2];
773    LLVMValueRef ma;
774    LLVMValueRef id;
775 };
776 
777 static void build_cube_intrinsic(struct ac_llvm_context *ctx, LLVMValueRef in[3],
778                                  struct cube_selection_coords *out)
779 {
780    LLVMTypeRef f32 = ctx->f32;
781 
782    out->stc[1] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubetc", f32, in, 3, AC_FUNC_ATTR_READNONE);
783    out->stc[0] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubesc", f32, in, 3, AC_FUNC_ATTR_READNONE);
784    out->ma = ac_build_intrinsic(ctx, "llvm.amdgcn.cubema", f32, in, 3, AC_FUNC_ATTR_READNONE);
785    out->id = ac_build_intrinsic(ctx, "llvm.amdgcn.cubeid", f32, in, 3, AC_FUNC_ATTR_READNONE);
786 }
787 
788 /**
789  * Build a manual selection sequence for cube face sc/tc coordinates and
790  * major axis vector (multiplied by 2 for consistency) for the given
791  * vec3 \p coords, for the face implied by \p selcoords.
792  *
793  * For the major axis, we always adjust the sign to be in the direction of
794  * selcoords.ma; i.e., a positive out_ma means that coords is pointed towards
795  * the selcoords major axis.
796  */
797 static void build_cube_select(struct ac_llvm_context *ctx,
798                               const struct cube_selection_coords *selcoords,
799                               const LLVMValueRef *coords, LLVMValueRef *out_st,
800                               LLVMValueRef *out_ma)
801 {
802    LLVMBuilderRef builder = ctx->builder;
803    LLVMTypeRef f32 = LLVMTypeOf(coords[0]);
804    LLVMValueRef is_ma_positive;
805    LLVMValueRef sgn_ma;
806    LLVMValueRef is_ma_z, is_not_ma_z;
807    LLVMValueRef is_ma_y;
808    LLVMValueRef is_ma_x;
809    LLVMValueRef sgn;
810    LLVMValueRef tmp;
811 
812    is_ma_positive = LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->ma, LLVMConstReal(f32, 0.0), "");
813    sgn_ma = LLVMBuildSelect(builder, is_ma_positive, LLVMConstReal(f32, 1.0),
814                             LLVMConstReal(f32, -1.0), "");
815 
816    is_ma_z = LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 4.0), "");
817    is_not_ma_z = LLVMBuildNot(builder, is_ma_z, "");
818    is_ma_y = LLVMBuildAnd(
819       builder, is_not_ma_z,
820       LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 2.0), ""), "");
821    is_ma_x = LLVMBuildAnd(builder, is_not_ma_z, LLVMBuildNot(builder, is_ma_y, ""), "");
822 
823    /* Select sc */
824    tmp = LLVMBuildSelect(builder, is_ma_x, coords[2], coords[0], "");
825    sgn = LLVMBuildSelect(
826       builder, is_ma_y, LLVMConstReal(f32, 1.0),
827       LLVMBuildSelect(builder, is_ma_z, sgn_ma, LLVMBuildFNeg(builder, sgn_ma, ""), ""), "");
828    out_st[0] = LLVMBuildFMul(builder, tmp, sgn, "");
829 
830    /* Select tc */
831    tmp = LLVMBuildSelect(builder, is_ma_y, coords[2], coords[1], "");
832    sgn = LLVMBuildSelect(builder, is_ma_y, sgn_ma, LLVMConstReal(f32, -1.0), "");
833    out_st[1] = LLVMBuildFMul(builder, tmp, sgn, "");
834 
835    /* Select ma */
836    tmp = LLVMBuildSelect(builder, is_ma_z, coords[2],
837                          LLVMBuildSelect(builder, is_ma_y, coords[1], coords[0], ""), "");
838    tmp = ac_build_intrinsic(ctx, "llvm.fabs.f32", ctx->f32, &tmp, 1, AC_FUNC_ATTR_READNONE);
839    *out_ma = LLVMBuildFMul(builder, tmp, LLVMConstReal(f32, 2.0), "");
840 }
841 
842 void ac_prepare_cube_coords(struct ac_llvm_context *ctx, bool is_deriv, bool is_array, bool is_lod,
843                             LLVMValueRef *coords_arg, LLVMValueRef *derivs_arg)
844 {
845 
846    LLVMBuilderRef builder = ctx->builder;
847    struct cube_selection_coords selcoords;
848    LLVMValueRef coords[3];
849    LLVMValueRef invma;
850 
851    if (is_array && !is_lod) {
852       LLVMValueRef tmp = ac_build_round(ctx, coords_arg[3]);
853 
854       /* Section 8.9 (Texture Functions) of the GLSL 4.50 spec says:
855        *
856        *    "For Array forms, the array layer used will be
857        *
858        *       max(0, min(d−1, floor(layer+0.5)))
859        *
860        *     where d is the depth of the texture array and layer
861        *     comes from the component indicated in the tables below.
862        *     Workaround for an issue where the layer is taken from a
863        *     helper invocation which happens to fall on a different
864        *     layer due to extrapolation."
865        *
866        * GFX8 and earlier attempt to implement this in hardware by
867        * clamping the value of coords[2] = (8 * layer) + face.
868        * Unfortunately, this means that we end up with the wrong
869        * face when clamping occurs.
870        *
871        * Clamp the layer earlier to work around the issue.
872        */
873       if (ctx->chip_class <= GFX8) {
874          LLVMValueRef ge0;
875          ge0 = LLVMBuildFCmp(builder, LLVMRealOGE, tmp, ctx->f32_0, "");
876          tmp = LLVMBuildSelect(builder, ge0, tmp, ctx->f32_0, "");
877       }
878 
879       coords_arg[3] = tmp;
880    }
881 
882    build_cube_intrinsic(ctx, coords_arg, &selcoords);
883 
884    invma =
885       ac_build_intrinsic(ctx, "llvm.fabs.f32", ctx->f32, &selcoords.ma, 1, AC_FUNC_ATTR_READNONE);
886    invma = ac_build_fdiv(ctx, LLVMConstReal(ctx->f32, 1.0), invma);
887 
888    for (int i = 0; i < 2; ++i)
889       coords[i] = LLVMBuildFMul(builder, selcoords.stc[i], invma, "");
890 
891    coords[2] = selcoords.id;
892 
893    if (is_deriv && derivs_arg) {
894       LLVMValueRef derivs[4];
895       int axis;
896 
897       /* Convert cube derivatives to 2D derivatives. */
898       for (axis = 0; axis < 2; axis++) {
899          LLVMValueRef deriv_st[2];
900          LLVMValueRef deriv_ma;
901 
902          /* Transform the derivative alongside the texture
903           * coordinate. Mathematically, the correct formula is
904           * as follows. Assume we're projecting onto the +Z face
905           * and denote by dx/dh the derivative of the (original)
906           * X texture coordinate with respect to horizontal
907           * window coordinates. The projection onto the +Z face
908           * plane is:
909           *
910           *   f(x,z) = x/z
911           *
912           * Then df/dh = df/dx * dx/dh + df/dz * dz/dh
913           *            = 1/z * dx/dh - x/z * 1/z * dz/dh.
914           *
915           * This motivates the implementation below.
916           *
917           * Whether this actually gives the expected results for
918           * apps that might feed in derivatives obtained via
919           * finite differences is anyone's guess. The OpenGL spec
920           * seems awfully quiet about how textureGrad for cube
921           * maps should be handled.
922           */
923          build_cube_select(ctx, &selcoords, &derivs_arg[axis * 3], deriv_st, &deriv_ma);
924 
925          deriv_ma = LLVMBuildFMul(builder, deriv_ma, invma, "");
926 
927          for (int i = 0; i < 2; ++i)
928             derivs[axis * 2 + i] =
929                LLVMBuildFSub(builder, LLVMBuildFMul(builder, deriv_st[i], invma, ""),
930                              LLVMBuildFMul(builder, deriv_ma, coords[i], ""), "");
931       }
932 
933       memcpy(derivs_arg, derivs, sizeof(derivs));
934    }
935 
936    /* Shift the texture coordinate. This must be applied after the
937     * derivative calculation.
938     */
939    for (int i = 0; i < 2; ++i)
940       coords[i] = LLVMBuildFAdd(builder, coords[i], LLVMConstReal(ctx->f32, 1.5), "");
941 
942    if (is_array) {
943       /* for cube arrays coord.z = coord.w(array_index) * 8 + face */
944       /* coords_arg.w component - array_index for cube arrays */
945       coords[2] = ac_build_fmad(ctx, coords_arg[3], LLVMConstReal(ctx->f32, 8.0), coords[2]);
946    }
947 
948    memcpy(coords_arg, coords, sizeof(coords));
949 }
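
/* Summary of the transform performed above (illustrative):
 *
 *    in : coords_arg = {x, y, z[, layer]}          (a direction vector)
 *    out: coords_arg = {sc/|ma| + 1.5, tc/|ma| + 1.5, face [+ 8*layer]}
 *
 * i.e. the face-local coordinates plus face index layout that the cube
 * image sample instructions expect; derivatives, if present, are projected
 * onto the selected face in the same way.
 */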
950 
951 LLVMValueRef ac_build_fs_interp(struct ac_llvm_context *ctx, LLVMValueRef llvm_chan,
952                                 LLVMValueRef attr_number, LLVMValueRef params, LLVMValueRef i,
953                                 LLVMValueRef j)
954 {
955    LLVMValueRef args[5];
956    LLVMValueRef p1;
957 
958    args[0] = i;
959    args[1] = llvm_chan;
960    args[2] = attr_number;
961    args[3] = params;
962 
963    p1 = ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p1", ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);
964 
965    args[0] = p1;
966    args[1] = j;
967    args[2] = llvm_chan;
968    args[3] = attr_number;
969    args[4] = params;
970 
971    return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p2", ctx->f32, args, 5,
972                              AC_FUNC_ATTR_READNONE);
973 }
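
/* Illustrative summary: the p1/p2 intrinsics above map to the hardware
 * v_interp_p1_f32 / v_interp_p2_f32 pair, so the attribute is evaluated at
 * the (i, j) barycentrics roughly as
 *
 *    result = P0 + i * (P1 - P0) + j * (P2 - P0)
 *
 * with one multiply-add step per intrinsic.
 */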
974 
975 LLVMValueRef ac_build_fs_interp_f16(struct ac_llvm_context *ctx, LLVMValueRef llvm_chan,
976                                     LLVMValueRef attr_number, LLVMValueRef params, LLVMValueRef i,
977                                     LLVMValueRef j, bool high_16bits)
978 {
979    LLVMValueRef args[6];
980    LLVMValueRef p1;
981 
982    args[0] = i;
983    args[1] = llvm_chan;
984    args[2] = attr_number;
985    args[3] = high_16bits ? ctx->i1true : ctx->i1false;
986    args[4] = params;
987 
988    p1 = ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p1.f16", ctx->f32, args, 5,
989                            AC_FUNC_ATTR_READNONE);
990 
991    args[0] = p1;
992    args[1] = j;
993    args[2] = llvm_chan;
994    args[3] = attr_number;
995    args[4] = high_16bits ? ctx->i1true : ctx->i1false;
996    args[5] = params;
997 
998    return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p2.f16", ctx->f16, args, 6,
999                              AC_FUNC_ATTR_READNONE);
1000 }
1001 
1002 LLVMValueRef ac_build_fs_interp_mov(struct ac_llvm_context *ctx, LLVMValueRef parameter,
1003                                     LLVMValueRef llvm_chan, LLVMValueRef attr_number,
1004                                     LLVMValueRef params)
1005 {
1006    LLVMValueRef args[4];
1007 
1008    args[0] = parameter;
1009    args[1] = llvm_chan;
1010    args[2] = attr_number;
1011    args[3] = params;
1012 
1013    return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.mov", ctx->f32, args, 4,
1014                              AC_FUNC_ATTR_READNONE);
1015 }
1016 
1017 LLVMValueRef ac_build_gep_ptr(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
1018                               LLVMValueRef index)
1019 {
1020    return LLVMBuildGEP(ctx->builder, base_ptr, &index, 1, "");
1021 }
1022 
1023 LLVMValueRef ac_build_gep0(struct ac_llvm_context *ctx, LLVMValueRef base_ptr, LLVMValueRef index)
1024 {
1025    LLVMValueRef indices[2] = {
1026       ctx->i32_0,
1027       index,
1028    };
1029    return LLVMBuildGEP(ctx->builder, base_ptr, indices, 2, "");
1030 }
1031 
1032 LLVMValueRef ac_build_pointer_add(struct ac_llvm_context *ctx, LLVMValueRef ptr, LLVMValueRef index)
1033 {
1034    return LLVMBuildPointerCast(ctx->builder, LLVMBuildGEP(ctx->builder, ptr, &index, 1, ""),
1035                                LLVMTypeOf(ptr), "");
1036 }
1037 
1038 void ac_build_indexed_store(struct ac_llvm_context *ctx, LLVMValueRef base_ptr, LLVMValueRef index,
1039                             LLVMValueRef value)
1040 {
1041    LLVMBuildStore(ctx->builder, value, ac_build_gep0(ctx, base_ptr, index));
1042 }
1043 
1044 /**
1045  * Build an LLVM IR indexed load using LLVMBuildGEP + LLVMBuildLoad.
1046  * It's equivalent to doing a load from &base_ptr[index].
1047  *
1048  * \param base_ptr  Where the array starts.
1049  * \param index     The element index into the array.
1050  * \param uniform   Whether the base_ptr and index can be assumed to be
1051  *                  dynamically uniform (i.e. load to an SGPR)
1052  * \param invariant Whether the load is invariant (no other opcodes affect it)
1053  * \param no_unsigned_wraparound
1054  *    For all possible re-associations and re-distributions of an expression
1055  *    "base_ptr + index * elemsize" into "addr + offset" (excluding GEPs
1056  *    without inbounds in base_ptr), this parameter is true if "addr + offset"
1057  *    does not result in an unsigned integer wraparound. This is used for
1058  *    optimal code generation of 32-bit pointer arithmetic.
1059  *
1060  *    For example, a 32-bit immediate offset that causes a 32-bit unsigned
1061  *    integer wraparound can't be an imm offset in s_load_dword, because
1062  *    the instruction performs "addr + offset" in 64 bits.
1063  *
1064  *    Expected usage for bindless textures by chaining GEPs:
1065  *      // possible unsigned wraparound, don't use InBounds:
1066  *      ptr1 = LLVMBuildGEP(base_ptr, index);
1067  *      image = load(ptr1); // becomes "s_load ptr1, 0"
1068  *
1069  *      ptr2 = LLVMBuildInBoundsGEP(ptr1, 32 / elemsize);
1070  *      sampler = load(ptr2); // becomes "s_load ptr1, 32" thanks to InBounds
1071  */
1072 static LLVMValueRef ac_build_load_custom(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
1073                                          LLVMValueRef index, bool uniform, bool invariant,
1074                                          bool no_unsigned_wraparound)
1075 {
1076    LLVMValueRef pointer, result;
1077 
1078    if (no_unsigned_wraparound &&
1079        LLVMGetPointerAddressSpace(LLVMTypeOf(base_ptr)) == AC_ADDR_SPACE_CONST_32BIT)
1080       pointer = LLVMBuildInBoundsGEP(ctx->builder, base_ptr, &index, 1, "");
1081    else
1082       pointer = LLVMBuildGEP(ctx->builder, base_ptr, &index, 1, "");
1083 
1084    if (uniform)
1085       LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
1086    result = LLVMBuildLoad(ctx->builder, pointer, "");
1087    if (invariant)
1088       LLVMSetMetadata(result, ctx->invariant_load_md_kind, ctx->empty_md);
1089    LLVMSetAlignment(result, 4);
1090    return result;
1091 }
1092 
1093 LLVMValueRef ac_build_load(struct ac_llvm_context *ctx, LLVMValueRef base_ptr, LLVMValueRef index)
1094 {
1095    return ac_build_load_custom(ctx, base_ptr, index, false, false, false);
1096 }
1097 
1098 LLVMValueRef ac_build_load_invariant(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
1099                                      LLVMValueRef index)
1100 {
1101    return ac_build_load_custom(ctx, base_ptr, index, false, true, false);
1102 }
1103 
1104 /* This assumes that there is no unsigned integer wraparound during the address
1105  * computation, excluding all GEPs within base_ptr. */
1106 LLVMValueRef ac_build_load_to_sgpr(struct ac_llvm_context *ctx, LLVMValueRef base_ptr,
1107                                    LLVMValueRef index)
1108 {
1109    return ac_build_load_custom(ctx, base_ptr, index, true, true, true);
1110 }
1111 
1112 /* See ac_build_load_custom() documentation. */
1113 LLVMValueRef ac_build_load_to_sgpr_uint_wraparound(struct ac_llvm_context *ctx,
1114                                                    LLVMValueRef base_ptr, LLVMValueRef index)
1115 {
1116    return ac_build_load_custom(ctx, base_ptr, index, true, true, false);
1117 }
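
/* Flags passed by the four wrappers above
 * (uniform / invariant / no_unsigned_wraparound):
 *    ac_build_load                         : false / false / false
 *    ac_build_load_invariant               : false / true  / false
 *    ac_build_load_to_sgpr                 : true  / true  / true
 *    ac_build_load_to_sgpr_uint_wraparound : true  / true  / false
 */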
1118 
1119 static unsigned get_load_cache_policy(struct ac_llvm_context *ctx, unsigned cache_policy)
1120 {
1121    return cache_policy | (ctx->chip_class >= GFX10 && cache_policy & ac_glc ? ac_dlc : 0);
1122 }
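
/* Note (assumption based on the GFX10 cache hierarchy): on GFX10+, GLC by
 * itself only bypasses the per-CU L0 cache, while the new DLC bit controls
 * the shared L1, so loads that request coherence get both bits here.
 */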
1123 
1124 static void ac_build_buffer_store_common(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
1125                                          LLVMValueRef data, LLVMValueRef vindex,
1126                                          LLVMValueRef voffset, LLVMValueRef soffset,
1127                                          unsigned cache_policy, bool use_format)
1128 {
1129    LLVMValueRef args[6];
1130    int idx = 0;
1131    args[idx++] = data;
1132    args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
1133    if (vindex)
1134       args[idx++] = vindex;
1135    args[idx++] = voffset ? voffset : ctx->i32_0;
1136    args[idx++] = soffset ? soffset : ctx->i32_0;
1137    args[idx++] = LLVMConstInt(ctx->i32, cache_policy, 0);
1138    const char *indexing_kind = vindex ? "struct" : "raw";
1139    char name[256], type_name[8];
1140 
1141    ac_build_type_name_for_intr(LLVMTypeOf(data), type_name, sizeof(type_name));
1142 
1143    if (use_format) {
1144       snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.store.format.%s", indexing_kind,
1145                type_name);
1146    } else {
1147       snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.store.%s", indexing_kind, type_name);
1148    }
1149 
1150    ac_build_intrinsic(ctx, name, ctx->voidt, args, idx, AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY);
1151 }
1152 
1153 void ac_build_buffer_store_format(struct ac_llvm_context *ctx, LLVMValueRef rsrc, LLVMValueRef data,
1154                                   LLVMValueRef vindex, LLVMValueRef voffset, unsigned cache_policy)
1155 {
1156    ac_build_buffer_store_common(ctx, rsrc, data, vindex, voffset, NULL, cache_policy, true);
1157 }
1158 
1159 /* buffer_store_dword(,x2,x3,x4) <- the suffix is selected by the type of vdata. */
1160 void ac_build_buffer_store_dword(struct ac_llvm_context *ctx, LLVMValueRef rsrc, LLVMValueRef vdata,
1161                                  LLVMValueRef vindex, LLVMValueRef voffset, LLVMValueRef soffset,
1162                                  unsigned inst_offset, unsigned cache_policy)
1163 {
1164    unsigned num_channels = ac_get_llvm_num_components(vdata);
1165 
1166    /* Split 3 channel stores if unsupported. */
1167    if (num_channels == 3 && !ac_has_vec3_support(ctx->chip_class, false)) {
1168       LLVMValueRef v[3], v01;
1169 
1170       for (int i = 0; i < 3; i++) {
1171          v[i] = LLVMBuildExtractElement(ctx->builder, vdata, LLVMConstInt(ctx->i32, i, 0), "");
1172       }
1173       v01 = ac_build_gather_values(ctx, v, 2);
1174 
1175       ac_build_buffer_store_dword(ctx, rsrc, v01, vindex, voffset, soffset, inst_offset, cache_policy);
1176       ac_build_buffer_store_dword(ctx, rsrc, v[2], vindex, voffset, soffset, inst_offset + 8,
1177                                   cache_policy);
1178       return;
1179    }
1180 
1181    /* SWIZZLE_ENABLE requires that soffset isn't folded into voffset
1182     * (voffset is swizzled, but soffset isn't swizzled).
1183     * llvm.amdgcn.buffer.store doesn't have a separate soffset parameter.
1184     */
1185    if (!(cache_policy & ac_swizzled)) {
1186       LLVMValueRef offset = soffset;
1187 
1188       if (inst_offset)
1189          offset = LLVMBuildAdd(ctx->builder, offset, LLVMConstInt(ctx->i32, inst_offset, 0), "");
1190 
1191       ac_build_buffer_store_common(ctx, rsrc, ac_to_float(ctx, vdata), vindex, voffset, offset,
1192                                    cache_policy, false);
1193       return;
1194    }
1195 
1196    static const unsigned dfmts[] = {V_008F0C_BUF_DATA_FORMAT_32, V_008F0C_BUF_DATA_FORMAT_32_32,
1197                                     V_008F0C_BUF_DATA_FORMAT_32_32_32,
1198                                     V_008F0C_BUF_DATA_FORMAT_32_32_32_32};
1199    unsigned dfmt = dfmts[num_channels - 1];
1200    unsigned nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
1201    LLVMValueRef immoffset = LLVMConstInt(ctx->i32, inst_offset, 0);
1202 
1203    ac_build_tbuffer_store(ctx, rsrc, vdata, vindex, voffset, soffset, immoffset, num_channels, dfmt,
1204                           nfmt, cache_policy);
1205 }
1206 
1207 static LLVMValueRef ac_build_buffer_load_common(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
1208                                                 LLVMValueRef vindex, LLVMValueRef voffset,
1209                                                 LLVMValueRef soffset, unsigned num_channels,
1210                                                 LLVMTypeRef channel_type, unsigned cache_policy,
1211                                                 bool can_speculate, bool use_format,
1212                                                 bool structurized)
1213 {
1214    LLVMValueRef args[5];
1215    int idx = 0;
1216    args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
1217    if (structurized)
1218       args[idx++] = vindex ? vindex : ctx->i32_0;
1219    args[idx++] = voffset ? voffset : ctx->i32_0;
1220    args[idx++] = soffset ? soffset : ctx->i32_0;
1221    args[idx++] = LLVMConstInt(ctx->i32, get_load_cache_policy(ctx, cache_policy), 0);
1222    unsigned func =
1223       !ac_has_vec3_support(ctx->chip_class, use_format) && num_channels == 3 ? 4 : num_channels;
1224    const char *indexing_kind = structurized ? "struct" : "raw";
1225    char name[256], type_name[8];
1226 
1227    /* D16 is only supported on gfx8+ */
1228    assert(!use_format || (channel_type != ctx->f16 && channel_type != ctx->i16) ||
1229           ctx->chip_class >= GFX8);
1230 
1231    LLVMTypeRef type = func > 1 ? LLVMVectorType(channel_type, func) : channel_type;
1232    ac_build_type_name_for_intr(type, type_name, sizeof(type_name));
1233 
1234    if (use_format) {
1235       snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.load.format.%s", indexing_kind,
1236                type_name);
1237    } else {
1238       snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.load.%s", indexing_kind, type_name);
1239    }
1240 
1241    return ac_build_intrinsic(ctx, name, type, args, idx, ac_get_load_intr_attribs(can_speculate));
1242 }
1243 
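/* A sketch of the policy below: all offsets are first folded into a single
 * dword offset. If allow_smem is set and the cache policy permits it (no
 * SLC, and GLC only on GFX8+), each channel is fetched with a scalar
 * llvm.amdgcn.s.buffer.load.f32; otherwise the function falls back to the
 * vector buffer load helper above.
 */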
1244 LLVMValueRef ac_build_buffer_load(struct ac_llvm_context *ctx, LLVMValueRef rsrc, int num_channels,
1245                                   LLVMValueRef vindex, LLVMValueRef voffset, LLVMValueRef soffset,
1246                                   unsigned inst_offset, LLVMTypeRef channel_type,
1247                                   unsigned cache_policy, bool can_speculate, bool allow_smem)
1248 {
1249    LLVMValueRef offset = LLVMConstInt(ctx->i32, inst_offset, 0);
1250    if (voffset)
1251       offset = LLVMBuildAdd(ctx->builder, offset, voffset, "");
1252    if (soffset)
1253       offset = LLVMBuildAdd(ctx->builder, offset, soffset, "");
1254 
1255    if (allow_smem && !(cache_policy & ac_slc) &&
1256        (!(cache_policy & ac_glc) || ctx->chip_class >= GFX8)) {
1257       assert(vindex == NULL);
1258 
1259       LLVMValueRef result[8];
1260 
1261       for (int i = 0; i < num_channels; i++) {
1262          if (i) {
1263             offset = LLVMBuildAdd(ctx->builder, offset, LLVMConstInt(ctx->i32, 4, 0), "");
1264          }
1265          LLVMValueRef args[3] = {
1266             rsrc,
1267             offset,
1268             LLVMConstInt(ctx->i32, get_load_cache_policy(ctx, cache_policy), 0),
1269          };
1270          result[i] = ac_build_intrinsic(ctx, "llvm.amdgcn.s.buffer.load.f32", ctx->f32, args, 3,
1271                                         AC_FUNC_ATTR_READNONE);
1272       }
1273       if (num_channels == 1)
1274          return result[0];
1275 
1276       if (num_channels == 3 && !ac_has_vec3_support(ctx->chip_class, false))
1277          result[num_channels++] = LLVMGetUndef(ctx->f32);
1278       return ac_build_gather_values(ctx, result, num_channels);
1279    }
1280 
1281    return ac_build_buffer_load_common(ctx, rsrc, vindex, offset, ctx->i32_0, num_channels,
1282                                       channel_type, cache_policy, can_speculate, false, false);
1283 }
1284 
1285 LLVMValueRef ac_build_buffer_load_format(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
1286                                          LLVMValueRef vindex, LLVMValueRef voffset,
1287                                          unsigned num_channels, unsigned cache_policy,
1288                                          bool can_speculate, bool d16, bool tfe)
1289 {
1290    if (tfe) {
1291       assert(!d16);
1292 
1293       char code[256];
1294       /* The definition in the assembly and the one in the constraint string
1295        * differ because of an assembler bug.
1296        */
1297       snprintf(code, sizeof(code),
1298                "v_mov_b32 v0, 0\n"
1299                "v_mov_b32 v1, 0\n"
1300                "v_mov_b32 v2, 0\n"
1301                "v_mov_b32 v3, 0\n"
1302                "v_mov_b32 v4, 0\n"
1303                "buffer_load_format_xyzw v[0:3], $1, $2, 0, idxen offen %s %s tfe %s\n"
1304                "s_waitcnt vmcnt(0)",
1305                cache_policy & ac_glc ? "glc" : "",
1306                cache_policy & ac_slc ? "slc" : "",
1307                cache_policy & ac_dlc ? "dlc" : "");
1308 
1309       LLVMTypeRef param_types[] = {ctx->v2i32, ctx->v4i32};
1310       LLVMTypeRef calltype = LLVMFunctionType(LLVMVectorType(ctx->f32, 5), param_types, 2, false);
1311       LLVMValueRef inlineasm = LLVMConstInlineAsm(calltype, code, "=&{v[0:4]},v,s", false, false);
1312 
1313       LLVMValueRef addr_comp[2] = {vindex ? vindex : ctx->i32_0,
1314                                    voffset ? voffset : ctx->i32_0};
1315 
1316       LLVMValueRef args[] = {ac_build_gather_values(ctx, addr_comp, 2),
1317                              LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "")};
1318       LLVMValueRef res = LLVMBuildCall(ctx->builder, inlineasm, args, 2, "");
1319 
1320       return ac_build_concat(ctx, ac_trim_vector(ctx, res, num_channels),
1321                              ac_llvm_extract_elem(ctx, res, 4));
1322    }
1323 
1324    return ac_build_buffer_load_common(ctx, rsrc, vindex, voffset, ctx->i32_0, num_channels,
1325                                       d16 ? ctx->f16 : ctx->f32, cache_policy, can_speculate, true,
1326                                       true);
1327 }
1328 
1329 static LLVMValueRef ac_build_tbuffer_load(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
1330                                           LLVMValueRef vindex, LLVMValueRef voffset,
1331                                           LLVMValueRef soffset, LLVMValueRef immoffset,
1332                                           unsigned num_channels, unsigned dfmt, unsigned nfmt,
1333                                           unsigned cache_policy, bool can_speculate,
1334                                           bool structurized)
1335 {
1336    voffset = LLVMBuildAdd(ctx->builder, voffset, immoffset, "");
1337 
1338    LLVMValueRef args[6];
1339    int idx = 0;
1340    args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
1341    if (structurized)
1342       args[idx++] = vindex ? vindex : ctx->i32_0;
1343    args[idx++] = voffset ? voffset : ctx->i32_0;
1344    args[idx++] = soffset ? soffset : ctx->i32_0;
1345    args[idx++] = LLVMConstInt(ctx->i32, ac_get_tbuffer_format(ctx->chip_class, dfmt, nfmt), 0);
1346    args[idx++] = LLVMConstInt(ctx->i32, get_load_cache_policy(ctx, cache_policy), 0);
1347    unsigned func =
1348       !ac_has_vec3_support(ctx->chip_class, true) && num_channels == 3 ? 4 : num_channels;
1349    const char *indexing_kind = structurized ? "struct" : "raw";
1350    char name[256], type_name[8];
1351 
1352    LLVMTypeRef type = func > 1 ? LLVMVectorType(ctx->i32, func) : ctx->i32;
1353    ac_build_type_name_for_intr(type, type_name, sizeof(type_name));
1354 
1355    snprintf(name, sizeof(name), "llvm.amdgcn.%s.tbuffer.load.%s", indexing_kind, type_name);
1356 
1357    return ac_build_intrinsic(ctx, name, type, args, idx, ac_get_load_intr_attribs(can_speculate));
1358 }
1359 
1360 LLVMValueRef ac_build_struct_tbuffer_load(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
1361                                           LLVMValueRef vindex, LLVMValueRef voffset,
1362                                           LLVMValueRef soffset, LLVMValueRef immoffset,
1363                                           unsigned num_channels, unsigned dfmt, unsigned nfmt,
1364                                           unsigned cache_policy, bool can_speculate)
1365 {
1366    return ac_build_tbuffer_load(ctx, rsrc, vindex, voffset, soffset, immoffset, num_channels, dfmt,
1367                                 nfmt, cache_policy, can_speculate, true);
1368 }
1369 
1370 LLVMValueRef ac_build_tbuffer_load_short(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
1371                                          LLVMValueRef voffset, LLVMValueRef soffset,
1372                                          LLVMValueRef immoffset, unsigned cache_policy)
1373 {
1374    voffset = LLVMBuildAdd(ctx->builder, voffset, immoffset, "");
1375 
1376    return ac_build_buffer_load_common(ctx, rsrc, NULL, voffset, soffset, 1, ctx->i16,
1377                                       cache_policy, false, false, false);
1378 }
1379 
1380 LLVMValueRef ac_build_tbuffer_load_byte(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
1381                                         LLVMValueRef voffset, LLVMValueRef soffset,
1382                                         LLVMValueRef immoffset, unsigned cache_policy)
1383 {
1384    voffset = LLVMBuildAdd(ctx->builder, voffset, immoffset, "");
1385 
1386    return ac_build_buffer_load_common(ctx, rsrc, NULL, voffset, soffset, 1, ctx->i8, cache_policy,
1387                                       false, false, false);
1388 }
1389 
1390 /**
1391  * Convert an 11- or 10-bit unsigned floating point number to an f32.
1392  *
1393  * The input exponent is expected to be biased analogously to IEEE-754, i.e. by
1394  * 2^(exp_bits-1) - 1 (as defined in OpenGL and other graphics APIs).
1395  */
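/* Example: for an 11-bit channel (as in R11G11B10_FLOAT), exp_bits = 5 and
 * mant_bits = 6, so the input bias is 2^(5-1) - 1 = 15. A normal value is
 * then converted by shifting the mantissa left by 23 - 6 = 17 bits and
 * adding (127 - 15) << 23 to rebias the exponent, which is exactly the
 * normal_shift/bias_shift path below.
 */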
1396 static LLVMValueRef ac_ufN_to_float(struct ac_llvm_context *ctx, LLVMValueRef src,
1397                                     unsigned exp_bits, unsigned mant_bits)
1398 {
1399    assert(LLVMTypeOf(src) == ctx->i32);
1400 
1401    LLVMValueRef tmp;
1402    LLVMValueRef mantissa;
1403    mantissa =
1404       LLVMBuildAnd(ctx->builder, src, LLVMConstInt(ctx->i32, (1 << mant_bits) - 1, false), "");
1405 
1406    /* Converting normal numbers is just a shift + correcting the exponent bias */
1407    unsigned normal_shift = 23 - mant_bits;
1408    unsigned bias_shift = 127 - ((1 << (exp_bits - 1)) - 1);
1409    LLVMValueRef shifted, normal;
1410 
1411    shifted = LLVMBuildShl(ctx->builder, src, LLVMConstInt(ctx->i32, normal_shift, false), "");
1412    normal =
1413       LLVMBuildAdd(ctx->builder, shifted, LLVMConstInt(ctx->i32, bias_shift << 23, false), "");
1414 
1415    /* Converting nan/inf numbers is the same, but with a different exponent update */
1416    LLVMValueRef naninf;
1417    naninf = LLVMBuildOr(ctx->builder, normal, LLVMConstInt(ctx->i32, 0xff << 23, false), "");
1418 
1419    /* Converting denormals is the complex case: determine the leading zeros of the
1420     * mantissa to obtain the correct shift for the mantissa and exponent correction.
1421     */
1422    LLVMValueRef denormal;
1423    LLVMValueRef params[2] = {
1424       mantissa, ctx->i1true, /* result can be undef when arg is 0 */
1425    };
1426    LLVMValueRef ctlz =
1427       ac_build_intrinsic(ctx, "llvm.ctlz.i32", ctx->i32, params, 2, AC_FUNC_ATTR_READNONE);
1428 
1429    /* Shift such that the leading 1 ends up as the LSB of the exponent field. */
1430    tmp = LLVMBuildSub(ctx->builder, ctlz, LLVMConstInt(ctx->i32, 8, false), "");
1431    denormal = LLVMBuildShl(ctx->builder, mantissa, tmp, "");
1432 
1433    unsigned denormal_exp = bias_shift + (32 - mant_bits) - 1;
1434    tmp = LLVMBuildSub(ctx->builder, LLVMConstInt(ctx->i32, denormal_exp, false), ctlz, "");
1435    tmp = LLVMBuildShl(ctx->builder, tmp, LLVMConstInt(ctx->i32, 23, false), "");
1436    denormal = LLVMBuildAdd(ctx->builder, denormal, tmp, "");
1437 
1438    /* Select the final result. */
1439    LLVMValueRef result;
1440 
1441    tmp = LLVMBuildICmp(ctx->builder, LLVMIntUGE, src,
1442                        LLVMConstInt(ctx->i32, ((1ULL << exp_bits) - 1) << mant_bits, false), "");
1443    result = LLVMBuildSelect(ctx->builder, tmp, naninf, normal, "");
1444 
1445    tmp = LLVMBuildICmp(ctx->builder, LLVMIntUGE, src,
1446                        LLVMConstInt(ctx->i32, 1ULL << mant_bits, false), "");
1447    result = LLVMBuildSelect(ctx->builder, tmp, result, denormal, "");
1448 
1449    tmp = LLVMBuildICmp(ctx->builder, LLVMIntNE, src, ctx->i32_0, "");
1450    result = LLVMBuildSelect(ctx->builder, tmp, result, ctx->i32_0, "");
1451 
1452    return ac_to_float(ctx, result);
1453 }
1454 
1455 /**
1456  * Generate a fully general open coded buffer format fetch with all required
1457  * fixups suitable for vertex fetch, using non-format buffer loads.
1458  *
1459  * Some combinations of argument values have special interpretations:
1460  * - size = 8 bytes, format = fixed indicates PIPE_FORMAT_R11G11B10_FLOAT
1461  * - size = 8 bytes, format != {float,fixed} indicates a 2_10_10_10 data format
1462  *
1463  * \param log_size log(size of channel in bytes)
1464  * \param num_channels number of channels (1 to 4)
1465  * \param format AC_FETCH_FORMAT_xxx value
1466  * \param reverse whether XYZ channels are reversed
1467  * \param known_aligned whether the source is known to be aligned to hardware's
1468  *                      effective element size for loading the given format
1469  *                      (note: this means dword alignment for 8_8_8_8, 16_16, etc.)
1470  * \param rsrc buffer resource descriptor
1471  * \return the resulting vector of floats or integers bitcast to <4 x i32>
1472  */
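/* Illustrative call (a sketch, not taken from an actual caller): fetching a
 * PIPE_FORMAT_R16G16B16A16_UNORM vertex attribute would pass log_size = 1
 * (16-bit channels), num_channels = 4, format = AC_FETCH_FORMAT_UNORM and
 * reverse = false; the UNORM path below then scales each channel by
 * 1 / (2^16 - 1).
 */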
1473 LLVMValueRef ac_build_opencoded_load_format(struct ac_llvm_context *ctx, unsigned log_size,
1474                                             unsigned num_channels, unsigned format, bool reverse,
1475                                             bool known_aligned, LLVMValueRef rsrc,
1476                                             LLVMValueRef vindex, LLVMValueRef voffset,
1477                                             LLVMValueRef soffset, unsigned cache_policy,
1478                                             bool can_speculate)
1479 {
1480    LLVMValueRef tmp;
1481    unsigned load_log_size = log_size;
1482    unsigned load_num_channels = num_channels;
1483    if (log_size == 3) {
1484       load_log_size = 2;
1485       if (format == AC_FETCH_FORMAT_FLOAT) {
1486          load_num_channels = 2 * num_channels;
1487       } else {
1488          load_num_channels = 1; /* 10_11_11 or 2_10_10_10 */
1489       }
1490    }
1491 
1492    int log_recombine = 0;
1493    if ((ctx->chip_class == GFX6 || ctx->chip_class >= GFX10) && !known_aligned) {
1494       /* Avoid alignment restrictions by loading one byte at a time. */
1495       load_num_channels <<= load_log_size;
1496       log_recombine = load_log_size;
1497       load_log_size = 0;
1498    } else if (load_num_channels == 2 || load_num_channels == 4) {
1499       log_recombine = -util_logbase2(load_num_channels);
1500       load_num_channels = 1;
1501       load_log_size += -log_recombine;
1502    }
1503 
1504    LLVMValueRef loads[32]; /* up to 32 bytes */
1505    for (unsigned i = 0; i < load_num_channels; ++i) {
1506       tmp =
1507          LLVMBuildAdd(ctx->builder, soffset, LLVMConstInt(ctx->i32, i << load_log_size, false), "");
1508       LLVMTypeRef channel_type =
1509          load_log_size == 0 ? ctx->i8 : load_log_size == 1 ? ctx->i16 : ctx->i32;
1510       unsigned num_channels = 1 << (MAX2(load_log_size, 2) - 2);
1511       loads[i] =
1512          ac_build_buffer_load_common(ctx, rsrc, vindex, voffset, tmp, num_channels, channel_type,
1513                                      cache_policy, can_speculate, false, true);
1514       if (load_log_size >= 2)
1515          loads[i] = ac_to_integer(ctx, loads[i]);
1516    }
1517 
1518    if (log_recombine > 0) {
1519       /* Recombine bytes if necessary (the unaligned GFX6/GFX10+ path above) */
1520       LLVMTypeRef dst_type = log_recombine == 2 ? ctx->i32 : ctx->i16;
1521 
1522       for (unsigned src = 0, dst = 0; src < load_num_channels; ++dst) {
1523          LLVMValueRef accum = NULL;
1524          for (unsigned i = 0; i < (1 << log_recombine); ++i, ++src) {
1525             tmp = LLVMBuildZExt(ctx->builder, loads[src], dst_type, "");
1526             if (i == 0) {
1527                accum = tmp;
1528             } else {
1529                tmp = LLVMBuildShl(ctx->builder, tmp, LLVMConstInt(dst_type, 8 * i, false), "");
1530                accum = LLVMBuildOr(ctx->builder, accum, tmp, "");
1531             }
1532          }
1533          loads[dst] = accum;
1534       }
1535    } else if (log_recombine < 0) {
1536       /* Split vectors of dwords */
1537       if (load_log_size > 2) {
1538          assert(load_num_channels == 1);
1539          LLVMValueRef loaded = loads[0];
1540          unsigned log_split = load_log_size - 2;
1541          log_recombine += log_split;
1542          load_num_channels = 1 << log_split;
1543          load_log_size = 2;
1544          for (unsigned i = 0; i < load_num_channels; ++i) {
1545             tmp = LLVMConstInt(ctx->i32, i, false);
1546             loads[i] = LLVMBuildExtractElement(ctx->builder, loaded, tmp, "");
1547          }
1548       }
1549 
1550       /* Further split dwords and shorts if required */
1551       if (log_recombine < 0) {
1552          for (unsigned src = load_num_channels, dst = load_num_channels << -log_recombine; src > 0;
1553               --src) {
1554             unsigned dst_bits = 1 << (3 + load_log_size + log_recombine);
1555             LLVMTypeRef dst_type = LLVMIntTypeInContext(ctx->context, dst_bits);
1556             LLVMValueRef loaded = loads[src - 1];
1557             LLVMTypeRef loaded_type = LLVMTypeOf(loaded);
1558             for (unsigned i = 1 << -log_recombine; i > 0; --i, --dst) {
1559                tmp = LLVMConstInt(loaded_type, dst_bits * (i - 1), false);
1560                tmp = LLVMBuildLShr(ctx->builder, loaded, tmp, "");
1561                loads[dst - 1] = LLVMBuildTrunc(ctx->builder, tmp, dst_type, "");
1562             }
1563          }
1564       }
1565    }
1566 
1567    if (log_size == 3) {
1568       if (format == AC_FETCH_FORMAT_FLOAT) {
1569          for (unsigned i = 0; i < num_channels; ++i) {
1570             tmp = ac_build_gather_values(ctx, &loads[2 * i], 2);
1571             loads[i] = LLVMBuildBitCast(ctx->builder, tmp, ctx->f64, "");
1572          }
1573       } else if (format == AC_FETCH_FORMAT_FIXED) {
1574          /* 10_11_11_FLOAT */
1575          LLVMValueRef data = loads[0];
1576          LLVMValueRef i32_2047 = LLVMConstInt(ctx->i32, 2047, false);
1577          LLVMValueRef r = LLVMBuildAnd(ctx->builder, data, i32_2047, "");
1578          tmp = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 11, false), "");
1579          LLVMValueRef g = LLVMBuildAnd(ctx->builder, tmp, i32_2047, "");
1580          LLVMValueRef b = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 22, false), "");
1581 
1582          loads[0] = ac_to_integer(ctx, ac_ufN_to_float(ctx, r, 5, 6));
1583          loads[1] = ac_to_integer(ctx, ac_ufN_to_float(ctx, g, 5, 6));
1584          loads[2] = ac_to_integer(ctx, ac_ufN_to_float(ctx, b, 5, 5));
1585 
1586          num_channels = 3;
1587          log_size = 2;
1588          format = AC_FETCH_FORMAT_FLOAT;
1589       } else {
1590          /* 2_10_10_10 data formats */
1591          LLVMValueRef data = loads[0];
1592          LLVMTypeRef i10 = LLVMIntTypeInContext(ctx->context, 10);
1593          LLVMTypeRef i2 = LLVMIntTypeInContext(ctx->context, 2);
1594          loads[0] = LLVMBuildTrunc(ctx->builder, data, i10, "");
1595          tmp = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 10, false), "");
1596          loads[1] = LLVMBuildTrunc(ctx->builder, tmp, i10, "");
1597          tmp = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 20, false), "");
1598          loads[2] = LLVMBuildTrunc(ctx->builder, tmp, i10, "");
1599          tmp = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 30, false), "");
1600          loads[3] = LLVMBuildTrunc(ctx->builder, tmp, i2, "");
1601 
1602          num_channels = 4;
1603       }
1604    }
1605 
1606    if (format == AC_FETCH_FORMAT_FLOAT) {
1607       if (log_size != 2) {
1608          for (unsigned chan = 0; chan < num_channels; ++chan) {
1609             tmp = ac_to_float(ctx, loads[chan]);
1610             if (log_size == 3)
1611                tmp = LLVMBuildFPTrunc(ctx->builder, tmp, ctx->f32, "");
1612             else if (log_size == 1)
1613                tmp = LLVMBuildFPExt(ctx->builder, tmp, ctx->f32, "");
1614             loads[chan] = ac_to_integer(ctx, tmp);
1615          }
1616       }
1617    } else if (format == AC_FETCH_FORMAT_UINT) {
1618       if (log_size != 2) {
1619          for (unsigned chan = 0; chan < num_channels; ++chan)
1620             loads[chan] = LLVMBuildZExt(ctx->builder, loads[chan], ctx->i32, "");
1621       }
1622    } else if (format == AC_FETCH_FORMAT_SINT) {
1623       if (log_size != 2) {
1624          for (unsigned chan = 0; chan < num_channels; ++chan)
1625             loads[chan] = LLVMBuildSExt(ctx->builder, loads[chan], ctx->i32, "");
1626       }
1627    } else {
1628       bool unsign = format == AC_FETCH_FORMAT_UNORM || format == AC_FETCH_FORMAT_USCALED ||
1629                     format == AC_FETCH_FORMAT_UINT;
1630 
1631       for (unsigned chan = 0; chan < num_channels; ++chan) {
1632          if (unsign) {
1633             tmp = LLVMBuildUIToFP(ctx->builder, loads[chan], ctx->f32, "");
1634          } else {
1635             tmp = LLVMBuildSIToFP(ctx->builder, loads[chan], ctx->f32, "");
1636          }
1637 
1638          LLVMValueRef scale = NULL;
1639          if (format == AC_FETCH_FORMAT_FIXED) {
1640             assert(log_size == 2);
1641             scale = LLVMConstReal(ctx->f32, 1.0 / 0x10000);
1642          } else if (format == AC_FETCH_FORMAT_UNORM) {
1643             unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(loads[chan]));
1644             scale = LLVMConstReal(ctx->f32, 1.0 / (((uint64_t)1 << bits) - 1));
1645          } else if (format == AC_FETCH_FORMAT_SNORM) {
1646             unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(loads[chan]));
1647             scale = LLVMConstReal(ctx->f32, 1.0 / (((uint64_t)1 << (bits - 1)) - 1));
1648          }
1649          if (scale)
1650             tmp = LLVMBuildFMul(ctx->builder, tmp, scale, "");
1651 
1652          if (format == AC_FETCH_FORMAT_SNORM) {
1653             /* Clamp to [-1, 1] */
1654             LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
1655             LLVMValueRef clamp = LLVMBuildFCmp(ctx->builder, LLVMRealULT, tmp, neg_one, "");
1656             tmp = LLVMBuildSelect(ctx->builder, clamp, neg_one, tmp, "");
1657          }
1658 
1659          loads[chan] = ac_to_integer(ctx, tmp);
1660       }
1661    }
1662 
1663    while (num_channels < 4) {
1664       if (format == AC_FETCH_FORMAT_UINT || format == AC_FETCH_FORMAT_SINT) {
1665          loads[num_channels] = num_channels == 3 ? ctx->i32_1 : ctx->i32_0;
1666       } else {
1667          loads[num_channels] = ac_to_integer(ctx, num_channels == 3 ? ctx->f32_1 : ctx->f32_0);
1668       }
1669       num_channels++;
1670    }
1671 
1672    if (reverse) {
1673       tmp = loads[0];
1674       loads[0] = loads[2];
1675       loads[2] = tmp;
1676    }
1677 
1678    return ac_build_gather_values(ctx, loads, 4);
1679 }
1680 
1681 static void ac_build_tbuffer_store(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
1682                                    LLVMValueRef vdata, LLVMValueRef vindex, LLVMValueRef voffset,
1683                                    LLVMValueRef soffset, LLVMValueRef immoffset,
1684                                    unsigned num_channels, unsigned dfmt, unsigned nfmt,
1685                                    unsigned cache_policy)
1686 {
1687    voffset = LLVMBuildAdd(ctx->builder, voffset ? voffset : ctx->i32_0, immoffset, "");
1688 
1689    LLVMValueRef args[7];
1690    int idx = 0;
1691    args[idx++] = vdata;
1692    args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
1693    if (vindex)
1694       args[idx++] = vindex ? vindex : ctx->i32_0;
1695    args[idx++] = voffset ? voffset : ctx->i32_0;
1696    args[idx++] = soffset ? soffset : ctx->i32_0;
1697    args[idx++] = LLVMConstInt(ctx->i32, ac_get_tbuffer_format(ctx->chip_class, dfmt, nfmt), 0);
1698    args[idx++] = LLVMConstInt(ctx->i32, cache_policy, 0);
1699    unsigned func =
1700       !ac_has_vec3_support(ctx->chip_class, true) && num_channels == 3 ? 4 : num_channels;
1701    const char *indexing_kind = vindex ? "struct" : "raw";
1702    char name[256], type_name[8];
1703 
1704    LLVMTypeRef type = func > 1 ? LLVMVectorType(ctx->i32, func) : ctx->i32;
1705    ac_build_type_name_for_intr(type, type_name, sizeof(type_name));
1706 
1707    snprintf(name, sizeof(name), "llvm.amdgcn.%s.tbuffer.store.%s", indexing_kind, type_name);
1708 
1709    ac_build_intrinsic(ctx, name, ctx->voidt, args, idx, AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY);
1710 }
1711 
1712 void ac_build_struct_tbuffer_store(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
1713                                    LLVMValueRef vdata, LLVMValueRef vindex, LLVMValueRef voffset,
1714                                    LLVMValueRef soffset, LLVMValueRef immoffset,
1715                                    unsigned num_channels, unsigned dfmt, unsigned nfmt,
1716                                    unsigned cache_policy)
1717 {
1718    ac_build_tbuffer_store(ctx, rsrc, vdata, vindex, voffset, soffset, immoffset, num_channels, dfmt,
1719                           nfmt, cache_policy);
1720 }
1721 
1722 void ac_build_raw_tbuffer_store(struct ac_llvm_context *ctx, LLVMValueRef rsrc, LLVMValueRef vdata,
1723                                 LLVMValueRef voffset, LLVMValueRef soffset, LLVMValueRef immoffset,
1724                                 unsigned num_channels, unsigned dfmt, unsigned nfmt,
1725                                 unsigned cache_policy)
1726 {
1727    ac_build_tbuffer_store(ctx, rsrc, vdata, NULL, voffset, soffset, immoffset, num_channels, dfmt,
1728                           nfmt, cache_policy);
1729 }
1730 
1731 void ac_build_tbuffer_store_short(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
1732                                   LLVMValueRef vdata, LLVMValueRef voffset, LLVMValueRef soffset,
1733                                   unsigned cache_policy)
1734 {
1735    vdata = LLVMBuildBitCast(ctx->builder, vdata, ctx->i16, "");
1736 
1737    ac_build_buffer_store_common(ctx, rsrc, vdata, NULL, voffset, soffset, cache_policy, false);
1738 }
1739 
1740 void ac_build_tbuffer_store_byte(struct ac_llvm_context *ctx, LLVMValueRef rsrc, LLVMValueRef vdata,
1741                                  LLVMValueRef voffset, LLVMValueRef soffset, unsigned cache_policy)
1742 {
1743    vdata = LLVMBuildBitCast(ctx->builder, vdata, ctx->i8, "");
1744 
1745    ac_build_buffer_store_common(ctx, rsrc, vdata, NULL, voffset, soffset, cache_policy, false);
1746 }
1747 
1748 /**
1749  * Set range metadata on an instruction.  This can only be used on load and
1750  * call instructions.  If you know an instruction can only produce the values
1751  * 0, 1, 2, you would do set_range_metadata(value, 0, 3);
1752  * \p lo is the minimum value inclusive.
1753  * \p hi is the maximum value exclusive.
1754  */
1755 void ac_set_range_metadata(struct ac_llvm_context *ctx, LLVMValueRef value, unsigned lo,
1756                            unsigned hi)
1757 {
1758    LLVMValueRef range_md, md_args[2];
1759    LLVMTypeRef type = LLVMTypeOf(value);
1760    LLVMContextRef context = LLVMGetTypeContext(type);
1761 
1762    md_args[0] = LLVMConstInt(type, lo, false);
1763    md_args[1] = LLVMConstInt(type, hi, false);
1764    range_md = LLVMMDNodeInContext(context, md_args, 2);
1765    LLVMSetMetadata(value, ctx->range_md_kind, range_md);
1766 }
1767 
1768 LLVMValueRef ac_get_thread_id(struct ac_llvm_context *ctx)
1769 {
1770    return ac_build_mbcnt(ctx, LLVMConstInt(ctx->iN_wavemask, ~0ull, 0));
1771 }
1772 
1773 /*
1774  * AMD GCN implements derivatives using the local data store (LDS)
1775  * All writes to the LDS happen in all executing threads at
1776  * the same time. TID is the Thread ID for the current
1777  * thread and is a value between 0 and 63, representing
1778  * the thread's position in the wavefront.
1779  *
1780  * For the pixel shader, threads are grouped into quads of four pixels.
1781  * The TIDs of the pixels of a quad are:
1782  *
1783  *  +------+------+
1784  *  |4n + 0|4n + 1|
1785  *  +------+------+
1786  *  |4n + 2|4n + 3|
1787  *  +------+------+
1788  *
1789  * So, masking the TID with 0xfffffffc yields the TID of the top left pixel
1790  * of the quad, masking with 0xfffffffd yields the TID of the top pixel of
1791  * the current pixel's column, and masking with 0xfffffffe yields the TID
1792  * of the left pixel of the current pixel's row.
1793  *
1794  * Adding 1 yields the TID of the pixel to the right of the left pixel, and
1795  * adding 2 yields the TID of the pixel below the top pixel.
1796  */
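/* Example: with this numbering a fine ddx can be built from mask =
 * 0xfffffffe and idx = 1 (right pixel minus the left pixel of the row), and
 * a fine ddy from mask = 0xfffffffd and idx = 2 (bottom pixel minus the top
 * pixel of the column); ac_build_ddxy() computes trbl - tl with exactly
 * that lane selection.
 */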
1797 LLVMValueRef ac_build_ddxy(struct ac_llvm_context *ctx, uint32_t mask, int idx, LLVMValueRef val)
1798 {
1799    unsigned tl_lanes[4], trbl_lanes[4];
1800    char name[32], type[8];
1801    LLVMValueRef tl, trbl;
1802    LLVMTypeRef result_type;
1803    LLVMValueRef result;
1804 
1805    result_type = ac_to_float_type(ctx, LLVMTypeOf(val));
1806 
1807    if (result_type == ctx->f16)
1808       val = LLVMBuildZExt(ctx->builder, val, ctx->i32, "");
1809    else if (result_type == ctx->v2f16)
1810       val = LLVMBuildBitCast(ctx->builder, val, ctx->i32, "");
1811 
1812    for (unsigned i = 0; i < 4; ++i) {
1813       tl_lanes[i] = i & mask;
1814       trbl_lanes[i] = (i & mask) + idx;
1815    }
1816 
1817    tl = ac_build_quad_swizzle(ctx, val, tl_lanes[0], tl_lanes[1], tl_lanes[2], tl_lanes[3]);
1818    trbl =
1819       ac_build_quad_swizzle(ctx, val, trbl_lanes[0], trbl_lanes[1], trbl_lanes[2], trbl_lanes[3]);
1820 
1821    if (result_type == ctx->f16) {
1822       tl = LLVMBuildTrunc(ctx->builder, tl, ctx->i16, "");
1823       trbl = LLVMBuildTrunc(ctx->builder, trbl, ctx->i16, "");
1824    }
1825 
1826    tl = LLVMBuildBitCast(ctx->builder, tl, result_type, "");
1827    trbl = LLVMBuildBitCast(ctx->builder, trbl, result_type, "");
1828    result = LLVMBuildFSub(ctx->builder, trbl, tl, "");
1829 
1830    ac_build_type_name_for_intr(result_type, type, sizeof(type));
1831    snprintf(name, sizeof(name), "llvm.amdgcn.wqm.%s", type);
1832 
1833    return ac_build_intrinsic(ctx, name, result_type, &result, 1, 0);
1834 }
1835 
1836 void ac_build_sendmsg(struct ac_llvm_context *ctx, uint32_t msg, LLVMValueRef wave_id)
1837 {
1838    LLVMValueRef args[2];
1839    args[0] = LLVMConstInt(ctx->i32, msg, false);
1840    args[1] = wave_id;
1841    ac_build_intrinsic(ctx, "llvm.amdgcn.s.sendmsg", ctx->voidt, args, 2, 0);
1842 }
1843 
1844 LLVMValueRef ac_build_imsb(struct ac_llvm_context *ctx, LLVMValueRef arg, LLVMTypeRef dst_type)
1845 {
1846    LLVMValueRef msb =
1847       ac_build_intrinsic(ctx, "llvm.amdgcn.sffbh.i32", dst_type, &arg, 1, AC_FUNC_ATTR_READNONE);
1848 
1849    /* The HW returns the last bit index from MSB, but NIR/TGSI wants
1850     * the index from LSB. Invert it by doing "31 - msb". */
1851    msb = LLVMBuildSub(ctx->builder, LLVMConstInt(ctx->i32, 31, false), msb, "");
1852 
1853    LLVMValueRef all_ones = LLVMConstInt(ctx->i32, -1, true);
1854    LLVMValueRef cond =
1855       LLVMBuildOr(ctx->builder, LLVMBuildICmp(ctx->builder, LLVMIntEQ, arg, ctx->i32_0, ""),
1856                   LLVMBuildICmp(ctx->builder, LLVMIntEQ, arg, all_ones, ""), "");
1857 
1858    return LLVMBuildSelect(ctx->builder, cond, all_ones, msb, "");
1859 }
1860 
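/* Find the most significant set bit, counted from the LSB; e.g. an input of
 * 0x10 yields 4, and an input of 0 yields -1 (no bit set).
 */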
1861 LLVMValueRef ac_build_umsb(struct ac_llvm_context *ctx, LLVMValueRef arg, LLVMTypeRef dst_type)
1862 {
1863    const char *intrin_name;
1864    LLVMTypeRef type;
1865    LLVMValueRef highest_bit;
1866    LLVMValueRef zero;
1867    unsigned bitsize;
1868 
1869    bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(arg));
1870    switch (bitsize) {
1871    case 64:
1872       intrin_name = "llvm.ctlz.i64";
1873       type = ctx->i64;
1874       highest_bit = LLVMConstInt(ctx->i64, 63, false);
1875       zero = ctx->i64_0;
1876       break;
1877    case 32:
1878       intrin_name = "llvm.ctlz.i32";
1879       type = ctx->i32;
1880       highest_bit = LLVMConstInt(ctx->i32, 31, false);
1881       zero = ctx->i32_0;
1882       break;
1883    case 16:
1884       intrin_name = "llvm.ctlz.i16";
1885       type = ctx->i16;
1886       highest_bit = LLVMConstInt(ctx->i16, 15, false);
1887       zero = ctx->i16_0;
1888       break;
1889    case 8:
1890       intrin_name = "llvm.ctlz.i8";
1891       type = ctx->i8;
1892       highest_bit = LLVMConstInt(ctx->i8, 7, false);
1893       zero = ctx->i8_0;
1894       break;
1895    default:
1896       unreachable(!"invalid bitsize");
1897       break;
1898    }
1899 
1900    LLVMValueRef params[2] = {
1901       arg,
1902       ctx->i1true,
1903    };
1904 
1905    LLVMValueRef msb = ac_build_intrinsic(ctx, intrin_name, type, params, 2, AC_FUNC_ATTR_READNONE);
1906 
1907    /* The HW returns the last bit index from MSB, but TGSI/NIR wants
1908     * the index from LSB. Invert it by doing "31 - msb". */
1909    msb = LLVMBuildSub(ctx->builder, highest_bit, msb, "");
1910 
1911    if (bitsize == 64) {
1912       msb = LLVMBuildTrunc(ctx->builder, msb, ctx->i32, "");
1913    } else if (bitsize < 32) {
1914       msb = LLVMBuildSExt(ctx->builder, msb, ctx->i32, "");
1915    }
1916 
1917    /* check for zero */
1918    return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder, LLVMIntEQ, arg, zero, ""),
1919                           LLVMConstInt(ctx->i32, -1, true), msb, "");
1920 }
1921 
1922 LLVMValueRef ac_build_fmin(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
1923 {
1924    char name[64], type[64];
1925 
1926    ac_build_type_name_for_intr(LLVMTypeOf(a), type, sizeof(type));
1927    snprintf(name, sizeof(name), "llvm.minnum.%s", type);
1928    LLVMValueRef args[2] = {a, b};
1929    return ac_build_intrinsic(ctx, name, LLVMTypeOf(a), args, 2, AC_FUNC_ATTR_READNONE);
1930 }
1931 
1932 LLVMValueRef ac_build_fmax(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
1933 {
1934    char name[64], type[64];
1935 
1936    ac_build_type_name_for_intr(LLVMTypeOf(a), type, sizeof(type));
1937    snprintf(name, sizeof(name), "llvm.maxnum.%s", type);
1938    LLVMValueRef args[2] = {a, b};
1939    return ac_build_intrinsic(ctx, name, LLVMTypeOf(a), args, 2, AC_FUNC_ATTR_READNONE);
1940 }
1941 
1942 LLVMValueRef ac_build_imin(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
1943 {
1944    LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntSLE, a, b, "");
1945    return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
1946 }
1947 
1948 LLVMValueRef ac_build_imax(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
1949 {
1950    LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntSGT, a, b, "");
1951    return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
1952 }
1953 
1954 LLVMValueRef ac_build_umin(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
1955 {
1956    LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntULE, a, b, "");
1957    return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
1958 }
1959 
1960 LLVMValueRef ac_build_umax(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
1961 {
1962    LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntUGE, a, b, "");
1963    return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
1964 }
1965 
1966 LLVMValueRef ac_build_clamp(struct ac_llvm_context *ctx, LLVMValueRef value)
1967 {
1968    LLVMTypeRef t = LLVMTypeOf(value);
1969    return ac_build_fmin(ctx, ac_build_fmax(ctx, value, LLVMConstReal(t, 0.0)),
1970                         LLVMConstReal(t, 1.0));
1971 }
1972 
1973 void ac_build_export(struct ac_llvm_context *ctx, struct ac_export_args *a)
1974 {
1975    LLVMValueRef args[9];
1976 
1977    args[0] = LLVMConstInt(ctx->i32, a->target, 0);
1978    args[1] = LLVMConstInt(ctx->i32, a->enabled_channels, 0);
1979 
1980    if (a->compr) {
1981       args[2] = LLVMBuildBitCast(ctx->builder, a->out[0], ctx->v2i16, "");
1982       args[3] = LLVMBuildBitCast(ctx->builder, a->out[1], ctx->v2i16, "");
1983       args[4] = LLVMConstInt(ctx->i1, a->done, 0);
1984       args[5] = LLVMConstInt(ctx->i1, a->valid_mask, 0);
1985 
1986       ac_build_intrinsic(ctx, "llvm.amdgcn.exp.compr.v2i16", ctx->voidt, args, 6, 0);
1987    } else {
1988       args[2] = a->out[0];
1989       args[3] = a->out[1];
1990       args[4] = a->out[2];
1991       args[5] = a->out[3];
1992       args[6] = LLVMConstInt(ctx->i1, a->done, 0);
1993       args[7] = LLVMConstInt(ctx->i1, a->valid_mask, 0);
1994 
1995       ac_build_intrinsic(ctx, "llvm.amdgcn.exp.f32", ctx->voidt, args, 8, 0);
1996    }
1997 }
1998 
1999 void ac_build_export_null(struct ac_llvm_context *ctx)
2000 {
2001    struct ac_export_args args;
2002 
2003    args.enabled_channels = 0x0; /* enabled channels */
2004    args.valid_mask = 1;         /* whether the EXEC mask is valid */
2005    args.done = 1;               /* DONE bit */
2006    args.target = V_008DFC_SQ_EXP_NULL;
2007    args.compr = 0;                       /* COMPR flag (0 = 32-bit export) */
2008    args.out[0] = LLVMGetUndef(ctx->f32); /* R */
2009    args.out[1] = LLVMGetUndef(ctx->f32); /* G */
2010    args.out[2] = LLVMGetUndef(ctx->f32); /* B */
2011    args.out[3] = LLVMGetUndef(ctx->f32); /* A */
2012 
2013    ac_build_export(ctx, &args);
2014 }
2015 
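/* Number of coordinate components for a given image dimension, including
 * the array layer and, for MSAA images, the fragment/sample index (e.g.
 * ac_image_2darraymsaa needs x, y, layer and sample = 4 coordinates).
 */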
2016 static unsigned ac_num_coords(enum ac_image_dim dim)
2017 {
2018    switch (dim) {
2019    case ac_image_1d:
2020       return 1;
2021    case ac_image_2d:
2022    case ac_image_1darray:
2023       return 2;
2024    case ac_image_3d:
2025    case ac_image_cube:
2026    case ac_image_2darray:
2027    case ac_image_2dmsaa:
2028       return 3;
2029    case ac_image_2darraymsaa:
2030       return 4;
2031    default:
2032       unreachable("ac_num_coords: bad dim");
2033    }
2034 }
2035 
2036 static unsigned ac_num_derivs(enum ac_image_dim dim)
2037 {
2038    switch (dim) {
2039    case ac_image_1d:
2040    case ac_image_1darray:
2041       return 2;
2042    case ac_image_2d:
2043    case ac_image_2darray:
2044    case ac_image_cube:
2045       return 4;
2046    case ac_image_3d:
2047       return 6;
2048    case ac_image_2dmsaa:
2049    case ac_image_2darraymsaa:
2050    default:
2051       unreachable("derivatives not supported");
2052    }
2053 }
2054 
2055 static const char *get_atomic_name(enum ac_atomic_op op)
2056 {
2057    switch (op) {
2058    case ac_atomic_swap:
2059       return "swap";
2060    case ac_atomic_add:
2061       return "add";
2062    case ac_atomic_sub:
2063       return "sub";
2064    case ac_atomic_smin:
2065       return "smin";
2066    case ac_atomic_umin:
2067       return "umin";
2068    case ac_atomic_smax:
2069       return "smax";
2070    case ac_atomic_umax:
2071       return "umax";
2072    case ac_atomic_and:
2073       return "and";
2074    case ac_atomic_or:
2075       return "or";
2076    case ac_atomic_xor:
2077       return "xor";
2078    case ac_atomic_inc_wrap:
2079       return "inc";
2080    case ac_atomic_dec_wrap:
2081       return "dec";
2082    case ac_atomic_fmin:
2083       return "fmin";
2084    case ac_atomic_fmax:
2085       return "fmax";
2086    }
2087    unreachable("bad atomic op");
2088 }
2089 
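/* Build an llvm.amdgcn.image.* intrinsic call. The intrinsic name is
 * assembled from the opcode, the optional sample/gather modifiers (.c,
 * .b/.l/.d/.lz, .cl, .o), the dimension and the data/coordinate type
 * overloads, matching the snprintf near the end of this function.
 */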
2090 LLVMValueRef ac_build_image_opcode(struct ac_llvm_context *ctx, struct ac_image_args *a)
2091 {
2092    const char *overload[3] = {"", "", ""};
2093    unsigned num_overloads = 0;
2094    LLVMValueRef args[18];
2095    unsigned num_args = 0;
2096    enum ac_image_dim dim = a->dim;
2097 
2098    assert(!a->lod || a->lod == ctx->i32_0 || a->lod == ctx->f32_0 || !a->level_zero);
2099    assert((a->opcode != ac_image_get_resinfo && a->opcode != ac_image_load_mip &&
2100            a->opcode != ac_image_store_mip) ||
2101           a->lod);
2102    assert(a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
2103           (!a->compare && !a->offset));
2104    assert((a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
2105            a->opcode == ac_image_get_lod) ||
2106           !a->bias);
2107    assert((a->bias ? 1 : 0) + (a->lod ? 1 : 0) + (a->level_zero ? 1 : 0) + (a->derivs[0] ? 1 : 0) <=
2108           1);
2109    assert((a->min_lod ? 1 : 0) + (a->lod ? 1 : 0) + (a->level_zero ? 1 : 0) <= 1);
2110    assert(!a->d16 || (ctx->chip_class >= GFX8 && a->opcode != ac_image_atomic &&
2111                       a->opcode != ac_image_atomic_cmpswap && a->opcode != ac_image_get_lod &&
2112                       a->opcode != ac_image_get_resinfo));
2113    assert(!a->a16 || ctx->chip_class >= GFX9);
2114    assert(a->g16 == a->a16 || ctx->chip_class >= GFX10);
2115 
2116    assert(!a->offset ||
2117           ac_get_elem_bits(ctx, LLVMTypeOf(a->offset)) == 32);
2118    assert(!a->bias ||
2119           ac_get_elem_bits(ctx, LLVMTypeOf(a->bias)) == 32);
2120    assert(!a->compare ||
2121           ac_get_elem_bits(ctx, LLVMTypeOf(a->compare)) == 32);
2122    assert(!a->derivs[0] ||
2123           ((!a->g16 || ac_get_elem_bits(ctx, LLVMTypeOf(a->derivs[0])) == 16) &&
2124            (a->g16 || ac_get_elem_bits(ctx, LLVMTypeOf(a->derivs[0])) == 32)));
2125    assert(!a->coords[0] ||
2126           ((!a->a16 || ac_get_elem_bits(ctx, LLVMTypeOf(a->coords[0])) == 16) &&
2127            (a->a16 || ac_get_elem_bits(ctx, LLVMTypeOf(a->coords[0])) == 32)));
2128    assert(!a->lod ||
2129           ((a->opcode != ac_image_get_resinfo || ac_get_elem_bits(ctx, LLVMTypeOf(a->lod))) &&
2130            (a->opcode == ac_image_get_resinfo ||
2131             ac_get_elem_bits(ctx, LLVMTypeOf(a->lod)) ==
2132             ac_get_elem_bits(ctx, LLVMTypeOf(a->coords[0])))));
2133    assert(!a->min_lod ||
2134           ac_get_elem_bits(ctx, LLVMTypeOf(a->min_lod)) ==
2135           ac_get_elem_bits(ctx, LLVMTypeOf(a->coords[0])));
2136 
2137    if (a->opcode == ac_image_get_lod) {
2138       switch (dim) {
2139       case ac_image_1darray:
2140          dim = ac_image_1d;
2141          break;
2142       case ac_image_2darray:
2143       case ac_image_cube:
2144          dim = ac_image_2d;
2145          break;
2146       default:
2147          break;
2148       }
2149    }
2150 
2151    bool sample = a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
2152                  a->opcode == ac_image_get_lod;
2153    bool atomic = a->opcode == ac_image_atomic || a->opcode == ac_image_atomic_cmpswap;
2154    bool load = a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
2155                a->opcode == ac_image_load || a->opcode == ac_image_load_mip;
2156    LLVMTypeRef coord_type = sample ? (a->a16 ? ctx->f16 : ctx->f32) : (a->a16 ? ctx->i16 : ctx->i32);
2157    uint8_t dmask = a->dmask;
2158    LLVMTypeRef data_type;
2159    char data_type_str[32];
2160 
2161    if (atomic) {
2162       data_type = LLVMTypeOf(a->data[0]);
2163    } else if (a->opcode == ac_image_store || a->opcode == ac_image_store_mip) {
2164       /* Image stores might have been shrunk using the format. */
2165       data_type = LLVMTypeOf(a->data[0]);
2166       dmask = (1 << ac_get_llvm_num_components(a->data[0])) - 1;
2167    } else {
2168       data_type = a->d16 ? ctx->v4f16 : ctx->v4f32;
2169    }
2170 
2171    if (a->tfe) {
2172       data_type = LLVMStructTypeInContext(
2173          ctx->context, (LLVMTypeRef[]){data_type, ctx->i32}, 2, false);
2174    }
2175 
2176    if (atomic || a->opcode == ac_image_store || a->opcode == ac_image_store_mip) {
2177       args[num_args++] = a->data[0];
2178       if (a->opcode == ac_image_atomic_cmpswap)
2179          args[num_args++] = a->data[1];
2180    }
2181 
2182    if (!atomic)
2183       args[num_args++] = LLVMConstInt(ctx->i32, dmask, false);
2184 
2185    if (a->offset)
2186       args[num_args++] = ac_to_integer(ctx, a->offset);
2187    if (a->bias) {
2188       args[num_args++] = ac_to_float(ctx, a->bias);
2189       overload[num_overloads++] = ".f32";
2190    }
2191    if (a->compare)
2192       args[num_args++] = ac_to_float(ctx, a->compare);
2193    if (a->derivs[0]) {
2194       unsigned count = ac_num_derivs(dim);
2195       for (unsigned i = 0; i < count; ++i)
2196          args[num_args++] = ac_to_float(ctx, a->derivs[i]);
2197       overload[num_overloads++] = a->g16 ? ".f16" : ".f32";
2198    }
2199    unsigned num_coords = a->opcode != ac_image_get_resinfo ? ac_num_coords(dim) : 0;
2200    for (unsigned i = 0; i < num_coords; ++i)
2201       args[num_args++] = LLVMBuildBitCast(ctx->builder, a->coords[i], coord_type, "");
2202    if (a->lod)
2203       args[num_args++] = LLVMBuildBitCast(ctx->builder, a->lod, coord_type, "");
2204    if (a->min_lod)
2205       args[num_args++] = LLVMBuildBitCast(ctx->builder, a->min_lod, coord_type, "");
2206 
2207    overload[num_overloads++] = sample ? (a->a16 ? ".f16" : ".f32") : (a->a16 ? ".i16" : ".i32");
2208 
2209    args[num_args++] = a->resource;
2210    if (sample) {
2211       args[num_args++] = a->sampler;
2212       args[num_args++] = LLVMConstInt(ctx->i1, a->unorm, false);
2213    }
2214 
2215    args[num_args++] = a->tfe ? ctx->i32_1 : ctx->i32_0; /* texfailctrl */
2216    args[num_args++] = LLVMConstInt(
2217       ctx->i32, load ? get_load_cache_policy(ctx, a->cache_policy) : a->cache_policy, false);
2218 
2219    const char *name;
2220    const char *atomic_subop = "";
2221    switch (a->opcode) {
2222    case ac_image_sample:
2223       name = "sample";
2224       break;
2225    case ac_image_gather4:
2226       name = "gather4";
2227       break;
2228    case ac_image_load:
2229       name = "load";
2230       break;
2231    case ac_image_load_mip:
2232       name = "load.mip";
2233       break;
2234    case ac_image_store:
2235       name = "store";
2236       break;
2237    case ac_image_store_mip:
2238       name = "store.mip";
2239       break;
2240    case ac_image_atomic:
2241       name = "atomic.";
2242       atomic_subop = get_atomic_name(a->atomic);
2243       break;
2244    case ac_image_atomic_cmpswap:
2245       name = "atomic.";
2246       atomic_subop = "cmpswap";
2247       break;
2248    case ac_image_get_lod:
2249       name = "getlod";
2250       break;
2251    case ac_image_get_resinfo:
2252       name = "getresinfo";
2253       break;
2254    default:
2255       unreachable("invalid image opcode");
2256    }
2257 
2258    const char *dimname;
2259    switch (dim) {
2260    case ac_image_1d:
2261       dimname = "1d";
2262       break;
2263    case ac_image_2d:
2264       dimname = "2d";
2265       break;
2266    case ac_image_3d:
2267       dimname = "3d";
2268       break;
2269    case ac_image_cube:
2270       dimname = "cube";
2271       break;
2272    case ac_image_1darray:
2273       dimname = "1darray";
2274       break;
2275    case ac_image_2darray:
2276       dimname = "2darray";
2277       break;
2278    case ac_image_2dmsaa:
2279       dimname = "2dmsaa";
2280       break;
2281    case ac_image_2darraymsaa:
2282       dimname = "2darraymsaa";
2283       break;
2284    default:
2285       unreachable("invalid dim");
2286    }
2287 
2288    ac_build_type_name_for_intr(data_type, data_type_str, sizeof(data_type_str));
2289 
2290    bool lod_suffix = a->lod && (a->opcode == ac_image_sample || a->opcode == ac_image_gather4);
2291    char intr_name[96];
2292    snprintf(intr_name, sizeof(intr_name),
2293             "llvm.amdgcn.image.%s%s" /* base name */
2294             "%s%s%s%s"               /* sample/gather modifiers */
2295             ".%s.%s%s%s%s",          /* dimension and type overloads */
2296             name, atomic_subop, a->compare ? ".c" : "",
2297             a->bias ? ".b" : lod_suffix ? ".l" : a->derivs[0] ? ".d" : a->level_zero ? ".lz" : "",
2298             a->min_lod ? ".cl" : "", a->offset ? ".o" : "", dimname,
2299             data_type_str, overload[0], overload[1], overload[2]);
2300 
2301    LLVMTypeRef retty;
2302    if (a->opcode == ac_image_store || a->opcode == ac_image_store_mip)
2303       retty = ctx->voidt;
2304    else
2305       retty = data_type;
2306 
2307    LLVMValueRef result = ac_build_intrinsic(ctx, intr_name, retty, args, num_args, a->attributes);
2308    if (a->tfe) {
2309       LLVMValueRef texel = LLVMBuildExtractValue(ctx->builder, result, 0, "");
2310       LLVMValueRef code = LLVMBuildExtractValue(ctx->builder, result, 1, "");
2311       result = ac_build_concat(ctx, texel, ac_to_float(ctx, code));
2312    }
2313 
2314    if (!sample && !atomic && retty != ctx->voidt)
2315       result = ac_to_integer(ctx, result);
2316 
2317    return result;
2318 }
2319 
2320 LLVMValueRef ac_build_image_get_sample_count(struct ac_llvm_context *ctx, LLVMValueRef rsrc)
2321 {
2322    LLVMValueRef samples;
2323 
2324    /* Read the samples from the descriptor directly.
2325     * Hardware doesn't have any instruction for this.
2326     */
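   /* Bits [19:16] of descriptor dword 3 hold log2(sample count), so shifting
    * by 16, masking with 0xf and computing 1 << value recovers the count.
    */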
2327    samples = LLVMBuildExtractElement(ctx->builder, rsrc, LLVMConstInt(ctx->i32, 3, 0), "");
2328    samples = LLVMBuildLShr(ctx->builder, samples, LLVMConstInt(ctx->i32, 16, 0), "");
2329    samples = LLVMBuildAnd(ctx->builder, samples, LLVMConstInt(ctx->i32, 0xf, 0), "");
2330    samples = LLVMBuildShl(ctx->builder, ctx->i32_1, samples, "");
2331    return samples;
2332 }
2333 
2334 LLVMValueRef ac_build_cvt_pkrtz_f16(struct ac_llvm_context *ctx, LLVMValueRef args[2])
2335 {
2336    return ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pkrtz", ctx->v2f16, args, 2,
2337                              AC_FUNC_ATTR_READNONE);
2338 }
2339 
2340 LLVMValueRef ac_build_cvt_pknorm_i16(struct ac_llvm_context *ctx, LLVMValueRef args[2])
2341 {
2342    LLVMValueRef res = ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pknorm.i16", ctx->v2i16, args, 2,
2343                                          AC_FUNC_ATTR_READNONE);
2344    return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
2345 }
2346 
2347 LLVMValueRef ac_build_cvt_pknorm_u16(struct ac_llvm_context *ctx, LLVMValueRef args[2])
2348 {
2349    LLVMValueRef res = ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pknorm.u16", ctx->v2i16, args, 2,
2350                                          AC_FUNC_ATTR_READNONE);
2351    return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
2352 }
2353 
2354 LLVMValueRef ac_build_cvt_pknorm_i16_f16(struct ac_llvm_context *ctx,
2355                                          LLVMValueRef args[2])
2356 {
2357    LLVMTypeRef param_types[] = {ctx->f16, ctx->f16};
2358    LLVMTypeRef calltype = LLVMFunctionType(ctx->i32, param_types, 2, false);
2359    LLVMValueRef code = LLVMConstInlineAsm(calltype,
2360                                           "v_cvt_pknorm_i16_f16 $0, $1, $2", "=v,v,v",
2361                                           false, false);
2362    return LLVMBuildCall(ctx->builder, code, args, 2, "");
2363 }
2364 
2365 LLVMValueRef ac_build_cvt_pknorm_u16_f16(struct ac_llvm_context *ctx,
2366                                          LLVMValueRef args[2])
2367 {
2368    LLVMTypeRef param_types[] = {ctx->f16, ctx->f16};
2369    LLVMTypeRef calltype = LLVMFunctionType(ctx->i32, param_types, 2, false);
2370    LLVMValueRef code = LLVMConstInlineAsm(calltype,
2371                                           "v_cvt_pknorm_u16_f16 $0, $1, $2", "=v,v,v",
2372                                           false, false);
2373    return LLVMBuildCall(ctx->builder, code, args, 2, "");
2374 }
2375 
2376 /* The 8-bit and 10-bit clamping is for HW workarounds. */
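/* E.g. for bits == 10 the RGB channels are clamped to [-512, 511] and the
 * second (alpha) value, when hi is set, to [-2, 1] before packing.
 */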
2377 LLVMValueRef ac_build_cvt_pk_i16(struct ac_llvm_context *ctx, LLVMValueRef args[2], unsigned bits,
2378                                  bool hi)
2379 {
2380    assert(bits == 8 || bits == 10 || bits == 16);
2381 
2382    LLVMValueRef max_rgb = LLVMConstInt(ctx->i32, bits == 8 ? 127 : bits == 10 ? 511 : 32767, 0);
2383    LLVMValueRef min_rgb = LLVMConstInt(ctx->i32, bits == 8 ? -128 : bits == 10 ? -512 : -32768, 0);
2384    LLVMValueRef max_alpha = bits != 10 ? max_rgb : ctx->i32_1;
2385    LLVMValueRef min_alpha = bits != 10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);
2386 
2387    /* Clamp. */
2388    if (bits != 16) {
2389       for (int i = 0; i < 2; i++) {
2390          bool alpha = hi && i == 1;
2391          args[i] = ac_build_imin(ctx, args[i], alpha ? max_alpha : max_rgb);
2392          args[i] = ac_build_imax(ctx, args[i], alpha ? min_alpha : min_rgb);
2393       }
2394    }
2395 
2396    LLVMValueRef res =
2397       ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pk.i16", ctx->v2i16, args, 2, AC_FUNC_ATTR_READNONE);
2398    return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
2399 }
2400 
2401 /* The 8-bit and 10-bit clamping is for HW workarounds. */
2402 LLVMValueRef ac_build_cvt_pk_u16(struct ac_llvm_context *ctx, LLVMValueRef args[2], unsigned bits,
2403                                  bool hi)
2404 {
2405    assert(bits == 8 || bits == 10 || bits == 16);
2406 
2407    LLVMValueRef max_rgb = LLVMConstInt(ctx->i32, bits == 8 ? 255 : bits == 10 ? 1023 : 65535, 0);
2408    LLVMValueRef max_alpha = bits != 10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);
2409 
2410    /* Clamp. */
2411    if (bits != 16) {
2412       for (int i = 0; i < 2; i++) {
2413          bool alpha = hi && i == 1;
2414          args[i] = ac_build_umin(ctx, args[i], alpha ? max_alpha : max_rgb);
2415       }
2416    }
2417 
2418    LLVMValueRef res =
2419       ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pk.u16", ctx->v2i16, args, 2, AC_FUNC_ATTR_READNONE);
2420    return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
2421 }
2422 
2423 LLVMValueRef ac_build_wqm_vote(struct ac_llvm_context *ctx, LLVMValueRef i1)
2424 {
2425    return ac_build_intrinsic(ctx, "llvm.amdgcn.wqm.vote", ctx->i1, &i1, 1, AC_FUNC_ATTR_READNONE);
2426 }
2427 
2428 void ac_build_kill_if_false(struct ac_llvm_context *ctx, LLVMValueRef i1)
2429 {
2430    ac_build_intrinsic(ctx, "llvm.amdgcn.kill", ctx->voidt, &i1, 1, 0);
2431 }
2432 
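/* Bitfield extract: return the `width` bits of `input` starting at bit `offset`,
 * sign-extended when is_signed is true (v_bfe_i32) and zero-extended otherwise
 * (v_bfe_u32).
 */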
2433 LLVMValueRef ac_build_bfe(struct ac_llvm_context *ctx, LLVMValueRef input, LLVMValueRef offset,
2434                           LLVMValueRef width, bool is_signed)
2435 {
2436    LLVMValueRef args[] = {
2437       input,
2438       offset,
2439       width,
2440    };
2441 
2442    return ac_build_intrinsic(ctx, is_signed ? "llvm.amdgcn.sbfe.i32" : "llvm.amdgcn.ubfe.i32",
2443                              ctx->i32, args, 3, AC_FUNC_ATTR_READNONE);
2444 }
2445 
2446 LLVMValueRef ac_build_imad(struct ac_llvm_context *ctx, LLVMValueRef s0, LLVMValueRef s1,
2447                            LLVMValueRef s2)
2448 {
2449    return LLVMBuildAdd(ctx->builder, LLVMBuildMul(ctx->builder, s0, s1, ""), s2, "");
2450 }
2451 
2452 LLVMValueRef ac_build_fmad(struct ac_llvm_context *ctx, LLVMValueRef s0, LLVMValueRef s1,
2453                            LLVMValueRef s2)
2454 {
2455    /* FMA is better on GFX10, because it has FMA units instead of MUL-ADD units. */
2456    if (ctx->chip_class >= GFX10) {
2457       return ac_build_intrinsic(ctx, "llvm.fma.f32", ctx->f32, (LLVMValueRef[]){s0, s1, s2}, 3,
2458                                 AC_FUNC_ATTR_READNONE);
2459    }
2460 
2461    return LLVMBuildFAdd(ctx->builder, LLVMBuildFMul(ctx->builder, s0, s1, ""), s2, "");
2462 }
2463 
2464 void ac_build_waitcnt(struct ac_llvm_context *ctx, unsigned wait_flags)
2465 {
2466    if (!wait_flags)
2467       return;
2468 
2469    unsigned lgkmcnt = 63;
2470    unsigned vmcnt = ctx->chip_class >= GFX9 ? 63 : 15;
2471    unsigned vscnt = 63;
2472 
2473    if (wait_flags & AC_WAIT_LGKM)
2474       lgkmcnt = 0;
2475    if (wait_flags & AC_WAIT_VLOAD)
2476       vmcnt = 0;
2477 
2478    if (wait_flags & AC_WAIT_VSTORE) {
2479       if (ctx->chip_class >= GFX10)
2480          vscnt = 0;
2481       else
2482          vmcnt = 0;
2483    }
2484 
2485    /* There is no intrinsic for vscnt(0), so use a fence. */
2486    if ((wait_flags & AC_WAIT_LGKM && wait_flags & AC_WAIT_VLOAD && wait_flags & AC_WAIT_VSTORE) ||
2487        vscnt == 0) {
2488       LLVMBuildFence(ctx->builder, LLVMAtomicOrderingRelease, false, "");
2489       return;
2490    }
2491 
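   /* Encode the counters into the s_waitcnt immediate: vmcnt[3:0] goes in
    * bits 0-3, expcnt in bits 4-6 (7 = don't wait on exports), lgkmcnt in
    * bits 8-13, and the upper vmcnt bits [5:4] in bits 14-15.
    */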
2492    unsigned simm16 = (lgkmcnt << 8) | (7 << 4) | /* expcnt */
2493                      (vmcnt & 0xf) | ((vmcnt >> 4) << 14);
2494 
2495    LLVMValueRef args[1] = {
2496       LLVMConstInt(ctx->i32, simm16, false),
2497    };
2498    ac_build_intrinsic(ctx, "llvm.amdgcn.s.waitcnt", ctx->voidt, args, 1, 0);
2499 }
2500 
2501 LLVMValueRef ac_build_fsat(struct ac_llvm_context *ctx, LLVMValueRef src,
2502                            LLVMTypeRef type)
2503 {
2504    unsigned bitsize = ac_get_elem_bits(ctx, type);
2505    LLVMValueRef zero = LLVMConstReal(type, 0.0);
2506    LLVMValueRef one = LLVMConstReal(type, 1.0);
2507    LLVMValueRef result;
2508 
2509    if (bitsize == 64 || (bitsize == 16 && ctx->chip_class <= GFX8) || type == ctx->v2f16) {
2510       /* Use fmin/fmax for 64-bit fsat, for 16-bit on GFX6-GFX8, and for v2f16,
2511        * because LLVM doesn't expose a suitable fmed3 intrinsic for these cases.
2512        */
2513       result = ac_build_fmin(ctx, ac_build_fmax(ctx, src, zero), one);
2514    } else {
2515       LLVMTypeRef type;
2516       char *intr;
2517 
2518       if (bitsize == 16) {
2519          intr = "llvm.amdgcn.fmed3.f16";
2520          type = ctx->f16;
2521       } else {
2522          assert(bitsize == 32);
2523          intr = "llvm.amdgcn.fmed3.f32";
2524          type = ctx->f32;
2525       }
2526 
2527       LLVMValueRef params[] = {
2528          zero,
2529          one,
2530          src,
2531       };
2532 
2533       result = ac_build_intrinsic(ctx, intr, type, params, 3,
2534                                   AC_FUNC_ATTR_READNONE);
2535    }
2536 
2537    if (ctx->chip_class < GFX9 && bitsize == 32) {
2538       /* Only pre-GFX9 chips do not flush denorms. */
2539       result = ac_build_canonicalize(ctx, result, bitsize);
2540    }
2541 
2542    return result;
2543 }
2544 
2545 LLVMValueRef ac_build_fract(struct ac_llvm_context *ctx, LLVMValueRef src0, unsigned bitsize)
2546 {
2547    LLVMTypeRef type;
2548    char *intr;
2549 
2550    if (bitsize == 16) {
2551       intr = "llvm.amdgcn.fract.f16";
2552       type = ctx->f16;
2553    } else if (bitsize == 32) {
2554       intr = "llvm.amdgcn.fract.f32";
2555       type = ctx->f32;
2556    } else {
2557       intr = "llvm.amdgcn.fract.f64";
2558       type = ctx->f64;
2559    }
2560 
2561    LLVMValueRef params[] = {
2562       src0,
2563    };
2564    return ac_build_intrinsic(ctx, intr, type, params, 1, AC_FUNC_ATTR_READNONE);
2565 }
2566 
2567 LLVMValueRef ac_const_uint_vec(struct ac_llvm_context *ctx, LLVMTypeRef type, uint64_t value)
2568 {
2569 
2570    if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
2571       LLVMValueRef scalar = LLVMConstInt(LLVMGetElementType(type), value, 0);
2572       unsigned vec_size = LLVMGetVectorSize(type);
2573       LLVMValueRef *scalars = alloca(vec_size * sizeof(LLVMValueRef));
2574 
2575       for (unsigned i = 0; i < vec_size; i++)
2576          scalars[i] = scalar;
2577       return LLVMConstVector(scalars, vec_size);
2578    }
2579    return LLVMConstInt(type, value, 0);
2580 }
2581 
2582 LLVMValueRef ac_build_isign(struct ac_llvm_context *ctx, LLVMValueRef src0)
2583 {
2584    LLVMTypeRef type = LLVMTypeOf(src0);
2585    LLVMValueRef val;
2586 
2587    /* v_med3 is selected only when max is first. (LLVM bug?) */
2588    val = ac_build_imax(ctx, src0, ac_const_uint_vec(ctx, type, -1));
2589    return ac_build_imin(ctx, val, ac_const_uint_vec(ctx, type, 1));
2590 }
2591 
2592 static LLVMValueRef ac_eliminate_negative_zero(struct ac_llvm_context *ctx, LLVMValueRef val)
2593 {
2594    ac_enable_signed_zeros(ctx);
2595    /* (val + 0) converts negative zero to positive zero. */
2596    val = LLVMBuildFAdd(ctx->builder, val, LLVMConstNull(LLVMTypeOf(val)), "");
2597    ac_disable_signed_zeros(ctx);
2598    return val;
2599 }
2600 
2601 LLVMValueRef ac_build_fsign(struct ac_llvm_context *ctx, LLVMValueRef src)
2602 {
2603    LLVMTypeRef type = LLVMTypeOf(src);
2604    LLVMValueRef pos, neg, dw[2], val;
2605    unsigned bitsize = ac_get_elem_bits(ctx, type);
2606 
2607    /* The standard version leads to this:
2608     *   v_cmp_ngt_f32_e64 s[0:1], s4, 0                       ; D40B0000 00010004
2609     *   v_cndmask_b32_e64 v4, 1.0, s4, s[0:1]                 ; D5010004 000008F2
2610     *   v_cmp_le_f32_e32 vcc, 0, v4                           ; 7C060880
2611     *   v_cndmask_b32_e32 v4, -1.0, v4, vcc                   ; 020808F3
2612     *
2613     * The isign version:
2614     *   v_add_f32_e64 v4, s4, 0                               ; D5030004 00010004
2615     *   v_med3_i32 v4, v4, -1, 1                              ; D5580004 02058304
2616     *   v_cvt_f32_i32_e32 v4, v4                              ; 7E080B04
2617     *
2618     * (src0 + 0) converts negative zero to positive zero.
2619     * After that, int(fsign(x)) == isign(floatBitsToInt(x)).
2620     *
2621     * For FP64, use the standard version, which doesn't suffer from the huge DP rate
2622     * reduction. (FP64 comparisons are as fast as int64 comparisons)
2623     */
2624    if (bitsize == 16 || bitsize == 32) {
2625       val = ac_to_integer(ctx, ac_eliminate_negative_zero(ctx, src));
2626       val = ac_build_isign(ctx, val);
2627       return LLVMBuildSIToFP(ctx->builder, val, type, "");
2628    }
2629 
2630    assert(bitsize == 64);
2631    pos = LLVMBuildFCmp(ctx->builder, LLVMRealOGT, src, ctx->f64_0, "");
2632    neg = LLVMBuildFCmp(ctx->builder, LLVMRealOLT, src, ctx->f64_0, "");
2633    dw[0] = ctx->i32_0;
2634    dw[1] = LLVMBuildSelect(
2635       ctx->builder, pos, LLVMConstInt(ctx->i32, 0x3FF00000, 0),
2636       LLVMBuildSelect(ctx->builder, neg, LLVMConstInt(ctx->i32, 0xBFF00000, 0), ctx->i32_0, ""),
2637       "");
2638    return LLVMBuildBitCast(ctx->builder, ac_build_gather_values(ctx, dw, 2), ctx->f64, "");
2639 }
2640 
2641 LLVMValueRef ac_build_bit_count(struct ac_llvm_context *ctx, LLVMValueRef src0)
2642 {
2643    LLVMValueRef result;
2644    unsigned bitsize;
2645 
2646    bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(src0));
2647 
2648    switch (bitsize) {
2649    case 128:
2650       result = ac_build_intrinsic(ctx, "llvm.ctpop.i128", ctx->i128, (LLVMValueRef[]){src0}, 1,
2651                                   AC_FUNC_ATTR_READNONE);
2652       result = LLVMBuildTrunc(ctx->builder, result, ctx->i32, "");
2653       break;
2654    case 64:
2655       result = ac_build_intrinsic(ctx, "llvm.ctpop.i64", ctx->i64, (LLVMValueRef[]){src0}, 1,
2656                                   AC_FUNC_ATTR_READNONE);
2657 
2658       result = LLVMBuildTrunc(ctx->builder, result, ctx->i32, "");
2659       break;
2660    case 32:
2661       result = ac_build_intrinsic(ctx, "llvm.ctpop.i32", ctx->i32, (LLVMValueRef[]){src0}, 1,
2662                                   AC_FUNC_ATTR_READNONE);
2663       break;
2664    case 16:
2665       result = ac_build_intrinsic(ctx, "llvm.ctpop.i16", ctx->i16, (LLVMValueRef[]){src0}, 1,
2666                                   AC_FUNC_ATTR_READNONE);
2667 
2668       result = LLVMBuildZExt(ctx->builder, result, ctx->i32, "");
2669       break;
2670    case 8:
2671       result = ac_build_intrinsic(ctx, "llvm.ctpop.i8", ctx->i8, (LLVMValueRef[]){src0}, 1,
2672                                   AC_FUNC_ATTR_READNONE);
2673 
2674       result = LLVMBuildZExt(ctx->builder, result, ctx->i32, "");
2675       break;
2676    default:
2677       unreachable("invalid bitsize");
2678       break;
2679    }
2680 
2681    return result;
2682 }
2683 
2684 LLVMValueRef ac_build_bitfield_reverse(struct ac_llvm_context *ctx, LLVMValueRef src0)
2685 {
2686    LLVMValueRef result;
2687    unsigned bitsize;
2688 
2689    bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(src0));
2690 
2691    switch (bitsize) {
2692    case 64:
2693       result = ac_build_intrinsic(ctx, "llvm.bitreverse.i64", ctx->i64, (LLVMValueRef[]){src0}, 1,
2694                                   AC_FUNC_ATTR_READNONE);
2695 
2696       result = LLVMBuildTrunc(ctx->builder, result, ctx->i32, "");
2697       break;
2698    case 32:
2699       result = ac_build_intrinsic(ctx, "llvm.bitreverse.i32", ctx->i32, (LLVMValueRef[]){src0}, 1,
2700                                   AC_FUNC_ATTR_READNONE);
2701       break;
2702    case 16:
2703       result = ac_build_intrinsic(ctx, "llvm.bitreverse.i16", ctx->i16, (LLVMValueRef[]){src0}, 1,
2704                                   AC_FUNC_ATTR_READNONE);
2705 
2706       result = LLVMBuildZExt(ctx->builder, result, ctx->i32, "");
2707       break;
2708    case 8:
2709       result = ac_build_intrinsic(ctx, "llvm.bitreverse.i8", ctx->i8, (LLVMValueRef[]){src0}, 1,
2710                                   AC_FUNC_ATTR_READNONE);
2711 
2712       result = LLVMBuildZExt(ctx->builder, result, ctx->i32, "");
2713       break;
2714    default:
2715       unreachable("invalid bitsize");
2716       break;
2717    }
2718 
2719    return result;
2720 }
2721 
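/* Operand indices of the export intrinsic call, used below to inspect and
 * rewrite PARAM exports in place.
 */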
2722 #define AC_EXP_TARGET           0
2723 #define AC_EXP_ENABLED_CHANNELS 1
2724 #define AC_EXP_OUT0             2
2725 
2726 enum ac_ir_type
2727 {
2728    AC_IR_UNDEF,
2729    AC_IR_CONST,
2730    AC_IR_VALUE,
2731 };
2732 
2733 struct ac_vs_exp_chan {
2734    LLVMValueRef value;
2735    float const_float;
2736    enum ac_ir_type type;
2737 };
2738 
2739 struct ac_vs_exp_inst {
2740    unsigned offset;
2741    LLVMValueRef inst;
2742    struct ac_vs_exp_chan chan[4];
2743 };
2744 
2745 struct ac_vs_exports {
2746    unsigned num;
2747    struct ac_vs_exp_inst exp[VARYING_SLOT_MAX];
2748 };
2749 
2750 /* Return true if the PARAM export has been eliminated. */
2751 static bool ac_eliminate_const_output(uint8_t *vs_output_param_offset, uint32_t num_outputs,
2752                                       struct ac_vs_exp_inst *exp)
2753 {
2754    unsigned i, default_val; /* SPI_PS_INPUT_CNTL_i.DEFAULT_VAL */
2755    bool is_zero[4] = {0}, is_one[4] = {0};
2756 
2757    for (i = 0; i < 4; i++) {
2758       /* It's a constant expression. Undef outputs are eliminated too. */
2759       if (exp->chan[i].type == AC_IR_UNDEF) {
2760          is_zero[i] = true;
2761          is_one[i] = true;
2762       } else if (exp->chan[i].type == AC_IR_CONST) {
2763          if (exp->chan[i].const_float == 0)
2764             is_zero[i] = true;
2765          else if (exp->chan[i].const_float == 1)
2766             is_one[i] = true;
2767          else
2768             return false; /* other constant */
2769       } else
2770          return false;
2771    }
2772 
2773    /* Only certain combinations of 0 and 1 can be eliminated. */
2774    if (is_zero[0] && is_zero[1] && is_zero[2])
2775       default_val = is_zero[3] ? 0 : 1;
2776    else if (is_one[0] && is_one[1] && is_one[2])
2777       default_val = is_zero[3] ? 2 : 3;
2778    else
2779       return false;
2780 
2781    /* The PARAM export can be represented as DEFAULT_VAL. Kill it. */
2782    LLVMInstructionEraseFromParent(exp->inst);
2783 
2784    /* Change OFFSET to DEFAULT_VAL. */
2785    for (i = 0; i < num_outputs; i++) {
2786       if (vs_output_param_offset[i] == exp->offset) {
2787          vs_output_param_offset[i] = AC_EXP_PARAM_DEFAULT_VAL_0000 + default_val;
2788          break;
2789       }
2790    }
2791    return true;
2792 }
2793 
2794 static bool ac_eliminate_duplicated_output(struct ac_llvm_context *ctx,
2795                                            uint8_t *vs_output_param_offset, uint32_t num_outputs,
2796                                            struct ac_vs_exports *processed,
2797                                            struct ac_vs_exp_inst *exp)
2798 {
2799    unsigned p, copy_back_channels = 0;
2800 
2801    /* See if the output is already in the list of processed outputs.
2802     * The LLVMValueRef comparison relies on SSA.
2803     */
2804    for (p = 0; p < processed->num; p++) {
2805       bool different = false;
2806 
2807       for (unsigned j = 0; j < 4; j++) {
2808          struct ac_vs_exp_chan *c1 = &processed->exp[p].chan[j];
2809          struct ac_vs_exp_chan *c2 = &exp->chan[j];
2810 
2811          /* Treat undef as a match. */
2812          if (c2->type == AC_IR_UNDEF)
2813             continue;
2814 
2815          /* If c1 is undef but c2 isn't, we can copy c2 to c1
2816           * and consider the instruction duplicated.
2817           */
2818          if (c1->type == AC_IR_UNDEF) {
2819             copy_back_channels |= 1 << j;
2820             continue;
2821          }
2822 
2823          /* Test whether the channels are not equal. */
2824          if (c1->type != c2->type ||
2825              (c1->type == AC_IR_CONST && c1->const_float != c2->const_float) ||
2826              (c1->type == AC_IR_VALUE && c1->value != c2->value)) {
2827             different = true;
2828             break;
2829          }
2830       }
2831       if (!different)
2832          break;
2833 
2834       copy_back_channels = 0;
2835    }
2836    if (p == processed->num)
2837       return false;
2838 
2839    /* If a match was found, but the matching export has undef where the new
2840     * one has a normal value, copy the normal value to the undef channel.
2841     */
2842    struct ac_vs_exp_inst *match = &processed->exp[p];
2843 
2844    /* Get current enabled channels mask. */
2845    LLVMValueRef arg = LLVMGetOperand(match->inst, AC_EXP_ENABLED_CHANNELS);
2846    unsigned enabled_channels = LLVMConstIntGetZExtValue(arg);
2847 
2848    while (copy_back_channels) {
2849       unsigned chan = u_bit_scan(&copy_back_channels);
2850 
2851       assert(match->chan[chan].type == AC_IR_UNDEF);
2852       LLVMSetOperand(match->inst, AC_EXP_OUT0 + chan, exp->chan[chan].value);
2853       match->chan[chan] = exp->chan[chan];
2854 
2855       /* Update number of enabled channels because the original mask
2856        * is not always 0xf.
2857        */
2858       enabled_channels |= (1 << chan);
2859       LLVMSetOperand(match->inst, AC_EXP_ENABLED_CHANNELS,
2860                      LLVMConstInt(ctx->i32, enabled_channels, 0));
2861    }
2862 
2863    /* The PARAM export is duplicated. Kill it. */
2864    LLVMInstructionEraseFromParent(exp->inst);
2865 
2866    /* Change OFFSET to the matching export. */
2867    for (unsigned i = 0; i < num_outputs; i++) {
2868       if (vs_output_param_offset[i] == exp->offset) {
2869          vs_output_param_offset[i] = match->offset;
2870          break;
2871       }
2872    }
2873    return true;
2874 }
2875 
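/* Scan the shader for PARAM exports, drop the ones that are constant or
 * duplicated, and renumber the remaining exports so there are no holes.
 * vs_output_param_offset is updated to reflect the new export offsets.
 */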
2876 void ac_optimize_vs_outputs(struct ac_llvm_context *ctx, LLVMValueRef main_fn,
2877                             uint8_t *vs_output_param_offset, uint32_t num_outputs,
2878                             uint32_t skip_output_mask, uint8_t *num_param_exports)
2879 {
2880    LLVMBasicBlockRef bb;
2881    bool removed_any = false;
2882    struct ac_vs_exports exports;
2883 
2884    exports.num = 0;
2885 
2886    /* Process all LLVM instructions. */
2887    bb = LLVMGetFirstBasicBlock(main_fn);
2888    while (bb) {
2889       LLVMValueRef inst = LLVMGetFirstInstruction(bb);
2890 
2891       while (inst) {
2892          LLVMValueRef cur = inst;
2893          inst = LLVMGetNextInstruction(inst);
2894          struct ac_vs_exp_inst exp;
2895 
2896          if (LLVMGetInstructionOpcode(cur) != LLVMCall)
2897             continue;
2898 
2899          LLVMValueRef callee = ac_llvm_get_called_value(cur);
2900 
2901          if (!ac_llvm_is_function(callee))
2902             continue;
2903 
2904          const char *name = LLVMGetValueName(callee);
2905          unsigned num_args = LLVMCountParams(callee);
2906 
2907          /* Check if this is an export instruction. */
2908          if ((num_args != 9 && num_args != 8) ||
2909              (strcmp(name, "llvm.SI.export") && strcmp(name, "llvm.amdgcn.exp.f32")))
2910             continue;
2911 
2912          LLVMValueRef arg = LLVMGetOperand(cur, AC_EXP_TARGET);
2913          unsigned target = LLVMConstIntGetZExtValue(arg);
2914 
2915          if (target < V_008DFC_SQ_EXP_PARAM)
2916             continue;
2917 
2918          target -= V_008DFC_SQ_EXP_PARAM;
2919 
2920          /* Parse the instruction. */
2921          memset(&exp, 0, sizeof(exp));
2922          exp.offset = target;
2923          exp.inst = cur;
2924 
2925          for (unsigned i = 0; i < 4; i++) {
2926             LLVMValueRef v = LLVMGetOperand(cur, AC_EXP_OUT0 + i);
2927 
2928             exp.chan[i].value = v;
2929 
2930             if (LLVMIsUndef(v)) {
2931                exp.chan[i].type = AC_IR_UNDEF;
2932             } else if (LLVMIsAConstantFP(v)) {
2933                LLVMBool loses_info;
2934                exp.chan[i].type = AC_IR_CONST;
2935                exp.chan[i].const_float = LLVMConstRealGetDouble(v, &loses_info);
2936             } else {
2937                exp.chan[i].type = AC_IR_VALUE;
2938             }
2939          }
2940 
2941          /* Eliminate constant and duplicated PARAM exports. */
2942          if (!((1u << target) & skip_output_mask) &&
2943              (ac_eliminate_const_output(vs_output_param_offset, num_outputs, &exp) ||
2944               ac_eliminate_duplicated_output(ctx, vs_output_param_offset, num_outputs, &exports,
2945                                              &exp))) {
2946             removed_any = true;
2947          } else {
2948             exports.exp[exports.num++] = exp;
2949          }
2950       }
2951       bb = LLVMGetNextBasicBlock(bb);
2952    }
2953 
2954    /* Remove holes in export memory due to removed PARAM exports.
2955     * This is done by renumbering all PARAM exports.
2956     */
2957    if (removed_any) {
2958       uint8_t old_offset[VARYING_SLOT_MAX];
2959       unsigned out, i;
2960 
2961       /* Make a copy of the offsets. We need the old version while
2962        * we are modifying some of them. */
2963       memcpy(old_offset, vs_output_param_offset, sizeof(old_offset));
2964 
2965       for (i = 0; i < exports.num; i++) {
2966          unsigned offset = exports.exp[i].offset;
2967 
2968          /* Update vs_output_param_offset. Multiple outputs can
2969           * have the same offset.
2970           */
2971          for (out = 0; out < num_outputs; out++) {
2972             if (old_offset[out] == offset)
2973                vs_output_param_offset[out] = i;
2974          }
2975 
2976          /* Change the PARAM offset in the instruction. */
2977          LLVMSetOperand(exports.exp[i].inst, AC_EXP_TARGET,
2978                         LLVMConstInt(ctx->i32, V_008DFC_SQ_EXP_PARAM + i, 0));
2979       }
2980       *num_param_exports = exports.num;
2981    }
2982 }
2983 
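/* Set the EXEC mask to all ones so the shader starts with every lane enabled
 * (llvm.amdgcn.init.exec).
 */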
2984 void ac_init_exec_full_mask(struct ac_llvm_context *ctx)
2985 {
2986    LLVMValueRef full_mask = LLVMConstInt(ctx->i64, ~0ull, 0);
2987    ac_build_intrinsic(ctx, "llvm.amdgcn.init.exec", ctx->voidt, &full_mask, 1,
2988                       AC_FUNC_ATTR_CONVERGENT);
2989 }
2990 
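/* Declare LDS as a dword array starting at address 0 so it can be indexed in
 * dwords; the array covers 64 KiB on GFX7+ and 32 KiB on older chips.
 */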
2991 void ac_declare_lds_as_pointer(struct ac_llvm_context *ctx)
2992 {
2993    unsigned lds_size = ctx->chip_class >= GFX7 ? 65536 : 32768;
2994    ctx->lds = LLVMBuildIntToPtr(
2995       ctx->builder, ctx->i32_0,
2996       LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), AC_ADDR_SPACE_LDS), "lds");
2997 }
2998 
2999 LLVMValueRef ac_lds_load(struct ac_llvm_context *ctx, LLVMValueRef dw_addr)
3000 {
3001    return LLVMBuildLoad(ctx->builder, ac_build_gep0(ctx, ctx->lds, dw_addr), "");
3002 }
3003 
3004 void ac_lds_store(struct ac_llvm_context *ctx, LLVMValueRef dw_addr, LLVMValueRef value)
3005 {
3006    value = ac_to_integer(ctx, value);
3007    ac_build_indexed_store(ctx, ctx->lds, dw_addr, value);
3008 }
3009 
3010 LLVMValueRef ac_find_lsb(struct ac_llvm_context *ctx, LLVMTypeRef dst_type, LLVMValueRef src0)
3011 {
3012    unsigned src0_bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(src0));
3013    const char *intrin_name;
3014    LLVMTypeRef type;
3015    LLVMValueRef zero;
3016 
3017    switch (src0_bitsize) {
3018    case 64:
3019       intrin_name = "llvm.cttz.i64";
3020       type = ctx->i64;
3021       zero = ctx->i64_0;
3022       break;
3023    case 32:
3024       intrin_name = "llvm.cttz.i32";
3025       type = ctx->i32;
3026       zero = ctx->i32_0;
3027       break;
3028    case 16:
3029       intrin_name = "llvm.cttz.i16";
3030       type = ctx->i16;
3031       zero = ctx->i16_0;
3032       break;
3033    case 8:
3034       intrin_name = "llvm.cttz.i8";
3035       type = ctx->i8;
3036       zero = ctx->i8_0;
3037       break;
3038    default:
3039       unreachable("invalid bitsize");
3040    }
3041 
3042    LLVMValueRef params[2] = {
3043       src0,
3044 
3045       /* The value of 1 means that ffs(x=0) = undef, so LLVM won't
3046        * add special code to check for x=0. The reason is that
3047        * the LLVM behavior for x=0 is different from what we
3048        * need here. However, LLVM also assumes that ffs(x) is
3049        * in [0, 31], but GLSL expects that ffs(0) = -1, so
3050        * a conditional assignment to handle 0 is still required.
3051        *
3052        * The hardware already implements the correct behavior.
3053        */
3054       ctx->i1true,
3055    };
3056 
3057    LLVMValueRef lsb = ac_build_intrinsic(ctx, intrin_name, type, params, 2, AC_FUNC_ATTR_READNONE);
3058 
3059    if (src0_bitsize == 64) {
3060       lsb = LLVMBuildTrunc(ctx->builder, lsb, ctx->i32, "");
3061    } else if (src0_bitsize < 32) {
3062       lsb = LLVMBuildSExt(ctx->builder, lsb, ctx->i32, "");
3063    }
3064 
3065    /* TODO: We need an intrinsic to skip this conditional. */
3066    /* Check for zero: */
3067    return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder, LLVMIntEQ, src0, zero, ""),
3068                           LLVMConstInt(ctx->i32, -1, 0), lsb, "");
3069 }
3070 
3071 LLVMTypeRef ac_array_in_const_addr_space(LLVMTypeRef elem_type)
3072 {
3073    return LLVMPointerType(elem_type, AC_ADDR_SPACE_CONST);
3074 }
3075 
3076 LLVMTypeRef ac_array_in_const32_addr_space(LLVMTypeRef elem_type)
3077 {
3078    return LLVMPointerType(elem_type, AC_ADDR_SPACE_CONST_32BIT);
3079 }
3080 
3081 static struct ac_llvm_flow *get_current_flow(struct ac_llvm_context *ctx)
3082 {
3083    if (ctx->flow->depth > 0)
3084       return &ctx->flow->stack[ctx->flow->depth - 1];
3085    return NULL;
3086 }
3087 
3088 static struct ac_llvm_flow *get_innermost_loop(struct ac_llvm_context *ctx)
3089 {
3090    for (unsigned i = ctx->flow->depth; i > 0; --i) {
3091       if (ctx->flow->stack[i - 1].loop_entry_block)
3092          return &ctx->flow->stack[i - 1];
3093    }
3094    return NULL;
3095 }
3096 
3097 static struct ac_llvm_flow *push_flow(struct ac_llvm_context *ctx)
3098 {
3099    struct ac_llvm_flow *flow;
3100 
3101    if (ctx->flow->depth >= ctx->flow->depth_max) {
3102       unsigned new_max = MAX2(ctx->flow->depth << 1, AC_LLVM_INITIAL_CF_DEPTH);
3103 
3104       ctx->flow->stack = realloc(ctx->flow->stack, new_max * sizeof(*ctx->flow->stack));
3105       ctx->flow->depth_max = new_max;
3106    }
3107 
3108    flow = &ctx->flow->stack[ctx->flow->depth];
3109    ctx->flow->depth++;
3110 
3111    flow->next_block = NULL;
3112    flow->loop_entry_block = NULL;
3113    return flow;
3114 }
3115 
3116 static void set_basicblock_name(LLVMBasicBlockRef bb, const char *base, int label_id)
3117 {
3118    char buf[32];
3119    snprintf(buf, sizeof(buf), "%s%d", base, label_id);
3120    LLVMSetValueName(LLVMBasicBlockAsValue(bb), buf);
3121 }
3122 
3123 /* Append a basic block at the level of the parent flow.
3124  */
3125 static LLVMBasicBlockRef append_basic_block(struct ac_llvm_context *ctx, const char *name)
3126 {
3127    assert(ctx->flow->depth >= 1);
3128 
3129    if (ctx->flow->depth >= 2) {
3130       struct ac_llvm_flow *flow = &ctx->flow->stack[ctx->flow->depth - 2];
3131 
3132       return LLVMInsertBasicBlockInContext(ctx->context, flow->next_block, name);
3133    }
3134 
3135    LLVMValueRef main_fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx->builder));
3136    return LLVMAppendBasicBlockInContext(ctx->context, main_fn, name);
3137 }
3138 
3139 /* Emit a branch to the given default target for the current block if
3140  * applicable -- that is, if the current block does not already contain a
3141  * branch from a break or continue.
3142  */
3143 static void emit_default_branch(LLVMBuilderRef builder, LLVMBasicBlockRef target)
3144 {
3145    if (!LLVMGetBasicBlockTerminator(LLVMGetInsertBlock(builder)))
3146       LLVMBuildBr(builder, target);
3147 }
3148 
3149 void ac_build_bgnloop(struct ac_llvm_context *ctx, int label_id)
3150 {
3151    struct ac_llvm_flow *flow = push_flow(ctx);
3152    flow->loop_entry_block = append_basic_block(ctx, "LOOP");
3153    flow->next_block = append_basic_block(ctx, "ENDLOOP");
3154    set_basicblock_name(flow->loop_entry_block, "loop", label_id);
3155    LLVMBuildBr(ctx->builder, flow->loop_entry_block);
3156    LLVMPositionBuilderAtEnd(ctx->builder, flow->loop_entry_block);
3157 }
3158 
3159 void ac_build_break(struct ac_llvm_context *ctx)
3160 {
3161    struct ac_llvm_flow *flow = get_innermost_loop(ctx);
3162    LLVMBuildBr(ctx->builder, flow->next_block);
3163 }
3164 
3165 void ac_build_continue(struct ac_llvm_context *ctx)
3166 {
3167    struct ac_llvm_flow *flow = get_innermost_loop(ctx);
3168    LLVMBuildBr(ctx->builder, flow->loop_entry_block);
3169 }
3170 
3171 void ac_build_else(struct ac_llvm_context *ctx, int label_id)
3172 {
3173    struct ac_llvm_flow *current_branch = get_current_flow(ctx);
3174    LLVMBasicBlockRef endif_block;
3175 
3176    assert(!current_branch->loop_entry_block);
3177 
3178    endif_block = append_basic_block(ctx, "ENDIF");
3179    emit_default_branch(ctx->builder, endif_block);
3180 
3181    LLVMPositionBuilderAtEnd(ctx->builder, current_branch->next_block);
3182    set_basicblock_name(current_branch->next_block, "else", label_id);
3183 
3184    current_branch->next_block = endif_block;
3185 }
3186 
3187 /* Invoked after a branch is exited. */
3188 static void ac_branch_exited(struct ac_llvm_context *ctx)
3189 {
3190    if (ctx->flow->depth == 0 && ctx->conditional_demote_seen) {
3191       /* The previous conditional branch contained demote. Kill threads
3192        * after all conditional blocks because amdgcn.wqm.vote doesn't
3193        * return usable values inside the blocks.
3194        *
3195        * This is an optional optimization that only kills whole inactive quads.
3196        */
3197       LLVMValueRef cond = LLVMBuildLoad(ctx->builder, ctx->postponed_kill, "");
3198       ac_build_kill_if_false(ctx, ac_build_wqm_vote(ctx, cond));
3199       ctx->conditional_demote_seen = false;
3200    }
3201 }
3202 
3203 void ac_build_endif(struct ac_llvm_context *ctx, int label_id)
3204 {
3205    struct ac_llvm_flow *current_branch = get_current_flow(ctx);
3206 
3207    assert(!current_branch->loop_entry_block);
3208 
3209    emit_default_branch(ctx->builder, current_branch->next_block);
3210    LLVMPositionBuilderAtEnd(ctx->builder, current_branch->next_block);
3211    set_basicblock_name(current_branch->next_block, "endif", label_id);
3212 
3213    ctx->flow->depth--;
3214    ac_branch_exited(ctx);
3215 }
3216 
3217 void ac_build_endloop(struct ac_llvm_context *ctx, int label_id)
3218 {
3219    struct ac_llvm_flow *current_loop = get_current_flow(ctx);
3220 
3221    assert(current_loop->loop_entry_block);
3222 
3223    emit_default_branch(ctx->builder, current_loop->loop_entry_block);
3224 
3225    LLVMPositionBuilderAtEnd(ctx->builder, current_loop->next_block);
3226    set_basicblock_name(current_loop->next_block, "endloop", label_id);
3227    ctx->flow->depth--;
3228    ac_branch_exited(ctx);
3229 }
3230 
3231 void ac_build_ifcc(struct ac_llvm_context *ctx, LLVMValueRef cond, int label_id)
3232 {
3233    struct ac_llvm_flow *flow = push_flow(ctx);
3234    LLVMBasicBlockRef if_block;
3235 
3236    if_block = append_basic_block(ctx, "IF");
3237    flow->next_block = append_basic_block(ctx, "ELSE");
3238    set_basicblock_name(if_block, "if", label_id);
3239    LLVMBuildCondBr(ctx->builder, cond, if_block, flow->next_block);
3240    LLVMPositionBuilderAtEnd(ctx->builder, if_block);
3241 }
3242 
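/* Emit the alloca in the entry block (before the first instruction) so it is
 * executed only once and can be promoted to registers by LLVM even when the
 * builder is currently positioned inside a loop or branch.
 */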
3243 LLVMValueRef ac_build_alloca_undef(struct ac_llvm_context *ac, LLVMTypeRef type, const char *name)
3244 {
3245    LLVMBuilderRef builder = ac->builder;
3246    LLVMBasicBlockRef current_block = LLVMGetInsertBlock(builder);
3247    LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
3248    LLVMBasicBlockRef first_block = LLVMGetEntryBasicBlock(function);
3249    LLVMValueRef first_instr = LLVMGetFirstInstruction(first_block);
3250    LLVMBuilderRef first_builder = LLVMCreateBuilderInContext(ac->context);
3251    LLVMValueRef res;
3252 
3253    if (first_instr) {
3254       LLVMPositionBuilderBefore(first_builder, first_instr);
3255    } else {
3256       LLVMPositionBuilderAtEnd(first_builder, first_block);
3257    }
3258 
3259    res = LLVMBuildAlloca(first_builder, type, name);
3260    LLVMDisposeBuilder(first_builder);
3261    return res;
3262 }
3263 
3264 LLVMValueRef ac_build_alloca(struct ac_llvm_context *ac, LLVMTypeRef type, const char *name)
3265 {
3266    LLVMValueRef ptr = ac_build_alloca_undef(ac, type, name);
3267    LLVMBuildStore(ac->builder, LLVMConstNull(type), ptr);
3268    return ptr;
3269 }
3270 
3271 LLVMValueRef ac_build_alloca_init(struct ac_llvm_context *ac, LLVMValueRef val, const char *name)
3272 {
3273    LLVMValueRef ptr = ac_build_alloca_undef(ac, LLVMTypeOf(val), name);
3274    LLVMBuildStore(ac->builder, val, ptr);
3275    return ptr;
3276 }
3277 
3278 LLVMValueRef ac_cast_ptr(struct ac_llvm_context *ctx, LLVMValueRef ptr, LLVMTypeRef type)
3279 {
3280    int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
3281    return LLVMBuildBitCast(ctx->builder, ptr, LLVMPointerType(type, addr_space), "");
3282 }
3283 
3284 LLVMValueRef ac_trim_vector(struct ac_llvm_context *ctx, LLVMValueRef value, unsigned count)
3285 {
3286    unsigned num_components = ac_get_llvm_num_components(value);
3287    if (count == num_components)
3288       return value;
3289 
3290    LLVMValueRef *const masks = alloca(MAX2(count, 2) * sizeof(LLVMValueRef));
3291    masks[0] = ctx->i32_0;
3292    masks[1] = ctx->i32_1;
3293    for (unsigned i = 2; i < count; i++)
3294       masks[i] = LLVMConstInt(ctx->i32, i, false);
3295 
3296    if (count == 1)
3297       return LLVMBuildExtractElement(ctx->builder, value, masks[0], "");
3298 
3299    LLVMValueRef swizzle = LLVMConstVector(masks, count);
3300    return LLVMBuildShuffleVector(ctx->builder, value, value, swizzle, "");
3301 }
3302 
3303 /* If param is i64 and bitwidth <= 32, the return value will be i32. */
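/* For example, ac_unpack_param(ctx, param, 8, 5) would return bits [12:8] of param. */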
3304 LLVMValueRef ac_unpack_param(struct ac_llvm_context *ctx, LLVMValueRef param, unsigned rshift,
3305                              unsigned bitwidth)
3306 {
3307    LLVMValueRef value = param;
3308    if (rshift)
3309       value = LLVMBuildLShr(ctx->builder, value, LLVMConstInt(LLVMTypeOf(param), rshift, false), "");
3310 
3311    if (rshift + bitwidth < 32) {
3312       uint64_t mask = (1ull << bitwidth) - 1;
3313       value = LLVMBuildAnd(ctx->builder, value, LLVMConstInt(LLVMTypeOf(param), mask, false), "");
3314    }
3315 
3316    if (bitwidth <= 32 && LLVMTypeOf(param) == ctx->i64)
3317       value = LLVMBuildTrunc(ctx->builder, value, ctx->i32, "");
3318    return value;
3319 }
3320 
3321 /* Adjust the sample index according to FMASK.
3322  *
3323  * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
3324  * which is the identity mapping. Each nibble says which physical sample
3325  * should be fetched to get that sample.
3326  *
3327  * For example, 0x11111100 means there are only 2 samples stored and
3328  * the second sample covers 3/4 of the pixel. When reading samples 0
3329  * and 1, return physical sample 0 (determined by the first two 0s
3330  * in FMASK), otherwise return physical sample 1.
3331  *
3332  * The sample index should be adjusted as follows:
3333  *   addr[sample_index] = (fmask >> (addr[sample_index] * 4)) & 0xF;
3334  */
3335 void ac_apply_fmask_to_sample(struct ac_llvm_context *ac, LLVMValueRef fmask, LLVMValueRef *addr,
3336                               bool is_array_tex)
3337 {
3338    struct ac_image_args fmask_load = {0};
3339    fmask_load.opcode = ac_image_load;
3340    fmask_load.resource = fmask;
3341    fmask_load.dmask = 0xf;
3342    fmask_load.dim = is_array_tex ? ac_image_2darray : ac_image_2d;
3343    fmask_load.attributes = AC_FUNC_ATTR_READNONE;
3344 
3345    fmask_load.coords[0] = addr[0];
3346    fmask_load.coords[1] = addr[1];
3347    if (is_array_tex)
3348       fmask_load.coords[2] = addr[2];
3349    fmask_load.a16 = ac_get_elem_bits(ac, LLVMTypeOf(addr[0])) == 16;
3350 
3351    LLVMValueRef fmask_value = ac_build_image_opcode(ac, &fmask_load);
3352    fmask_value = LLVMBuildExtractElement(ac->builder, fmask_value, ac->i32_0, "");
3353 
3354    /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
3355     * resource descriptor is 0 (invalid).
3356     */
3357    LLVMValueRef tmp;
3358    tmp = LLVMBuildBitCast(ac->builder, fmask, ac->v8i32, "");
3359    tmp = LLVMBuildExtractElement(ac->builder, tmp, ac->i32_1, "");
3360    tmp = LLVMBuildICmp(ac->builder, LLVMIntNE, tmp, ac->i32_0, "");
3361    fmask_value =
3362       LLVMBuildSelect(ac->builder, tmp, fmask_value, LLVMConstInt(ac->i32, 0x76543210, false), "");
3363 
3364    /* Apply the formula. */
3365    unsigned sample_chan = is_array_tex ? 3 : 2;
3366    LLVMValueRef final_sample;
3367    final_sample = LLVMBuildMul(ac->builder, addr[sample_chan],
3368                                LLVMConstInt(LLVMTypeOf(addr[0]), 4, 0), "");
3369    final_sample = LLVMBuildLShr(ac->builder, fmask_value,
3370                                 LLVMBuildZExt(ac->builder, final_sample, ac->i32, ""), "");
3371    /* Mask the sample index by 0x7, because 0x8 means an unknown value
3372     * with EQAA, so those will map to 0. */
3373    addr[sample_chan] = LLVMBuildAnd(ac->builder, final_sample, LLVMConstInt(ac->i32, 0x7, 0), "");
3374    if (fmask_load.a16)
3375       addr[sample_chan] = LLVMBuildTrunc(ac->builder, addr[sample_chan], ac->i16, "");
3376 }
3377 
3378 static LLVMValueRef _ac_build_readlane(struct ac_llvm_context *ctx, LLVMValueRef src,
3379                                        LLVMValueRef lane, bool with_opt_barrier)
3380 {
3381    LLVMTypeRef type = LLVMTypeOf(src);
3382    LLVMValueRef result;
3383 
3384    if (with_opt_barrier)
3385       ac_build_optimization_barrier(ctx, &src, false);
3386 
3387    src = LLVMBuildZExt(ctx->builder, src, ctx->i32, "");
3388    if (lane)
3389       lane = LLVMBuildZExt(ctx->builder, lane, ctx->i32, "");
3390 
3391    result =
3392       ac_build_intrinsic(ctx, lane == NULL ? "llvm.amdgcn.readfirstlane" : "llvm.amdgcn.readlane",
3393                          ctx->i32, (LLVMValueRef[]){src, lane}, lane == NULL ? 1 : 2,
3394                          AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
3395 
3396    return LLVMBuildTrunc(ctx->builder, result, type, "");
3397 }
3398 
3399 static LLVMValueRef ac_build_readlane_common(struct ac_llvm_context *ctx, LLVMValueRef src,
3400                                              LLVMValueRef lane, bool with_opt_barrier)
3401 {
3402    LLVMTypeRef src_type = LLVMTypeOf(src);
3403    src = ac_to_integer(ctx, src);
3404    unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
3405    LLVMValueRef ret;
3406 
3407    if (bits > 32) {
3408       assert(bits % 32 == 0);
3409       LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
3410       LLVMValueRef src_vector = LLVMBuildBitCast(ctx->builder, src, vec_type, "");
3411       ret = LLVMGetUndef(vec_type);
3412       for (unsigned i = 0; i < bits / 32; i++) {
3413          LLVMValueRef ret_comp;
3414 
3415          src = LLVMBuildExtractElement(ctx->builder, src_vector, LLVMConstInt(ctx->i32, i, 0), "");
3416 
3417          ret_comp = _ac_build_readlane(ctx, src, lane, with_opt_barrier);
3418 
3419          ret =
3420             LLVMBuildInsertElement(ctx->builder, ret, ret_comp, LLVMConstInt(ctx->i32, i, 0), "");
3421       }
3422    } else {
3423       ret = _ac_build_readlane(ctx, src, lane, with_opt_barrier);
3424    }
3425 
3426    if (LLVMGetTypeKind(src_type) == LLVMPointerTypeKind)
3427       return LLVMBuildIntToPtr(ctx->builder, ret, src_type, "");
3428    return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
3429 }
3430 
3431 /**
3432  * Builds the "llvm.amdgcn.readlane" or "llvm.amdgcn.readfirstlane" intrinsic.
3433  *
3434  * The optimization barrier is not needed if the value is the same in all lanes
3435  * or if this is called in the outermost block.
3436  *
3437  * @param ctx - the LLVM context
3438  * @param src - the value to read from the selected lane
3439  * @param lane - id of the lane or NULL for the first active lane
3440  * @return value of the lane
3441  */
3442 LLVMValueRef ac_build_readlane_no_opt_barrier(struct ac_llvm_context *ctx, LLVMValueRef src,
3443                                               LLVMValueRef lane)
3444 {
3445    return ac_build_readlane_common(ctx, src, lane, false);
3446 }
3447 
3448 LLVMValueRef ac_build_readlane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef lane)
3449 {
3450    return ac_build_readlane_common(ctx, src, lane, true);
3451 }
3452 
3453 LLVMValueRef ac_build_writelane(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef value,
3454                                 LLVMValueRef lane)
3455 {
3456    return ac_build_intrinsic(ctx, "llvm.amdgcn.writelane", ctx->i32,
3457                              (LLVMValueRef[]){value, lane, src}, 3,
3458                              AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
3459 }
3460 
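/* Count the bits of `mask` that belong to lanes with a lower lane index than
 * the current one and add `add_src` (mbcnt_lo/mbcnt_hi). With the exec mask as
 * input this yields the lane's index among the active lanes.
 */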
3461 LLVMValueRef ac_build_mbcnt_add(struct ac_llvm_context *ctx, LLVMValueRef mask, LLVMValueRef add_src)
3462 {
3463    if (ctx->wave_size == 32) {
3464       LLVMValueRef val = ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.lo", ctx->i32,
3465                                 (LLVMValueRef[]){mask, ctx->i32_0}, 2, AC_FUNC_ATTR_READNONE);
3466       ac_set_range_metadata(ctx, val, 0, ctx->wave_size);
3467       return val;
3468    }
3469    LLVMValueRef mask_vec = LLVMBuildBitCast(ctx->builder, mask, ctx->v2i32, "");
3470    LLVMValueRef mask_lo = LLVMBuildExtractElement(ctx->builder, mask_vec, ctx->i32_0, "");
3471    LLVMValueRef mask_hi = LLVMBuildExtractElement(ctx->builder, mask_vec, ctx->i32_1, "");
3472    LLVMValueRef val =
3473       ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.lo", ctx->i32,
3474                          (LLVMValueRef[]){mask_lo, add_src}, 2, AC_FUNC_ATTR_READNONE);
3475    val = ac_build_intrinsic(ctx, "llvm.amdgcn.mbcnt.hi", ctx->i32, (LLVMValueRef[]){mask_hi, val},
3476                             2, AC_FUNC_ATTR_READNONE);
3477    ac_set_range_metadata(ctx, val, 0, ctx->wave_size);
3478    return val;
3479 }
3480 
3481 LLVMValueRef ac_build_mbcnt(struct ac_llvm_context *ctx, LLVMValueRef mask)
3482 {
3483    return ac_build_mbcnt_add(ctx, mask, ctx->i32_0);
3484 }
3485 
3486 enum dpp_ctrl
3487 {
3488    _dpp_quad_perm = 0x000,
3489    _dpp_row_sl = 0x100,
3490    _dpp_row_sr = 0x110,
3491    _dpp_row_rr = 0x120,
3492    dpp_wf_sl1 = 0x130,
3493    dpp_wf_rl1 = 0x134,
3494    dpp_wf_sr1 = 0x138,
3495    dpp_wf_rr1 = 0x13C,
3496    dpp_row_mirror = 0x140,
3497    dpp_row_half_mirror = 0x141,
3498    dpp_row_bcast15 = 0x142,
3499    dpp_row_bcast31 = 0x143
3500 };
3501 
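/* Build a quad_perm DPP control: each 2-bit field selects which lane of the
 * quad is read, e.g. dpp_quad_perm(0, 0, 0, 0) broadcasts lane 0 to all four
 * lanes of the quad.
 */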
3502 static inline enum dpp_ctrl dpp_quad_perm(unsigned lane0, unsigned lane1, unsigned lane2,
3503                                           unsigned lane3)
3504 {
3505    assert(lane0 < 4 && lane1 < 4 && lane2 < 4 && lane3 < 4);
3506    return _dpp_quad_perm | lane0 | (lane1 << 2) | (lane2 << 4) | (lane3 << 6);
3507 }
3508 
3509 static inline enum dpp_ctrl dpp_row_sr(unsigned amount)
3510 {
3511    assert(amount > 0 && amount < 16);
3512    return _dpp_row_sr | amount;
3513 }
3514 
3515 static LLVMValueRef _ac_build_dpp(struct ac_llvm_context *ctx, LLVMValueRef old, LLVMValueRef src,
3516                                   enum dpp_ctrl dpp_ctrl, unsigned row_mask, unsigned bank_mask,
3517                                   bool bound_ctrl)
3518 {
3519    LLVMTypeRef type = LLVMTypeOf(src);
3520    LLVMValueRef res;
3521 
3522    old = LLVMBuildZExt(ctx->builder, old, ctx->i32, "");
3523    src = LLVMBuildZExt(ctx->builder, src, ctx->i32, "");
3524 
3525    res = ac_build_intrinsic(
3526       ctx, "llvm.amdgcn.update.dpp.i32", ctx->i32,
3527       (LLVMValueRef[]){old, src, LLVMConstInt(ctx->i32, dpp_ctrl, 0),
3528                        LLVMConstInt(ctx->i32, row_mask, 0), LLVMConstInt(ctx->i32, bank_mask, 0),
3529                        LLVMConstInt(ctx->i1, bound_ctrl, 0)},
3530       6, AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
3531 
3532    return LLVMBuildTrunc(ctx->builder, res, type, "");
3533 }
3534 
3535 static LLVMValueRef ac_build_dpp(struct ac_llvm_context *ctx, LLVMValueRef old, LLVMValueRef src,
3536                                  enum dpp_ctrl dpp_ctrl, unsigned row_mask, unsigned bank_mask,
3537                                  bool bound_ctrl)
3538 {
3539    LLVMTypeRef src_type = LLVMTypeOf(src);
3540    src = ac_to_integer(ctx, src);
3541    old = ac_to_integer(ctx, old);
3542    unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
3543    LLVMValueRef ret;
3544    if (bits > 32) {
3545       assert(bits % 32 == 0);
3546       LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
3547       LLVMValueRef src_vector = LLVMBuildBitCast(ctx->builder, src, vec_type, "");
3548       LLVMValueRef old_vector = LLVMBuildBitCast(ctx->builder, old, vec_type, "");
3549       ret = LLVMGetUndef(vec_type);
3550       for (unsigned i = 0; i < bits / 32; i++) {
3551          src = LLVMBuildExtractElement(ctx->builder, src_vector, LLVMConstInt(ctx->i32, i, 0), "");
3552          old = LLVMBuildExtractElement(ctx->builder, old_vector, LLVMConstInt(ctx->i32, i, 0), "");
3553          LLVMValueRef ret_comp =
3554             _ac_build_dpp(ctx, old, src, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
3555          ret =
3556             LLVMBuildInsertElement(ctx->builder, ret, ret_comp, LLVMConstInt(ctx->i32, i, 0), "");
3557       }
3558    } else {
3559       ret = _ac_build_dpp(ctx, old, src, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
3560    }
3561    return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
3562 }
3563 
3564 static LLVMValueRef _ac_build_permlane16(struct ac_llvm_context *ctx, LLVMValueRef src,
3565                                          uint64_t sel, bool exchange_rows, bool bound_ctrl)
3566 {
3567    LLVMTypeRef type = LLVMTypeOf(src);
3568    LLVMValueRef result;
3569 
3570    src = LLVMBuildZExt(ctx->builder, src, ctx->i32, "");
3571 
3572    LLVMValueRef args[6] = {
3573       src,
3574       src,
3575       LLVMConstInt(ctx->i32, sel, false),
3576       LLVMConstInt(ctx->i32, sel >> 32, false),
3577       ctx->i1true, /* fi */
3578       bound_ctrl ? ctx->i1true : ctx->i1false,
3579    };
3580 
3581    result =
3582       ac_build_intrinsic(ctx, exchange_rows ? "llvm.amdgcn.permlanex16" : "llvm.amdgcn.permlane16",
3583                          ctx->i32, args, 6, AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
3584 
3585    return LLVMBuildTrunc(ctx->builder, result, type, "");
3586 }
3587 
3588 static LLVMValueRef ac_build_permlane16(struct ac_llvm_context *ctx, LLVMValueRef src, uint64_t sel,
3589                                         bool exchange_rows, bool bound_ctrl)
3590 {
3591    LLVMTypeRef src_type = LLVMTypeOf(src);
3592    src = ac_to_integer(ctx, src);
3593    unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
3594    LLVMValueRef ret;
3595    if (bits > 32) {
3596       assert(bits % 32 == 0);
3597       LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
3598       LLVMValueRef src_vector = LLVMBuildBitCast(ctx->builder, src, vec_type, "");
3599       ret = LLVMGetUndef(vec_type);
3600       for (unsigned i = 0; i < bits / 32; i++) {
3601          src = LLVMBuildExtractElement(ctx->builder, src_vector, LLVMConstInt(ctx->i32, i, 0), "");
3602          LLVMValueRef ret_comp = _ac_build_permlane16(ctx, src, sel, exchange_rows, bound_ctrl);
3603          ret =
3604             LLVMBuildInsertElement(ctx->builder, ret, ret_comp, LLVMConstInt(ctx->i32, i, 0), "");
3605       }
3606    } else {
3607       ret = _ac_build_permlane16(ctx, src, sel, exchange_rows, bound_ctrl);
3608    }
3609    return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
3610 }
3611 
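/* Build a ds_swizzle bit-mode pattern. Within each group of 32 lanes, a lane
 * reads from lane ((lane_id & and_mask) | or_mask) ^ xor_mask.
 */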
3612 static inline unsigned ds_pattern_bitmode(unsigned and_mask, unsigned or_mask, unsigned xor_mask)
3613 {
3614    assert(and_mask < 32 && or_mask < 32 && xor_mask < 32);
3615    return and_mask | (or_mask << 5) | (xor_mask << 10);
3616 }
3617 
3618 static LLVMValueRef _ac_build_ds_swizzle(struct ac_llvm_context *ctx, LLVMValueRef src,
3619                                          unsigned mask)
3620 {
3621    LLVMTypeRef src_type = LLVMTypeOf(src);
3622    LLVMValueRef ret;
3623 
3624    src = LLVMBuildZExt(ctx->builder, src, ctx->i32, "");
3625 
3626    ret = ac_build_intrinsic(ctx, "llvm.amdgcn.ds.swizzle", ctx->i32,
3627                             (LLVMValueRef[]){src, LLVMConstInt(ctx->i32, mask, 0)}, 2,
3628                             AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
3629 
3630    return LLVMBuildTrunc(ctx->builder, ret, src_type, "");
3631 }
3632 
3633 LLVMValueRef ac_build_ds_swizzle(struct ac_llvm_context *ctx, LLVMValueRef src, unsigned mask)
3634 {
3635    LLVMTypeRef src_type = LLVMTypeOf(src);
3636    src = ac_to_integer(ctx, src);
3637    unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(src));
3638    LLVMValueRef ret;
3639    if (bits > 32) {
3640       assert(bits % 32 == 0);
3641       LLVMTypeRef vec_type = LLVMVectorType(ctx->i32, bits / 32);
3642       LLVMValueRef src_vector = LLVMBuildBitCast(ctx->builder, src, vec_type, "");
3643       ret = LLVMGetUndef(vec_type);
3644       for (unsigned i = 0; i < bits / 32; i++) {
3645          src = LLVMBuildExtractElement(ctx->builder, src_vector, LLVMConstInt(ctx->i32, i, 0), "");
3646          LLVMValueRef ret_comp = _ac_build_ds_swizzle(ctx, src, mask);
3647          ret =
3648             LLVMBuildInsertElement(ctx->builder, ret, ret_comp, LLVMConstInt(ctx->i32, i, 0), "");
3649       }
3650    } else {
3651       ret = _ac_build_ds_swizzle(ctx, src, mask);
3652    }
3653    return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
3654 }
3655 
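/* Mark a value as computed in Whole Wave Mode: llvm.amdgcn.wwm forces the
 * computation feeding it to run with all lanes enabled, which the subgroup
 * scan/reduce code below relies on.
 */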
3656 static LLVMValueRef ac_build_wwm(struct ac_llvm_context *ctx, LLVMValueRef src)
3657 {
3658    LLVMTypeRef src_type = LLVMTypeOf(src);
3659    unsigned bitsize = ac_get_elem_bits(ctx, src_type);
3660    char name[32], type[8];
3661    LLVMValueRef ret;
3662 
3663    src = ac_to_integer(ctx, src);
3664 
3665    if (bitsize < 32)
3666       src = LLVMBuildZExt(ctx->builder, src, ctx->i32, "");
3667 
3668    ac_build_type_name_for_intr(LLVMTypeOf(src), type, sizeof(type));
3669    snprintf(name, sizeof(name), "llvm.amdgcn.wwm.%s", type);
3670    ret = ac_build_intrinsic(ctx, name, LLVMTypeOf(src), (LLVMValueRef[]){src}, 1,
3671                             AC_FUNC_ATTR_READNONE);
3672 
3673    if (bitsize < 32)
3674       ret = LLVMBuildTrunc(ctx->builder, ret, ac_to_integer_type(ctx, src_type), "");
3675 
3676    return LLVMBuildBitCast(ctx->builder, ret, src_type, "");
3677 }
3678 
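/* Return `src` for active lanes and `inactive` for inactive lanes
 * (llvm.amdgcn.set.inactive), typically paired with WWM so that reductions see
 * a well-defined identity value in the inactive lanes.
 */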
3679 static LLVMValueRef ac_build_set_inactive(struct ac_llvm_context *ctx, LLVMValueRef src,
3680                                           LLVMValueRef inactive)
3681 {
3682    char name[33], type[8];
3683    LLVMTypeRef src_type = LLVMTypeOf(src);
3684    unsigned bitsize = ac_get_elem_bits(ctx, src_type);
3685    src = ac_to_integer(ctx, src);
3686    inactive = ac_to_integer(ctx, inactive);
3687 
3688    if (bitsize < 32) {
3689       src = LLVMBuildZExt(ctx->builder, src, ctx->i32, "");
3690       inactive = LLVMBuildZExt(ctx->builder, inactive, ctx->i32, "");
3691    }
3692 
3693    ac_build_type_name_for_intr(LLVMTypeOf(src), type, sizeof(type));
3694    snprintf(name, sizeof(name), "llvm.amdgcn.set.inactive.%s", type);
3695    LLVMValueRef ret =
3696       ac_build_intrinsic(ctx, name, LLVMTypeOf(src), (LLVMValueRef[]){src, inactive}, 2,
3697                          AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
3698    if (bitsize < 32)
3699       ret = LLVMBuildTrunc(ctx->builder, ret, src_type, "");
3700 
3701    return ret;
3702 }
3703 
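/* Return the identity element for the given reduction op and type size in
 * bytes (type_size == 0 means a 1-bit boolean), used to initialize inactive
 * lanes before a scan or reduction.
 */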
3704 static LLVMValueRef get_reduction_identity(struct ac_llvm_context *ctx, nir_op op,
3705                                            unsigned type_size)
3706 {
3707 
3708    if (type_size == 0) {
3709       switch (op) {
3710       case nir_op_ior:
3711       case nir_op_ixor:
3712          return LLVMConstInt(ctx->i1, 0, 0);
3713       case nir_op_iand:
3714          return LLVMConstInt(ctx->i1, 1, 0);
3715       default:
3716          unreachable("bad reduction intrinsic");
3717       }
3718    } else if (type_size == 1) {
3719       switch (op) {
3720       case nir_op_iadd:
3721          return ctx->i8_0;
3722       case nir_op_imul:
3723          return ctx->i8_1;
3724       case nir_op_imin:
3725          return LLVMConstInt(ctx->i8, INT8_MAX, 0);
3726       case nir_op_umin:
3727          return LLVMConstInt(ctx->i8, UINT8_MAX, 0);
3728       case nir_op_imax:
3729          return LLVMConstInt(ctx->i8, INT8_MIN, 0);
3730       case nir_op_umax:
3731          return ctx->i8_0;
3732       case nir_op_iand:
3733          return LLVMConstInt(ctx->i8, -1, 0);
3734       case nir_op_ior:
3735          return ctx->i8_0;
3736       case nir_op_ixor:
3737          return ctx->i8_0;
3738       default:
3739          unreachable("bad reduction intrinsic");
3740       }
3741    } else if (type_size == 2) {
3742       switch (op) {
3743       case nir_op_iadd:
3744          return ctx->i16_0;
3745       case nir_op_fadd:
3746          return ctx->f16_0;
3747       case nir_op_imul:
3748          return ctx->i16_1;
3749       case nir_op_fmul:
3750          return ctx->f16_1;
3751       case nir_op_imin:
3752          return LLVMConstInt(ctx->i16, INT16_MAX, 0);
3753       case nir_op_umin:
3754          return LLVMConstInt(ctx->i16, UINT16_MAX, 0);
3755       case nir_op_fmin:
3756          return LLVMConstReal(ctx->f16, INFINITY);
3757       case nir_op_imax:
3758          return LLVMConstInt(ctx->i16, INT16_MIN, 0);
3759       case nir_op_umax:
3760          return ctx->i16_0;
3761       case nir_op_fmax:
3762          return LLVMConstReal(ctx->f16, -INFINITY);
3763       case nir_op_iand:
3764          return LLVMConstInt(ctx->i16, -1, 0);
3765       case nir_op_ior:
3766          return ctx->i16_0;
3767       case nir_op_ixor:
3768          return ctx->i16_0;
3769       default:
3770          unreachable("bad reduction intrinsic");
3771       }
3772    } else if (type_size == 4) {
3773       switch (op) {
3774       case nir_op_iadd:
3775          return ctx->i32_0;
3776       case nir_op_fadd:
3777          return ctx->f32_0;
3778       case nir_op_imul:
3779          return ctx->i32_1;
3780       case nir_op_fmul:
3781          return ctx->f32_1;
3782       case nir_op_imin:
3783          return LLVMConstInt(ctx->i32, INT32_MAX, 0);
3784       case nir_op_umin:
3785          return LLVMConstInt(ctx->i32, UINT32_MAX, 0);
3786       case nir_op_fmin:
3787          return LLVMConstReal(ctx->f32, INFINITY);
3788       case nir_op_imax:
3789          return LLVMConstInt(ctx->i32, INT32_MIN, 0);
3790       case nir_op_umax:
3791          return ctx->i32_0;
3792       case nir_op_fmax:
3793          return LLVMConstReal(ctx->f32, -INFINITY);
3794       case nir_op_iand:
3795          return LLVMConstInt(ctx->i32, -1, 0);
3796       case nir_op_ior:
3797          return ctx->i32_0;
3798       case nir_op_ixor:
3799          return ctx->i32_0;
3800       default:
3801          unreachable("bad reduction intrinsic");
3802       }
3803    } else { /* type_size == 8 (64-bit) */
3804       switch (op) {
3805       case nir_op_iadd:
3806          return ctx->i64_0;
3807       case nir_op_fadd:
3808          return ctx->f64_0;
3809       case nir_op_imul:
3810          return ctx->i64_1;
3811       case nir_op_fmul:
3812          return ctx->f64_1;
3813       case nir_op_imin:
3814          return LLVMConstInt(ctx->i64, INT64_MAX, 0);
3815       case nir_op_umin:
3816          return LLVMConstInt(ctx->i64, UINT64_MAX, 0);
3817       case nir_op_fmin:
3818          return LLVMConstReal(ctx->f64, INFINITY);
3819       case nir_op_imax:
3820          return LLVMConstInt(ctx->i64, INT64_MIN, 0);
3821       case nir_op_umax:
3822          return ctx->i64_0;
3823       case nir_op_fmax:
3824          return LLVMConstReal(ctx->f64, -INFINITY);
3825       case nir_op_iand:
3826          return LLVMConstInt(ctx->i64, -1, 0);
3827       case nir_op_ior:
3828          return ctx->i64_0;
3829       case nir_op_ixor:
3830          return ctx->i64_0;
3831       default:
3832          unreachable("bad reduction intrinsic");
3833       }
3834    }
3835 }
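
/* A minimal illustration of why the identity matters: inactive lanes are
 * filled with it by ac_build_set_inactive() below, so they must not change
 * the result. For a umin reduction the identity is UINT32_MAX:
 *
 *    min(x, UINT32_MAX) == x   for every active lane value x
 *
 * while for iadd the identity is 0.
 */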
3836 
3837 static LLVMValueRef ac_build_alu_op(struct ac_llvm_context *ctx, LLVMValueRef lhs, LLVMValueRef rhs,
3838                                     nir_op op)
3839 {
3840    bool _64bit = ac_get_type_size(LLVMTypeOf(lhs)) == 8;
3841    bool _32bit = ac_get_type_size(LLVMTypeOf(lhs)) == 4;
3842    switch (op) {
3843    case nir_op_iadd:
3844       return LLVMBuildAdd(ctx->builder, lhs, rhs, "");
3845    case nir_op_fadd:
3846       return LLVMBuildFAdd(ctx->builder, lhs, rhs, "");
3847    case nir_op_imul:
3848       return LLVMBuildMul(ctx->builder, lhs, rhs, "");
3849    case nir_op_fmul:
3850       return LLVMBuildFMul(ctx->builder, lhs, rhs, "");
3851    case nir_op_imin:
3852       return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder, LLVMIntSLT, lhs, rhs, ""),
3853                              lhs, rhs, "");
3854    case nir_op_umin:
3855       return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder, LLVMIntULT, lhs, rhs, ""),
3856                              lhs, rhs, "");
3857    case nir_op_fmin:
3858       return ac_build_intrinsic(
3859          ctx, _64bit ? "llvm.minnum.f64" : _32bit ? "llvm.minnum.f32" : "llvm.minnum.f16",
3860          _64bit ? ctx->f64 : _32bit ? ctx->f32 : ctx->f16, (LLVMValueRef[]){lhs, rhs}, 2,
3861          AC_FUNC_ATTR_READNONE);
3862    case nir_op_imax:
3863       return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder, LLVMIntSGT, lhs, rhs, ""),
3864                              lhs, rhs, "");
3865    case nir_op_umax:
3866       return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder, LLVMIntUGT, lhs, rhs, ""),
3867                              lhs, rhs, "");
3868    case nir_op_fmax:
3869       return ac_build_intrinsic(
3870          ctx, _64bit ? "llvm.maxnum.f64" : _32bit ? "llvm.maxnum.f32" : "llvm.maxnum.f16",
3871          _64bit ? ctx->f64 : _32bit ? ctx->f32 : ctx->f16, (LLVMValueRef[]){lhs, rhs}, 2,
3872          AC_FUNC_ATTR_READNONE);
3873    case nir_op_iand:
3874       return LLVMBuildAnd(ctx->builder, lhs, rhs, "");
3875    case nir_op_ior:
3876       return LLVMBuildOr(ctx->builder, lhs, rhs, "");
3877    case nir_op_ixor:
3878       return LLVMBuildXor(ctx->builder, lhs, rhs, "");
3879    default:
3880       unreachable("bad reduction intrinsic");
3881    }
3882 }
3883 
3884 /**
3885  * \param src The value to shift.
3886  * \param identity The value to use for the first lane.
3887  * \param maxprefix specifies that the result only needs to be correct for a
3888  *     prefix of this many threads.
3889  * \return src, shifted 1 lane up, and identity shifted into lane 0.
3890  */
3891 static LLVMValueRef ac_wavefront_shift_right_1(struct ac_llvm_context *ctx, LLVMValueRef src,
3892                                                LLVMValueRef identity, unsigned maxprefix)
3893 {
3894    if (ctx->chip_class >= GFX10) {
3895       /* wavefront shift_right by 1 on GFX10 (emulate dpp_wf_sr1) */
3896       LLVMValueRef active, tmp1, tmp2;
3897       LLVMValueRef tid = ac_get_thread_id(ctx);
3898 
3899       tmp1 = ac_build_dpp(ctx, identity, src, dpp_row_sr(1), 0xf, 0xf, false);
3900 
3901       tmp2 = ac_build_permlane16(ctx, src, (uint64_t)~0, true, false);
3902 
3903       if (maxprefix > 32) {
3904          active =
3905             LLVMBuildICmp(ctx->builder, LLVMIntEQ, tid, LLVMConstInt(ctx->i32, 32, false), "");
3906 
3907          tmp2 = LLVMBuildSelect(ctx->builder, active,
3908                                 ac_build_readlane(ctx, src, LLVMConstInt(ctx->i32, 31, false)),
3909                                 tmp2, "");
3910 
3911          active = LLVMBuildOr(
3912             ctx->builder, active,
3913             LLVMBuildICmp(ctx->builder, LLVMIntEQ,
3914                           LLVMBuildAnd(ctx->builder, tid, LLVMConstInt(ctx->i32, 0x1f, false), ""),
3915                           LLVMConstInt(ctx->i32, 0x10, false), ""),
3916             "");
3917          return LLVMBuildSelect(ctx->builder, active, tmp2, tmp1, "");
3918       } else if (maxprefix > 16) {
3919          active =
3920             LLVMBuildICmp(ctx->builder, LLVMIntEQ, tid, LLVMConstInt(ctx->i32, 16, false), "");
3921 
3922          return LLVMBuildSelect(ctx->builder, active, tmp2, tmp1, "");
3923       }
3924    } else if (ctx->chip_class >= GFX8) {
3925       return ac_build_dpp(ctx, identity, src, dpp_wf_sr1, 0xf, 0xf, false);
3926    }
3927 
3928    /* wavefront shift_right by 1 on SI/CI */
3929    LLVMValueRef active, tmp1, tmp2;
3930    LLVMValueRef tid = ac_get_thread_id(ctx);
3931    tmp1 = ac_build_ds_swizzle(ctx, src, (1 << 15) | dpp_quad_perm(0, 0, 1, 2));
3932    tmp2 = ac_build_ds_swizzle(ctx, src, ds_pattern_bitmode(0x18, 0x03, 0x00));
3933    active = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
3934                           LLVMBuildAnd(ctx->builder, tid, LLVMConstInt(ctx->i32, 0x7, 0), ""),
3935                           LLVMConstInt(ctx->i32, 0x4, 0), "");
3936    tmp1 = LLVMBuildSelect(ctx->builder, active, tmp2, tmp1, "");
3937    tmp2 = ac_build_ds_swizzle(ctx, src, ds_pattern_bitmode(0x10, 0x07, 0x00));
3938    active = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
3939                           LLVMBuildAnd(ctx->builder, tid, LLVMConstInt(ctx->i32, 0xf, 0), ""),
3940                           LLVMConstInt(ctx->i32, 0x8, 0), "");
3941    tmp1 = LLVMBuildSelect(ctx->builder, active, tmp2, tmp1, "");
3942    tmp2 = ac_build_ds_swizzle(ctx, src, ds_pattern_bitmode(0x00, 0x0f, 0x00));
3943    active = LLVMBuildICmp(ctx->builder, LLVMIntEQ,
3944                           LLVMBuildAnd(ctx->builder, tid, LLVMConstInt(ctx->i32, 0x1f, 0), ""),
3945                           LLVMConstInt(ctx->i32, 0x10, 0), "");
3946    tmp1 = LLVMBuildSelect(ctx->builder, active, tmp2, tmp1, "");
3947    tmp2 = ac_build_readlane(ctx, src, LLVMConstInt(ctx->i32, 31, 0));
3948    active = LLVMBuildICmp(ctx->builder, LLVMIntEQ, tid, LLVMConstInt(ctx->i32, 32, 0), "");
3949    tmp1 = LLVMBuildSelect(ctx->builder, active, tmp2, tmp1, "");
3950    active = LLVMBuildICmp(ctx->builder, LLVMIntEQ, tid, LLVMConstInt(ctx->i32, 0, 0), "");
3951    return LLVMBuildSelect(ctx->builder, active, identity, tmp1, "");
3952 }
3953 
3954 /**
3955  * \param maxprefix specifies that the result only needs to be correct for a
3956  *     prefix of this many threads.
3957  */
3958 static LLVMValueRef ac_build_scan(struct ac_llvm_context *ctx, nir_op op, LLVMValueRef src,
3959                                   LLVMValueRef identity, unsigned maxprefix, bool inclusive)
3960 {
3961    LLVMValueRef result, tmp;
3962 
3963    if (!inclusive)
3964       src = ac_wavefront_shift_right_1(ctx, src, identity, maxprefix);
3965 
3966    result = src;
3967 
3968    if (ctx->chip_class <= GFX7) {
3969       assert(maxprefix == 64);
3970       LLVMValueRef tid = ac_get_thread_id(ctx);
3971       LLVMValueRef active;
3972       tmp = ac_build_ds_swizzle(ctx, src, ds_pattern_bitmode(0x1e, 0x00, 0x00));
3973       active = LLVMBuildICmp(ctx->builder, LLVMIntNE,
3974                              LLVMBuildAnd(ctx->builder, tid, ctx->i32_1, ""), ctx->i32_0, "");
3975       tmp = LLVMBuildSelect(ctx->builder, active, tmp, identity, "");
3976       result = ac_build_alu_op(ctx, result, tmp, op);
3977       tmp = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1c, 0x01, 0x00));
3978       active = LLVMBuildICmp(ctx->builder, LLVMIntNE,
3979                              LLVMBuildAnd(ctx->builder, tid, LLVMConstInt(ctx->i32, 2, 0), ""),
3980                              ctx->i32_0, "");
3981       tmp = LLVMBuildSelect(ctx->builder, active, tmp, identity, "");
3982       result = ac_build_alu_op(ctx, result, tmp, op);
3983       tmp = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x18, 0x03, 0x00));
3984       active = LLVMBuildICmp(ctx->builder, LLVMIntNE,
3985                              LLVMBuildAnd(ctx->builder, tid, LLVMConstInt(ctx->i32, 4, 0), ""),
3986                              ctx->i32_0, "");
3987       tmp = LLVMBuildSelect(ctx->builder, active, tmp, identity, "");
3988       result = ac_build_alu_op(ctx, result, tmp, op);
3989       tmp = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x10, 0x07, 0x00));
3990       active = LLVMBuildICmp(ctx->builder, LLVMIntNE,
3991                              LLVMBuildAnd(ctx->builder, tid, LLVMConstInt(ctx->i32, 8, 0), ""),
3992                              ctx->i32_0, "");
3993       tmp = LLVMBuildSelect(ctx->builder, active, tmp, identity, "");
3994       result = ac_build_alu_op(ctx, result, tmp, op);
3995       tmp = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x00, 0x0f, 0x00));
3996       active = LLVMBuildICmp(ctx->builder, LLVMIntNE,
3997                              LLVMBuildAnd(ctx->builder, tid, LLVMConstInt(ctx->i32, 16, 0), ""),
3998                              ctx->i32_0, "");
3999       tmp = LLVMBuildSelect(ctx->builder, active, tmp, identity, "");
4000       result = ac_build_alu_op(ctx, result, tmp, op);
4001       tmp = ac_build_readlane(ctx, result, LLVMConstInt(ctx->i32, 31, 0));
4002       active = LLVMBuildICmp(ctx->builder, LLVMIntNE,
4003                              LLVMBuildAnd(ctx->builder, tid, LLVMConstInt(ctx->i32, 32, 0), ""),
4004                              ctx->i32_0, "");
4005       tmp = LLVMBuildSelect(ctx->builder, active, tmp, identity, "");
4006       result = ac_build_alu_op(ctx, result, tmp, op);
4007       return result;
4008    }
4009 
4010    if (maxprefix <= 1)
4011       return result;
4012    tmp = ac_build_dpp(ctx, identity, src, dpp_row_sr(1), 0xf, 0xf, false);
4013    result = ac_build_alu_op(ctx, result, tmp, op);
4014    if (maxprefix <= 2)
4015       return result;
4016    tmp = ac_build_dpp(ctx, identity, src, dpp_row_sr(2), 0xf, 0xf, false);
4017    result = ac_build_alu_op(ctx, result, tmp, op);
4018    if (maxprefix <= 3)
4019       return result;
4020    tmp = ac_build_dpp(ctx, identity, src, dpp_row_sr(3), 0xf, 0xf, false);
4021    result = ac_build_alu_op(ctx, result, tmp, op);
4022    if (maxprefix <= 4)
4023       return result;
4024    tmp = ac_build_dpp(ctx, identity, result, dpp_row_sr(4), 0xf, 0xe, false);
4025    result = ac_build_alu_op(ctx, result, tmp, op);
4026    if (maxprefix <= 8)
4027       return result;
4028    tmp = ac_build_dpp(ctx, identity, result, dpp_row_sr(8), 0xf, 0xc, false);
4029    result = ac_build_alu_op(ctx, result, tmp, op);
4030    if (maxprefix <= 16)
4031       return result;
4032 
4033    if (ctx->chip_class >= GFX10) {
4034       LLVMValueRef tid = ac_get_thread_id(ctx);
4035       LLVMValueRef active;
4036 
4037       tmp = ac_build_permlane16(ctx, result, ~(uint64_t)0, true, false);
4038 
4039       active = LLVMBuildICmp(ctx->builder, LLVMIntNE,
4040                              LLVMBuildAnd(ctx->builder, tid, LLVMConstInt(ctx->i32, 16, false), ""),
4041                              ctx->i32_0, "");
4042 
4043       tmp = LLVMBuildSelect(ctx->builder, active, tmp, identity, "");
4044 
4045       result = ac_build_alu_op(ctx, result, tmp, op);
4046 
4047       if (maxprefix <= 32)
4048          return result;
4049 
4050       tmp = ac_build_readlane(ctx, result, LLVMConstInt(ctx->i32, 31, false));
4051 
4052       active = LLVMBuildICmp(ctx->builder, LLVMIntUGE, tid, LLVMConstInt(ctx->i32, 32, false), "");
4053 
4054       tmp = LLVMBuildSelect(ctx->builder, active, tmp, identity, "");
4055 
4056       result = ac_build_alu_op(ctx, result, tmp, op);
4057       return result;
4058    }
4059 
4060    tmp = ac_build_dpp(ctx, identity, result, dpp_row_bcast15, 0xa, 0xf, false);
4061    result = ac_build_alu_op(ctx, result, tmp, op);
4062    if (maxprefix <= 32)
4063       return result;
4064    tmp = ac_build_dpp(ctx, identity, result, dpp_row_bcast31, 0xc, 0xf, false);
4065    result = ac_build_alu_op(ctx, result, tmp, op);
4066    return result;
4067 }
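
/* Worked example (illustrative): for an inclusive iadd scan where every lane
 * holds 1, the shift-and-combine steps above implement a Hillis-Steele prefix
 * sum, so lane i ends up holding i + 1. With inclusive == false the input is
 * first shifted right by one lane (identity in lane 0), so lane i ends up
 * holding i.
 */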
4068 
4069 LLVMValueRef ac_build_inclusive_scan(struct ac_llvm_context *ctx, LLVMValueRef src, nir_op op)
4070 {
4071    LLVMValueRef result;
4072 
4073    if (LLVMTypeOf(src) == ctx->i1 && op == nir_op_iadd) {
4074       LLVMBuilderRef builder = ctx->builder;
4075       src = LLVMBuildZExt(builder, src, ctx->i32, "");
4076       result = ac_build_ballot(ctx, src);
4077       result = ac_build_mbcnt(ctx, result);
4078       result = LLVMBuildAdd(builder, result, src, "");
4079       return result;
4080    }
4081 
4082    ac_build_optimization_barrier(ctx, &src, false);
4083 
4084    LLVMValueRef identity = get_reduction_identity(ctx, op, ac_get_type_size(LLVMTypeOf(src)));
4085    result = LLVMBuildBitCast(ctx->builder, ac_build_set_inactive(ctx, src, identity),
4086                              LLVMTypeOf(identity), "");
4087    result = ac_build_scan(ctx, op, result, identity, ctx->wave_size, true);
4088 
4089    return ac_build_wwm(ctx, result);
4090 }
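
/* Usage sketch (hypothetical caller): a running per-lane total, where the
 * result in lane i is the sum of lanes 0..i. "value" is an assumed i32
 * LLVMValueRef.
 *
 *    LLVMValueRef running_sum = ac_build_inclusive_scan(ctx, value, nir_op_iadd);
 */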
4091 
4092 LLVMValueRef ac_build_exclusive_scan(struct ac_llvm_context *ctx, LLVMValueRef src, nir_op op)
4093 {
4094    LLVMValueRef result;
4095 
4096    if (LLVMTypeOf(src) == ctx->i1 && op == nir_op_iadd) {
4097       LLVMBuilderRef builder = ctx->builder;
4098       src = LLVMBuildZExt(builder, src, ctx->i32, "");
4099       result = ac_build_ballot(ctx, src);
4100       result = ac_build_mbcnt(ctx, result);
4101       return result;
4102    }
4103 
4104    ac_build_optimization_barrier(ctx, &src, false);
4105 
4106    LLVMValueRef identity = get_reduction_identity(ctx, op, ac_get_type_size(LLVMTypeOf(src)));
4107    result = LLVMBuildBitCast(ctx->builder, ac_build_set_inactive(ctx, src, identity),
4108                              LLVMTypeOf(identity), "");
4109    result = ac_build_scan(ctx, op, result, identity, ctx->wave_size, false);
4110 
4111    return ac_build_wwm(ctx, result);
4112 }
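
/* Usage sketch (hypothetical caller): stream compaction. Each lane that wants
 * to append an element computes its output slot as the exclusive iadd scan of
 * a 0/1 flag; the i1 fast path above turns this into ballot + mbcnt.
 * "wants_to_append" is an assumed i1 LLVMValueRef.
 *
 *    LLVMValueRef slot = ac_build_exclusive_scan(ctx, wants_to_append, nir_op_iadd);
 */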
4113 
4114 LLVMValueRef ac_build_reduce(struct ac_llvm_context *ctx, LLVMValueRef src, nir_op op,
4115                              unsigned cluster_size)
4116 {
4117    if (cluster_size == 1)
4118       return src;
4119    ac_build_optimization_barrier(ctx, &src, false);
4120    LLVMValueRef result, swap;
4121    LLVMValueRef identity = get_reduction_identity(ctx, op, ac_get_type_size(LLVMTypeOf(src)));
4122    result = LLVMBuildBitCast(ctx->builder, ac_build_set_inactive(ctx, src, identity),
4123                              LLVMTypeOf(identity), "");
4124    swap = ac_build_quad_swizzle(ctx, result, 1, 0, 3, 2);
4125    result = ac_build_alu_op(ctx, result, swap, op);
4126    if (cluster_size == 2)
4127       return ac_build_wwm(ctx, result);
4128 
4129    swap = ac_build_quad_swizzle(ctx, result, 2, 3, 0, 1);
4130    result = ac_build_alu_op(ctx, result, swap, op);
4131    if (cluster_size == 4)
4132       return ac_build_wwm(ctx, result);
4133 
4134    if (ctx->chip_class >= GFX8)
4135       swap = ac_build_dpp(ctx, identity, result, dpp_row_half_mirror, 0xf, 0xf, false);
4136    else
4137       swap = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1f, 0, 0x04));
4138    result = ac_build_alu_op(ctx, result, swap, op);
4139    if (cluster_size == 8)
4140       return ac_build_wwm(ctx, result);
4141 
4142    if (ctx->chip_class >= GFX8)
4143       swap = ac_build_dpp(ctx, identity, result, dpp_row_mirror, 0xf, 0xf, false);
4144    else
4145       swap = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1f, 0, 0x08));
4146    result = ac_build_alu_op(ctx, result, swap, op);
4147    if (cluster_size == 16)
4148       return ac_build_wwm(ctx, result);
4149 
4150    if (ctx->chip_class >= GFX10)
4151       swap = ac_build_permlane16(ctx, result, 0, true, false);
4152    else if (ctx->chip_class >= GFX8 && cluster_size != 32)
4153       swap = ac_build_dpp(ctx, identity, result, dpp_row_bcast15, 0xa, 0xf, false);
4154    else
4155       swap = ac_build_ds_swizzle(ctx, result, ds_pattern_bitmode(0x1f, 0, 0x10));
4156    result = ac_build_alu_op(ctx, result, swap, op);
4157    if (cluster_size == 32)
4158       return ac_build_wwm(ctx, result);
4159 
4160    if (ctx->chip_class >= GFX8) {
4161       if (ctx->wave_size == 64) {
4162          if (ctx->chip_class >= GFX10)
4163             swap = ac_build_readlane(ctx, result, LLVMConstInt(ctx->i32, 31, false));
4164          else
4165             swap = ac_build_dpp(ctx, identity, result, dpp_row_bcast31, 0xc, 0xf, false);
4166          result = ac_build_alu_op(ctx, result, swap, op);
4167          result = ac_build_readlane(ctx, result, LLVMConstInt(ctx->i32, 63, 0));
4168       }
4169 
4170       return ac_build_wwm(ctx, result);
4171    } else {
4172       swap = ac_build_readlane(ctx, result, ctx->i32_0);
4173       result = ac_build_readlane(ctx, result, LLVMConstInt(ctx->i32, 32, 0));
4174       result = ac_build_alu_op(ctx, result, swap, op);
4175       return ac_build_wwm(ctx, result);
4176    }
4177 }
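
/* Usage sketch (hypothetical caller): wave-wide maximum of an i32 value.
 * cluster_size == ctx->wave_size reduces across the whole wave; smaller
 * power-of-two cluster sizes reduce within each cluster only. "value" is an
 * assumed per-lane LLVMValueRef.
 *
 *    LLVMValueRef wave_max = ac_build_reduce(ctx, value, nir_op_imax, ctx->wave_size);
 */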
4178 
4179 /**
4180  * "Top half" of a scan that reduces per-wave values across an entire
4181  * workgroup.
4182  *
4183  * The source value must be present in the highest lane of the wave, and the
4184  * highest lane must be live.
4185  */
4186 void ac_build_wg_wavescan_top(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
4187 {
4188    if (ws->maxwaves <= 1)
4189       return;
4190 
4191    const LLVMValueRef last_lane = LLVMConstInt(ctx->i32, ctx->wave_size - 1, false);
4192    LLVMBuilderRef builder = ctx->builder;
4193    LLVMValueRef tid = ac_get_thread_id(ctx);
4194    LLVMValueRef tmp;
4195 
4196    tmp = LLVMBuildICmp(builder, LLVMIntEQ, tid, last_lane, "");
4197    ac_build_ifcc(ctx, tmp, 1000);
4198    LLVMBuildStore(builder, ws->src, LLVMBuildGEP(builder, ws->scratch, &ws->waveidx, 1, ""));
4199    ac_build_endif(ctx, 1000);
4200 }
4201 
4202 /**
4203  * "Bottom half" of a scan that reduces per-wave values across an entire
4204  * workgroup.
4205  *
4206  * The caller must place a barrier between the top and bottom halves.
4207  */
4208 void ac_build_wg_wavescan_bottom(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
4209 {
4210    const LLVMTypeRef type = LLVMTypeOf(ws->src);
4211    const LLVMValueRef identity = get_reduction_identity(ctx, ws->op, ac_get_type_size(type));
4212 
4213    if (ws->maxwaves <= 1) {
4214       ws->result_reduce = ws->src;
4215       ws->result_inclusive = ws->src;
4216       ws->result_exclusive = identity;
4217       return;
4218    }
4219    assert(ws->maxwaves <= 32);
4220 
4221    LLVMBuilderRef builder = ctx->builder;
4222    LLVMValueRef tid = ac_get_thread_id(ctx);
4223    LLVMBasicBlockRef bbs[2];
4224    LLVMValueRef phivalues_scan[2];
4225    LLVMValueRef tmp, tmp2;
4226 
4227    bbs[0] = LLVMGetInsertBlock(builder);
4228    phivalues_scan[0] = LLVMGetUndef(type);
4229 
4230    if (ws->enable_reduce)
4231       tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, ws->numwaves, "");
4232    else if (ws->enable_inclusive)
4233       tmp = LLVMBuildICmp(builder, LLVMIntULE, tid, ws->waveidx, "");
4234    else
4235       tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, ws->waveidx, "");
4236    ac_build_ifcc(ctx, tmp, 1001);
4237    {
4238       tmp = LLVMBuildLoad(builder, LLVMBuildGEP(builder, ws->scratch, &tid, 1, ""), "");
4239 
4240       ac_build_optimization_barrier(ctx, &tmp, false);
4241 
4242       bbs[1] = LLVMGetInsertBlock(builder);
4243       phivalues_scan[1] = ac_build_scan(ctx, ws->op, tmp, identity, ws->maxwaves, true);
4244    }
4245    ac_build_endif(ctx, 1001);
4246 
4247    const LLVMValueRef scan = ac_build_phi(ctx, type, 2, phivalues_scan, bbs);
4248 
4249    if (ws->enable_reduce) {
4250       tmp = LLVMBuildSub(builder, ws->numwaves, ctx->i32_1, "");
4251       ws->result_reduce = ac_build_readlane(ctx, scan, tmp);
4252    }
4253    if (ws->enable_inclusive)
4254       ws->result_inclusive = ac_build_readlane(ctx, scan, ws->waveidx);
4255    if (ws->enable_exclusive) {
4256       tmp = LLVMBuildSub(builder, ws->waveidx, ctx->i32_1, "");
4257       tmp = ac_build_readlane(ctx, scan, tmp);
4258       tmp2 = LLVMBuildICmp(builder, LLVMIntEQ, ws->waveidx, ctx->i32_0, "");
4259       ws->result_exclusive = LLVMBuildSelect(builder, tmp2, identity, tmp, "");
4260    }
4261 }
4262 
4263 /**
4264  * Inclusive scan of a per-wave value across an entire workgroup.
4265  *
4266  * This implies an s_barrier instruction.
4267  *
4268  * Unlike ac_build_inclusive_scan, the caller \em must ensure that all threads
4269  * of the workgroup are live. (This requirement cannot easily be relaxed in a
4270  * useful manner because of the barrier in the algorithm.)
4271  */
4272 void ac_build_wg_wavescan(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
4273 {
4274    ac_build_wg_wavescan_top(ctx, ws);
4275    ac_build_s_barrier(ctx);
4276    ac_build_wg_wavescan_bottom(ctx, ws);
4277 }
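
/* Sketch of the expected setup (caller-side names below are assumptions, not
 * taken from a real caller): the caller fills a struct ac_wg_scan with the
 * per-wave value in ws->src (present and live in the highest lane), an LDS
 * scratch pointer with at least one slot per wave, the wave index/count and
 * the enable_* flags, then either calls this helper or splits it manually
 * around its own barrier:
 *
 *    struct ac_wg_scan ws = {0};
 *    ws.op = nir_op_iadd;
 *    ws.src = per_wave_value;     // assumed LLVMValueRef
 *    ws.scratch = lds_scratch;    // assumed LDS pointer, >= maxwaves slots
 *    ws.waveidx = wave_id;        // assumed LLVMValueRef
 *    ws.numwaves = num_waves;     // assumed LLVMValueRef
 *    ws.maxwaves = 32;
 *    ws.enable_reduce = true;
 *    ac_build_wg_wavescan_top(ctx, &ws);
 *    ac_build_s_barrier(ctx);
 *    ac_build_wg_wavescan_bottom(ctx, &ws);
 *    // ws.result_reduce now holds the workgroup-wide sum
 */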
4278 
4279 /**
4280  * "Top half" of a scan that reduces per-thread values across an entire
4281  * workgroup.
4282  *
4283  * All lanes must be active when this code runs.
4284  */
4285 void ac_build_wg_scan_top(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
4286 {
4287    if (ws->enable_exclusive) {
4288       ws->extra = ac_build_exclusive_scan(ctx, ws->src, ws->op);
4289       if (LLVMTypeOf(ws->src) == ctx->i1 && ws->op == nir_op_iadd)
4290          ws->src = LLVMBuildZExt(ctx->builder, ws->src, ctx->i32, "");
4291       ws->src = ac_build_alu_op(ctx, ws->extra, ws->src, ws->op);
4292    } else {
4293       ws->src = ac_build_inclusive_scan(ctx, ws->src, ws->op);
4294    }
4295 
4296    bool enable_inclusive = ws->enable_inclusive;
4297    bool enable_exclusive = ws->enable_exclusive;
4298    ws->enable_inclusive = false;
4299    ws->enable_exclusive = ws->enable_exclusive || enable_inclusive;
4300    ac_build_wg_wavescan_top(ctx, ws);
4301    ws->enable_inclusive = enable_inclusive;
4302    ws->enable_exclusive = enable_exclusive;
4303 }
4304 
4305 /**
4306  * "Bottom half" of a scan that reduces per-thread values across an entire
4307  * workgroup.
4308  *
4309  * The caller must place a barrier between the top and bottom halves.
4310  */
4311 void ac_build_wg_scan_bottom(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
4312 {
4313    bool enable_inclusive = ws->enable_inclusive;
4314    bool enable_exclusive = ws->enable_exclusive;
4315    ws->enable_inclusive = false;
4316    ws->enable_exclusive = ws->enable_exclusive || enable_inclusive;
4317    ac_build_wg_wavescan_bottom(ctx, ws);
4318    ws->enable_inclusive = enable_inclusive;
4319    ws->enable_exclusive = enable_exclusive;
4320 
4321    /* ws->result_reduce is already the correct value */
4322    if (ws->enable_inclusive)
4323       ws->result_inclusive = ac_build_alu_op(ctx, ws->result_inclusive, ws->src, ws->op);
4324    if (ws->enable_exclusive)
4325       ws->result_exclusive = ac_build_alu_op(ctx, ws->result_exclusive, ws->extra, ws->op);
4326 }
4327 
4328 /**
4329  * A scan that reduces per-thread values across an entire workgroup.
4330  *
4331  * The caller must ensure that all lanes are active when this code runs
4332  * (WWM is insufficient!), because there is an implied barrier.
4333  */
4334 void ac_build_wg_scan(struct ac_llvm_context *ctx, struct ac_wg_scan *ws)
4335 {
4336    ac_build_wg_scan_top(ctx, ws);
4337    ac_build_s_barrier(ctx);
4338    ac_build_wg_scan_bottom(ctx, ws);
4339 }
4340 
4341 LLVMValueRef ac_build_quad_swizzle(struct ac_llvm_context *ctx, LLVMValueRef src, unsigned lane0,
4342                                    unsigned lane1, unsigned lane2, unsigned lane3)
4343 {
4344    unsigned mask = dpp_quad_perm(lane0, lane1, lane2, lane3);
4345    if (ctx->chip_class >= GFX8) {
4346       return ac_build_dpp(ctx, src, src, mask, 0xf, 0xf, false);
4347    } else {
4348       return ac_build_ds_swizzle(ctx, src, (1 << 15) | mask);
4349    }
4350 }
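
/* Illustrative sketch: the reduce code above uses quad swizzles as its first
 * two butterfly steps. For example, swapping the two lanes of each pair
 * within a quad (lane 0<->1, 2<->3):
 *
 *    LLVMValueRef swapped = ac_build_quad_swizzle(ctx, value, 1, 0, 3, 2);
 *
 * "value" is a hypothetical per-lane LLVMValueRef.
 */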
4351 
4352 LLVMValueRef ac_build_shuffle(struct ac_llvm_context *ctx, LLVMValueRef src, LLVMValueRef index)
4353 {
4354    LLVMTypeRef type = LLVMTypeOf(src);
4355    LLVMValueRef result;
4356 
4357    index = LLVMBuildMul(ctx->builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
4358    src = LLVMBuildZExt(ctx->builder, src, ctx->i32, "");
4359 
4360    result =
4361       ac_build_intrinsic(ctx, "llvm.amdgcn.ds.bpermute", ctx->i32, (LLVMValueRef[]){index, src}, 2,
4362                          AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
4363    return LLVMBuildTrunc(ctx->builder, result, type, "");
4364 }
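
/* Usage sketch (hypothetical caller): read another lane's value by a dynamic
 * lane index within the wave. The index is multiplied by 4 above because
 * ds_bpermute addresses lanes in bytes. "value" and "lane_index" are assumed
 * LLVMValueRefs.
 *
 *    LLVMValueRef other = ac_build_shuffle(ctx, value, lane_index);
 */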
4365 
4366 LLVMValueRef ac_build_frexp_exp(struct ac_llvm_context *ctx, LLVMValueRef src0, unsigned bitsize)
4367 {
4368    LLVMTypeRef type;
4369    char *intr;
4370 
4371    if (bitsize == 16) {
4372       intr = "llvm.amdgcn.frexp.exp.i16.f16";
4373       type = ctx->i16;
4374    } else if (bitsize == 32) {
4375       intr = "llvm.amdgcn.frexp.exp.i32.f32";
4376       type = ctx->i32;
4377    } else {
4378       intr = "llvm.amdgcn.frexp.exp.i32.f64";
4379       type = ctx->i32;
4380    }
4381 
4382    LLVMValueRef params[] = {
4383       src0,
4384    };
4385    return ac_build_intrinsic(ctx, intr, type, params, 1, AC_FUNC_ATTR_READNONE);
4386 }
4387 LLVMValueRef ac_build_frexp_mant(struct ac_llvm_context *ctx, LLVMValueRef src0, unsigned bitsize)
4388 {
4389    LLVMTypeRef type;
4390    char *intr;
4391 
4392    if (bitsize == 16) {
4393       intr = "llvm.amdgcn.frexp.mant.f16";
4394       type = ctx->f16;
4395    } else if (bitsize == 32) {
4396       intr = "llvm.amdgcn.frexp.mant.f32";
4397       type = ctx->f32;
4398    } else {
4399       intr = "llvm.amdgcn.frexp.mant.f64";
4400       type = ctx->f64;
4401    }
4402 
4403    LLVMValueRef params[] = {
4404       src0,
4405    };
4406    return ac_build_intrinsic(ctx, intr, type, params, 1, AC_FUNC_ATTR_READNONE);
4407 }
4408 
4409 LLVMValueRef ac_build_canonicalize(struct ac_llvm_context *ctx, LLVMValueRef src0, unsigned bitsize)
4410 {
4411    LLVMTypeRef type;
4412    char *intr;
4413 
4414    if (bitsize == 16) {
4415       intr = "llvm.canonicalize.f16";
4416       type = ctx->f16;
4417    } else if (bitsize == 32) {
4418       intr = "llvm.canonicalize.f32";
4419       type = ctx->f32;
4420    } else {
4421       intr = "llvm.canonicalize.f64";
4422       type = ctx->f64;
4423    }
4424 
4425    LLVMValueRef params[] = {
4426       src0,
4427    };
4428    return ac_build_intrinsic(ctx, intr, type, params, 1, AC_FUNC_ATTR_READNONE);
4429 }
4430 
4431 /*
4432  * This takes an (I, J) coordinate pair
4433  * and works out the X and Y derivatives.
4434  * It returns DDX(I), DDX(J), DDY(I), DDY(J).
4435  */
4436 LLVMValueRef ac_build_ddxy_interp(struct ac_llvm_context *ctx, LLVMValueRef interp_ij)
4437 {
4438    LLVMValueRef result[4], a;
4439    unsigned i;
4440 
4441    for (i = 0; i < 2; i++) {
4442       a = LLVMBuildExtractElement(ctx->builder, interp_ij, LLVMConstInt(ctx->i32, i, false), "");
4443       result[i] = ac_build_ddxy(ctx, AC_TID_MASK_TOP_LEFT, 1, a);
4444       result[2 + i] = ac_build_ddxy(ctx, AC_TID_MASK_TOP_LEFT, 2, a);
4445    }
4446    return ac_build_gather_values(ctx, result, 4);
4447 }
4448 
4449 LLVMValueRef ac_build_load_helper_invocation(struct ac_llvm_context *ctx)
4450 {
4451    LLVMValueRef result;
4452 
4453    if (LLVM_VERSION_MAJOR >= 13) {
4454       result = ac_build_intrinsic(ctx, "llvm.amdgcn.live.mask", ctx->i1, NULL, 0,
4455                                   AC_FUNC_ATTR_READONLY | AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY);
4456    } else {
4457       result = ac_build_intrinsic(ctx, "llvm.amdgcn.ps.live", ctx->i1, NULL, 0,
4458                                   AC_FUNC_ATTR_READNONE);
4459    }
4460    return LLVMBuildNot(ctx->builder, result, "");
4461 }
4462 
4463 LLVMValueRef ac_build_is_helper_invocation(struct ac_llvm_context *ctx)
4464 {
4465    if (!ctx->postponed_kill)
4466       return ac_build_load_helper_invocation(ctx);
4467 
4468    /* postponed_kill should be NULL on LLVM 13+ */
4469    assert(LLVM_VERSION_MAJOR < 13);
4470 
4471    /* !(exact && postponed) */
4472    LLVMValueRef exact =
4473       ac_build_intrinsic(ctx, "llvm.amdgcn.ps.live", ctx->i1, NULL, 0, AC_FUNC_ATTR_READNONE);
4474 
4475    LLVMValueRef postponed = LLVMBuildLoad(ctx->builder, ctx->postponed_kill, "");
4476    return LLVMBuildNot(ctx->builder, LLVMBuildAnd(ctx->builder, exact, postponed, ""), "");
4477 }
4478 
4479 LLVMValueRef ac_build_call(struct ac_llvm_context *ctx, LLVMValueRef func, LLVMValueRef *args,
4480                            unsigned num_args)
4481 {
4482    LLVMValueRef ret = LLVMBuildCall(ctx->builder, func, args, num_args, "");
4483    LLVMSetInstructionCallConv(ret, LLVMGetFunctionCallConv(func));
4484    return ret;
4485 }
4486 
4487 void ac_export_mrt_z(struct ac_llvm_context *ctx, LLVMValueRef depth, LLVMValueRef stencil,
4488                      LLVMValueRef samplemask, bool is_last, struct ac_export_args *args)
4489 {
4490    unsigned mask = 0;
4491    unsigned format = ac_get_spi_shader_z_format(depth != NULL, stencil != NULL, samplemask != NULL);
4492 
4493    assert(depth || stencil || samplemask);
4494 
4495    memset(args, 0, sizeof(*args));
4496 
4497    if (is_last) {
4498       args->valid_mask = 1; /* whether the EXEC mask is valid */
4499       args->done = 1;       /* DONE bit */
4500    }
4501 
4502    /* Specify the target we are exporting */
4503    args->target = V_008DFC_SQ_EXP_MRTZ;
4504 
4505    args->compr = 0;                       /* COMP flag */
4506    args->out[0] = LLVMGetUndef(ctx->f32); /* R, depth */
4507    args->out[1] = LLVMGetUndef(ctx->f32); /* G, stencil test val[0:7], stencil op val[8:15] */
4508    args->out[2] = LLVMGetUndef(ctx->f32); /* B, sample mask */
4509    args->out[3] = LLVMGetUndef(ctx->f32); /* A, alpha to mask */
4510 
4511    if (format == V_028710_SPI_SHADER_UINT16_ABGR) {
4512       assert(!depth);
4513       args->compr = 1; /* COMPR flag */
4514 
4515       if (stencil) {
4516          /* Stencil should be in X[23:16]. */
4517          stencil = ac_to_integer(ctx, stencil);
4518          stencil = LLVMBuildShl(ctx->builder, stencil, LLVMConstInt(ctx->i32, 16, 0), "");
4519          args->out[0] = ac_to_float(ctx, stencil);
4520          mask |= 0x3;
4521       }
4522       if (samplemask) {
4523          /* SampleMask should be in Y[15:0]. */
4524          args->out[1] = samplemask;
4525          mask |= 0xc;
4526       }
4527    } else {
4528       if (depth) {
4529          args->out[0] = depth;
4530          mask |= 0x1;
4531       }
4532       if (stencil) {
4533          args->out[1] = stencil;
4534          mask |= 0x2;
4535       }
4536       if (samplemask) {
4537          args->out[2] = samplemask;
4538          mask |= 0x4;
4539       }
4540    }
4541 
4542    /* GFX6 (except OLAND and HAINAN) has a bug where it only looks
4543     * at the X writemask component. */
4544    if (ctx->chip_class == GFX6 && ctx->family != CHIP_OLAND && ctx->family != CHIP_HAINAN)
4545       mask |= 0x1;
4546 
4547    /* Specify which components to enable */
4548    args->enabled_channels = mask;
4549 }
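
/* Usage sketch (hypothetical caller): export only depth from a pixel shader
 * as the final export of the program. "depth_value" is an assumed f32
 * LLVMValueRef; ac_build_export() is defined elsewhere in this file.
 *
 *    struct ac_export_args args;
 *    ac_export_mrt_z(ctx, depth_value, NULL, NULL, true, &args);
 *    ac_build_export(ctx, &args);
 */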
4550 
4551 /* Send GS Alloc Req message from the first wave of the group to SPI.
4552  * Message payload is:
4553  * - bits 0..10: vertices in group
4554  * - bits 12..22: primitives in group
4555  */
4556 void ac_build_sendmsg_gs_alloc_req(struct ac_llvm_context *ctx, LLVMValueRef wave_id,
4557                                    LLVMValueRef vtx_cnt, LLVMValueRef prim_cnt)
4558 {
4559    LLVMBuilderRef builder = ctx->builder;
4560    LLVMValueRef tmp;
4561    bool export_dummy_prim = false;
4562 
4563    /* HW workaround for a GPU hang with 100% culling.
4564     * We always have to export at least 1 primitive.
4565     * Export a degenerate triangle using vertex 0 for all 3 vertices.
4566     */
4567    if (prim_cnt == ctx->i32_0 && ctx->chip_class == GFX10) {
4568       assert(vtx_cnt == ctx->i32_0);
4569       prim_cnt = ctx->i32_1;
4570       vtx_cnt = ctx->i32_1;
4571       export_dummy_prim = true;
4572    }
4573 
4574    ac_build_ifcc(ctx, LLVMBuildICmp(builder, LLVMIntEQ, wave_id, ctx->i32_0, ""), 5020);
4575 
4576    tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->i32, 12, false), "");
4577    tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
4578    ac_build_sendmsg(ctx, AC_SENDMSG_GS_ALLOC_REQ, tmp);
4579 
4580    if (export_dummy_prim) {
4581       struct ac_ngg_prim prim = {0};
4582       /* The vertex indices are 0,0,0. */
4583       prim.passthrough = ctx->i32_0;
4584 
4585       struct ac_export_args pos = {0};
4586       /* The hw culls primitives with NaN. */
4587       pos.out[0] = pos.out[1] = pos.out[2] = pos.out[3] = LLVMConstReal(ctx->f32, NAN);
4588       pos.target = V_008DFC_SQ_EXP_POS;
4589       pos.enabled_channels = 0xf;
4590       pos.done = true;
4591 
4592       ac_build_ifcc(ctx, LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(ctx), ctx->i32_0, ""),
4593                     5021);
4594       ac_build_export_prim(ctx, &prim);
4595       ac_build_export(ctx, &pos);
4596       ac_build_endif(ctx, 5021);
4597    }
4598 
4599    ac_build_endif(ctx, 5020);
4600 }
4601 
4602 
4603 LLVMValueRef ac_pack_edgeflags_for_export(struct ac_llvm_context *ctx,
4604                                           const struct ac_shader_args *args)
4605 {
4606    /* Use the following trick to extract the edge flags:
4607     *   extracted = v_and_b32 gs_invocation_id, 0x700 ; get edge flags at bits 8, 9, 10
4608     *   shifted = v_mul_u32_u24 extracted, 0x80402u   ; shift the bits: 8->9, 9->19, 10->29
4609     *   result = v_and_b32 shifted, 0x20080200        ; remove garbage
4610     */
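   /* Worked example: 0x80402 has bits 1, 10 and 19 set, so the multiply
    * copies each input bit to three positions and the final AND keeps
    * exactly one copy of each:
    *   bit  8 * 0x80402 -> bits  9, 18, 27   (AND keeps bit  9)
    *   bit  9 * 0x80402 -> bits 10, 19, 28   (AND keeps bit 19)
    *   bit 10 * 0x80402 -> bits 11, 20, 29   (AND keeps bit 29)
    */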
4611    LLVMValueRef tmp = LLVMBuildAnd(ctx->builder,
4612                                    ac_get_arg(ctx, args->gs_invocation_id),
4613                                    LLVMConstInt(ctx->i32, 0x700, 0), "");
4614    tmp = LLVMBuildMul(ctx->builder, tmp, LLVMConstInt(ctx->i32, 0x80402u, 0), "");
4615    return LLVMBuildAnd(ctx->builder, tmp, LLVMConstInt(ctx->i32, 0x20080200, 0), "");
4616 }
4617 
4618 LLVMValueRef ac_pack_prim_export(struct ac_llvm_context *ctx, const struct ac_ngg_prim *prim)
4619 {
4620    /* The prim export format is:
4621     *  - bits 0..8: index 0
4622     *  - bit 9: edge flag 0
4623     *  - bits 10..18: index 1
4624     *  - bit 19: edge flag 1
4625     *  - bits 20..28: index 2
4626     *  - bit 29: edge flag 2
4627     *  - bit 31: null primitive (skip)
4628     */
4629    LLVMBuilderRef builder = ctx->builder;
4630    LLVMValueRef tmp = LLVMBuildZExt(builder, prim->isnull, ctx->i32, "");
4631    LLVMValueRef result = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->i32, 31, false), "");
4632    result = LLVMBuildOr(ctx->builder, result, prim->edgeflags, "");
4633 
4634    for (unsigned i = 0; i < prim->num_vertices; ++i) {
4635       tmp = LLVMBuildShl(builder, prim->index[i], LLVMConstInt(ctx->i32, 10 * i, false), "");
4636       result = LLVMBuildOr(builder, result, tmp, "");
4637    }
4638    return result;
4639 }
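
/* Worked example (illustrative): a visible triangle with vertex indices
 * 3, 4, 5 and no edge flags packs to
 *
 *    3 | (4 << 10) | (5 << 20) = 0x501003
 *
 * with bit 31 clear because isnull is false.
 */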
4640 
4641 void ac_build_export_prim(struct ac_llvm_context *ctx, const struct ac_ngg_prim *prim)
4642 {
4643    struct ac_export_args args;
4644 
4645    if (prim->passthrough) {
4646       args.out[0] = prim->passthrough;
4647    } else {
4648       args.out[0] = ac_pack_prim_export(ctx, prim);
4649    }
4650 
4651    args.out[0] = LLVMBuildBitCast(ctx->builder, args.out[0], ctx->f32, "");
4652    args.out[1] = LLVMGetUndef(ctx->f32);
4653    args.out[2] = LLVMGetUndef(ctx->f32);
4654    args.out[3] = LLVMGetUndef(ctx->f32);
4655 
4656    args.target = V_008DFC_SQ_EXP_PRIM;
4657    args.enabled_channels = 1;
4658    args.done = true;
4659    args.valid_mask = false;
4660    args.compr = false;
4661 
4662    ac_build_export(ctx, &args);
4663 }
4664 
4665 static LLVMTypeRef arg_llvm_type(enum ac_arg_type type, unsigned size, struct ac_llvm_context *ctx)
4666 {
4667    if (type == AC_ARG_FLOAT) {
4668       return size == 1 ? ctx->f32 : LLVMVectorType(ctx->f32, size);
4669    } else if (type == AC_ARG_INT) {
4670       return size == 1 ? ctx->i32 : LLVMVectorType(ctx->i32, size);
4671    } else {
4672       LLVMTypeRef ptr_type;
4673       switch (type) {
4674       case AC_ARG_CONST_PTR:
4675          ptr_type = ctx->i8;
4676          break;
4677       case AC_ARG_CONST_FLOAT_PTR:
4678          ptr_type = ctx->f32;
4679          break;
4680       case AC_ARG_CONST_PTR_PTR:
4681          ptr_type = ac_array_in_const32_addr_space(ctx->i8);
4682          break;
4683       case AC_ARG_CONST_DESC_PTR:
4684          ptr_type = ctx->v4i32;
4685          break;
4686       case AC_ARG_CONST_IMAGE_PTR:
4687          ptr_type = ctx->v8i32;
4688          break;
4689       default:
4690          unreachable("unknown arg type");
4691       }
4692       if (size == 1) {
4693          return ac_array_in_const32_addr_space(ptr_type);
4694       } else {
4695          assert(size == 2);
4696          return ac_array_in_const_addr_space(ptr_type);
4697       }
4698    }
4699 }
4700 
4701 LLVMValueRef ac_build_main(const struct ac_shader_args *args, struct ac_llvm_context *ctx,
4702                            enum ac_llvm_calling_convention convention, const char *name,
4703                            LLVMTypeRef ret_type, LLVMModuleRef module)
4704 {
4705    LLVMTypeRef arg_types[AC_MAX_ARGS];
4706 
4707    for (unsigned i = 0; i < args->arg_count; i++) {
4708       arg_types[i] = arg_llvm_type(args->args[i].type, args->args[i].size, ctx);
4709    }
4710 
4711    LLVMTypeRef main_function_type = LLVMFunctionType(ret_type, arg_types, args->arg_count, 0);
4712 
4713    LLVMValueRef main_function = LLVMAddFunction(module, name, main_function_type);
4714    LLVMBasicBlockRef main_function_body =
4715       LLVMAppendBasicBlockInContext(ctx->context, main_function, "main_body");
4716    LLVMPositionBuilderAtEnd(ctx->builder, main_function_body);
4717 
4718    LLVMSetFunctionCallConv(main_function, convention);
4719    for (unsigned i = 0; i < args->arg_count; ++i) {
4720       LLVMValueRef P = LLVMGetParam(main_function, i);
4721 
4722       if (args->args[i].file != AC_ARG_SGPR)
4723          continue;
4724 
4725       ac_add_function_attr(ctx->context, main_function, i + 1, AC_FUNC_ATTR_INREG);
4726 
4727       if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
4728          ac_add_function_attr(ctx->context, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
4729          ac_add_attr_dereferenceable(P, UINT64_MAX);
4730          ac_add_attr_alignment(P, 4);
4731       }
4732    }
4733 
4734    ctx->main_function = main_function;
4735 
4736    /* Enable denormals for FP16 and FP64: */
4737    LLVMAddTargetDependentFunctionAttr(main_function, "denormal-fp-math", "ieee,ieee");
4738    /* Disable denormals for FP32: */
4739    LLVMAddTargetDependentFunctionAttr(main_function, "denormal-fp-math-f32",
4740                                       "preserve-sign,preserve-sign");
4741    return main_function;
4742 }
4743 
4744 void ac_build_s_endpgm(struct ac_llvm_context *ctx)
4745 {
4746    LLVMTypeRef calltype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
4747    LLVMValueRef code = LLVMConstInlineAsm(calltype, "s_endpgm", "", true, false);
4748    LLVMBuildCall(ctx->builder, code, NULL, 0, "");
4749 }
4750 
4751 /**
4752  * Convert triangle strip indices to triangle indices. This is used to decompose
4753  * triangle strips into triangles.
4754  */
4755 void ac_build_triangle_strip_indices_to_triangle(struct ac_llvm_context *ctx, LLVMValueRef is_odd,
4756                                                  LLVMValueRef flatshade_first,
4757                                                  LLVMValueRef index[3])
4758 {
4759    LLVMBuilderRef builder = ctx->builder;
4760    LLVMValueRef out[3];
4761 
4762    /* We need to change the vertex order for odd triangles to get correct
4763     * front/back facing by swapping 2 vertex indices, but we also have to
4764     * keep the provoking vertex in the same place.
4765     *
4766     * If the first vertex is provoking, swap index 1 and 2.
4767     * If the last vertex is provoking, swap index 0 and 1.
4768     */
4769    out[0] = LLVMBuildSelect(builder, flatshade_first, index[0],
4770                             LLVMBuildSelect(builder, is_odd, index[1], index[0], ""), "");
4771    out[1] = LLVMBuildSelect(builder, flatshade_first,
4772                             LLVMBuildSelect(builder, is_odd, index[2], index[1], ""),
4773                             LLVMBuildSelect(builder, is_odd, index[0], index[1], ""), "");
4774    out[2] = LLVMBuildSelect(builder, flatshade_first,
4775                             LLVMBuildSelect(builder, is_odd, index[1], index[2], ""), index[2], "");
4776    memcpy(index, out, sizeof(out));
4777 }
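
/* Worked example (illustrative): the second (odd) triangle of the strip
 * 0, 1, 2, 3 has input indices (1, 2, 3).
 *
 *    flatshade_first (provoking vertex first): output is (1, 3, 2)
 *    provoking vertex last:                    output is (2, 1, 3)
 *
 * Both keep the provoking vertex in place while flipping the winding.
 */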
4778 
4779 LLVMValueRef ac_build_is_inf_or_nan(struct ac_llvm_context *ctx, LLVMValueRef a)
4780 {
4781    LLVMValueRef args[2] = {
4782       a,
4783       LLVMConstInt(ctx->i32, S_NAN | Q_NAN | N_INFINITY | P_INFINITY, 0),
4784    };
4785    return ac_build_intrinsic(ctx, "llvm.amdgcn.class.f32", ctx->i1, args, 2,
4786                              AC_FUNC_ATTR_READNONE);
4787 }
4788