1 /*
2 * Copyright (C) 2021 Alyssa Rosenzweig
3 * Copyright (C) 2020-2021 Collabora, Ltd.
4 * Copyright (C) 2014 Broadcom
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 */
25
26 #include "agx_state.h"
27 #include "compiler/nir/nir_builder.h"
28 #include "asahi/compiler/agx_compile.h"
29 #include "gallium/auxiliary/util/u_blitter.h"
30
/* Builds and compiles the tilebuffer "reload" fragment shader, used to
 * repopulate the tilebuffer at the start of a render pass from the existing
 * framebuffer contents. One binary is compiled per tilebuffer format; all
 * variants are uploaded into a single shared BO and their GPU addresses are
 * recorded in dev->reload.format[].
 */
static void
agx_build_reload_shader(struct agx_device *dev)
{
   /* The shader samples a RECT (unnormalized-coordinate) texture at the
    * fragment's own pixel position and writes the texel to colour output 0. */
   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT,
         &agx_nir_options, "agx_reload");
   b.shader->info.internal = true;

   nir_variable *out = nir_variable_create(b.shader, nir_var_shader_out,
         glsl_vector_type(GLSL_TYPE_FLOAT, 4), "output");
   out->data.location = FRAG_RESULT_DATA0;

   /* gl_FragCoord.xy doubles as the texel coordinate since the sampler dim
    * is RECT (coordinates are unnormalized). */
   nir_ssa_def *fragcoord = nir_load_frag_coord(&b);
   nir_ssa_def *coord = nir_channels(&b, fragcoord, 0x3);

   nir_tex_instr *tex = nir_tex_instr_create(b.shader, 1);
   tex->dest_type = nir_type_float32;
   tex->sampler_dim = GLSL_SAMPLER_DIM_RECT;
   tex->op = nir_texop_tex;
   tex->src[0].src_type = nir_tex_src_coord;
   tex->src[0].src = nir_src_for_ssa(coord);
   tex->coord_components = 2;
   nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
   nir_builder_instr_insert(&b, &tex->instr);
   /* 0xFF write mask — only the low 4 bits are meaningful for a vec4;
    * presumably shorthand for "all components". */
   nir_store_var(&b, out, &tex->dest.ssa, 0xFF);

   unsigned offset = 0;
   unsigned bo_size = 4096;

   /* All per-format variants share one 4 KiB executable BO; the assert in
    * the loop below guards against overflowing it. */
   struct agx_bo *bo = agx_bo_create(dev, bo_size, AGX_MEMORY_TYPE_SHADER);
   dev->reload.bo = bo;

   for (unsigned i = 0; i < AGX_NUM_FORMATS; ++i) {
      struct util_dynarray binary;
      util_dynarray_init(&binary, NULL);

      /* Clone so the compile (which mutates the shader) starts from the
       * pristine IR for every format. */
      nir_shader *s = nir_shader_clone(NULL, b.shader);
      struct agx_shader_info info;

      struct agx_shader_key key = {
         .fs.tib_formats[0] = i
      };

      agx_compile_shader_nir(s, &key, &binary, &info);

      assert(offset + binary.size < bo_size);
      memcpy(((uint8_t *) bo->ptr.cpu) + offset, binary.data, binary.size);

      dev->reload.format[i] = bo->ptr.gpu + offset;
      /* Each variant's start is aligned to 128 bytes for the next copy. */
      offset += ALIGN_POT(binary.size, 128);

      util_dynarray_fini(&binary);
   }
}
84
/* Saves every piece of context state that util_blitter may clobber while
 * performing a blit, so the blitter can restore it afterwards. Must be
 * called before util_blitter_blit().
 *
 * render_cond: true when the blit itself honours the render condition; in
 * that case the condition is NOT saved (and thus not disabled for the blit).
 */
static void
agx_blitter_save(struct agx_context *ctx, struct blitter_context *blitter,
                 bool render_cond)
{
   /* Vertex/geometry-side state */
   util_blitter_save_vertex_buffer_slot(blitter, ctx->vertex_buffers);
   util_blitter_save_vertex_elements(blitter, ctx->attributes);
   util_blitter_save_vertex_shader(blitter, ctx->stage[PIPE_SHADER_VERTEX].shader);
   util_blitter_save_rasterizer(blitter, ctx->rast);
   util_blitter_save_viewport(blitter, &ctx->viewport);
   util_blitter_save_scissor(blitter, &ctx->scissor);
   /* Fragment-side and output-merger state */
   util_blitter_save_fragment_shader(blitter, ctx->stage[PIPE_SHADER_FRAGMENT].shader);
   util_blitter_save_blend(blitter, ctx->blend);
   util_blitter_save_depth_stencil_alpha(blitter, &ctx->zs);
   util_blitter_save_stencil_ref(blitter, &ctx->stencil_ref);
   util_blitter_save_so_targets(blitter, 0, NULL);
   util_blitter_save_sample_mask(blitter, ctx->sample_mask);

   util_blitter_save_framebuffer(blitter, &ctx->framebuffer);
   util_blitter_save_fragment_sampler_states(blitter,
         ctx->stage[PIPE_SHADER_FRAGMENT].sampler_count,
         (void **)(ctx->stage[PIPE_SHADER_FRAGMENT].samplers));
   util_blitter_save_fragment_sampler_views(blitter,
         ctx->stage[PIPE_SHADER_FRAGMENT].texture_count,
         (struct pipe_sampler_view **)ctx->stage[PIPE_SHADER_FRAGMENT].textures);
   util_blitter_save_fragment_constant_buffer_slot(blitter,
         ctx->stage[PIPE_SHADER_FRAGMENT].cb);

   /* Only save (and so let the blitter suspend) the render condition when
    * the blit does not itself respect it. */
   if (!render_cond) {
      util_blitter_save_render_condition(blitter,
            (struct pipe_query *) ctx->cond_query,
            ctx->cond_cond, ctx->cond_mode);
   }
}
118
119 void
agx_blit(struct pipe_context * pipe,const struct pipe_blit_info * info)120 agx_blit(struct pipe_context *pipe,
121 const struct pipe_blit_info *info)
122 {
123 //if (info->render_condition_enable &&
124 // !agx_render_condition_check(pan_context(pipe)))
125 // return;
126
127 struct agx_context *ctx = agx_context(pipe);
128
129 if (!util_blitter_is_blit_supported(ctx->blitter, info))
130 unreachable("Unsupported blit\n");
131
132 agx_blitter_save(ctx, ctx->blitter, info->render_condition_enable);
133 util_blitter_blit(ctx->blitter, info);
134 }
135
/* We need some fixed shaders for common rendering tasks. When colour buffer
 * reload is not in use, a shader is used to clear a particular colour. At the
 * end of rendering a tile, a shader is used to write it out. These shaders are
 * too trivial to go through the compiler at this stage. */

/* Hand-assembled AGX machine code fragments below. NOTE(review): the byte
 * sequences are opaque binary — do not reformat or "fix" them without a
 * disassembler. */

/* Terminates the shader (stop/halt sequence). */
#define AGX_STOP \
	0x88, 0x00, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x08, \
	0x00, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00 \

/* Writes the shader's result to the tilebuffer (blend/store op). */
#define AGX_BLEND \
	0x09, 0x00, 0x00, 0x04, 0xf0, 0xfc, 0x80, 0x03

/* Clears the tilebuffer, where u6-u7 are preloaded with the FP16 clear colour

   0: 7e018c098040     bitop_mov  r0, u6
   6: 7e058e098000     bitop_mov  r1, u7
   c: 09000004f0fc8003 TODO.blend
 */

static uint8_t shader_clear[] = {
   0x7e, 0x01, 0x8c, 0x09, 0x80, 0x40,
   0x7e, 0x05, 0x8e, 0x09, 0x80, 0x00,
   AGX_BLEND,
   AGX_STOP
};

/* Writes the rendered tile out to memory at end-of-tile. */
static uint8_t shader_store[] = {
   0x7e, 0x00, 0x04, 0x09, 0x80, 0x00,
   0xb1, 0x80, 0x00, 0x80, 0x00, 0x4a, 0x00, 0x00, 0x0a, 0x00,
   AGX_STOP
};
166
167 void
agx_internal_shaders(struct agx_device * dev)168 agx_internal_shaders(struct agx_device *dev)
169 {
170 unsigned clear_offset = 0;
171 unsigned store_offset = 1024;
172
173 struct agx_bo *bo = agx_bo_create(dev, 4096, AGX_MEMORY_TYPE_SHADER);
174 memcpy(((uint8_t *) bo->ptr.cpu) + clear_offset, shader_clear, sizeof(shader_clear));
175 memcpy(((uint8_t *) bo->ptr.cpu) + store_offset, shader_store, sizeof(shader_store));
176
177 dev->internal.bo = bo;
178 dev->internal.clear = bo->ptr.gpu + clear_offset;
179 dev->internal.store = bo->ptr.gpu + store_offset;
180
181 agx_build_reload_shader(dev);
182 }
183