/*
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#ifndef __PAN_ENCODER_H
#define __PAN_ENCODER_H

#include "util/macros.h"

#include <stdbool.h>
#include "util/format/u_format.h"
#include "util/u_math.h"
#include "pan_bo.h"
#include "genxml/gen_macros.h"
#include "pan_device.h"

/* Tiler structure size computation */

unsigned
panfrost_tiler_header_size(unsigned width, unsigned height, unsigned mask, bool hierarchy);

unsigned
panfrost_tiler_full_size(unsigned width, unsigned height, unsigned mask, bool hierarchy);

unsigned
panfrost_choose_hierarchy_mask(
        unsigned width, unsigned height,
        unsigned vertex_count, bool hierarchy);

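/* Illustrative note: panfrost_tiler_get_polygon_list_size() below shows how
 * these helpers combine in practice: a hierarchy mask is chosen for the
 * framebuffer dimensions, and the header and full sizes for that mask are
 * summed to size the polygon list allocation. */
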
#if defined(PAN_ARCH) && PAN_ARCH <= 5
static inline unsigned
panfrost_tiler_get_polygon_list_size(const struct panfrost_device *dev,
                                     unsigned fb_width, unsigned fb_height,
                                     bool has_draws)
{
        if (!has_draws)
                return MALI_MIDGARD_TILER_MINIMUM_HEADER_SIZE + 4;

        bool hierarchy = !dev->model->quirks.no_hierarchical_tiling;
        unsigned hierarchy_mask =
                panfrost_choose_hierarchy_mask(fb_width, fb_height, 1, hierarchy);

        return panfrost_tiler_full_size(fb_width, fb_height, hierarchy_mask, hierarchy) +
                panfrost_tiler_header_size(fb_width, fb_height, hierarchy_mask, hierarchy);
}
#endif

/* Stack sizes */

unsigned
panfrost_get_stack_shift(unsigned stack_size);

unsigned
panfrost_get_total_stack_size(
                unsigned thread_size,
                unsigned threads_per_core,
                unsigned core_count);

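/* Illustrative sketch (not part of this header): sizing the thread-local
 * storage for a shader that spills to the stack. The variable and field
 * names here are assumptions for the example, not guaranteed by this API.
 *
 *    unsigned shift = panfrost_get_stack_shift(shader_stack_size);
 *    unsigned bytes = panfrost_get_total_stack_size(shader_stack_size,
 *                                                   dev->thread_tls_alloc,
 *                                                   dev->core_count);
 */
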
/* Attributes / instancing */

unsigned
panfrost_padded_vertex_count(unsigned vertex_count);

unsigned
panfrost_compute_magic_divisor(unsigned hw_divisor, unsigned *o_shift, unsigned *extra_flags);

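/* Illustrative sketch (see panfrost_instance_id() below for the real
 * pattern): the padded vertex count acts as the hardware divisor, and when
 * it is not a power of two a magic multiplier/shift pair is derived so the
 * hardware can divide with a multiply-and-shift rather than a true division.
 *
 *    unsigned padded = panfrost_padded_vertex_count(vertex_count);
 *    unsigned shift = 0, extra = 0;
 *    unsigned magic = panfrost_compute_magic_divisor(padded, &shift, &extra);
 */
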
#ifdef PAN_ARCH
/* Records for gl_VertexID and gl_InstanceID use special encodings on Midgard */

#if PAN_ARCH <= 5
static inline void
panfrost_vertex_id(unsigned padded_count,
                   struct mali_attribute_buffer_packed *attr,
                   bool instanced)
{
        pan_pack(attr, ATTRIBUTE_VERTEX_ID, cfg) {
                if (instanced) {
                        cfg.divisor_r = __builtin_ctz(padded_count);
                        cfg.divisor_p = padded_count >> (cfg.divisor_r + 1);
                } else {
                        /* Large values so the modulo is a no-op */
                        cfg.divisor_r = 0x1F;
                        cfg.divisor_p = 0x4;
                }
        }
}

static inline void
panfrost_instance_id(unsigned padded_count,
                     struct mali_attribute_buffer_packed *attr,
                     bool instanced)
{
        pan_pack(attr, ATTRIBUTE_INSTANCE_ID, cfg) {
                if (!instanced || padded_count <= 1) {
                        /* Divide by large number to force to 0 */
                        cfg.divisor_p = ((1u << 31) - 1);
                        cfg.divisor_r = 0x1F;
                        cfg.divisor_e = 0x1;
                } else if (util_is_power_of_two_or_zero(padded_count)) {
                        /* Can't underflow since padded_count >= 2 */
                        cfg.divisor_r = __builtin_ctz(padded_count) - 1;
                } else {
                        cfg.divisor_p =
                                panfrost_compute_magic_divisor(padded_count,
                                        &cfg.divisor_r, &cfg.divisor_e);
                }
        }
}
#endif /* PAN_ARCH <= 5 */

/* Sampler comparison functions are flipped in OpenGL relative to the
 * hardware, so we need to be able to flip them accordingly */

static inline enum mali_func
panfrost_flip_compare_func(enum mali_func f)
{
        switch (f) {
        case MALI_FUNC_LESS: return MALI_FUNC_GREATER;
        case MALI_FUNC_GREATER: return MALI_FUNC_LESS;
        case MALI_FUNC_LEQUAL: return MALI_FUNC_GEQUAL;
        case MALI_FUNC_GEQUAL: return MALI_FUNC_LEQUAL;
        default: return f;
        }
}

/* Compute shaders are invoked with a gl_NumWorkGroups X/Y/Z triplet. Vertex
 * shaders are invoked as (1, vertex_count, instance_count). Compute shaders
 * also have a gl_WorkGroupSize X/Y/Z triplet. These six values are packed
 * together into a dynamic bitfield by this routine. */

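/* Worked example (illustrative): for a compute dispatch with workgroup size
 * (4, 4, 1) and workgroup count (8, 8, 1), the per-value bit counts are
 * 2, 2, 0, 3, 3, 0, giving shifts of 0, 2, 4, 4, 7, 10. The packed word is
 * (3 << 0) | (3 << 2) | (7 << 4) | (7 << 7) = 0x3ff, with size_y_shift = 2,
 * size_z_shift = 4, workgroups_x_shift = 4, workgroups_y_shift = 7 and
 * workgroups_z_shift = 10. */
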
static inline void
panfrost_pack_work_groups_compute(
        struct mali_invocation_packed *out,
        unsigned num_x, unsigned num_y, unsigned num_z,
        unsigned size_x, unsigned size_y, unsigned size_z,
        bool quirk_graphics, bool indirect_dispatch)
{
        /* The values needing packing, in order, and the corresponding shifts.
         * Indices into shifts are off-by-one to make the logic easier */

        unsigned values[6] = { size_x, size_y, size_z, num_x, num_y, num_z };
        unsigned shifts[7] = { 0 };
        uint32_t packed = 0;

        for (unsigned i = 0; i < 6; ++i) {
                /* Must be positive, otherwise we underflow */
                assert(values[i] >= 1);

                /* OR it in, shifting as required */
                packed |= ((values[i] - 1) << shifts[i]);

                /* How many bits did we use? */
                unsigned bit_count = util_logbase2_ceil(values[i]);

                /* Set the next shift accordingly */
                shifts[i + 1] = shifts[i] + bit_count;
        }

        pan_pack(out, INVOCATION, cfg) {
                cfg.invocations = packed;
                cfg.size_y_shift = shifts[1];
                cfg.size_z_shift = shifts[2];
                cfg.workgroups_x_shift = shifts[3];

                if (!indirect_dispatch) {
                        /* Leave zero for the dispatch shader */
                        cfg.workgroups_y_shift = shifts[4];
                        cfg.workgroups_z_shift = shifts[5];
                }

                /* Quirk: for non-instanced graphics, the blob sets
                 * workgroups_z_shift = 32. This doesn't appear to matter to
                 * the hardware, but it's good to be bit-identical. */

                if (quirk_graphics && (num_z <= 1))
                        cfg.workgroups_z_shift = 32;

                /* For graphics, set to the minimum efficient value. For
                 * compute, this must equal the workgroups X shift for
                 * barriers to function correctly */

                cfg.thread_group_split = quirk_graphics ?
                        MALI_SPLIT_MIN_EFFICIENT : cfg.workgroups_x_shift;
        }
}

#if PAN_ARCH >= 5
/* Format conversion */
static inline enum mali_z_internal_format
panfrost_get_z_internal_format(enum pipe_format fmt)
{
        switch (fmt) {
        case PIPE_FORMAT_Z16_UNORM:
        case PIPE_FORMAT_Z16_UNORM_S8_UINT:
                return MALI_Z_INTERNAL_FORMAT_D16;
        case PIPE_FORMAT_Z24_UNORM_S8_UINT:
        case PIPE_FORMAT_Z24X8_UNORM:
                return MALI_Z_INTERNAL_FORMAT_D24;
        case PIPE_FORMAT_Z32_FLOAT:
        case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
                return MALI_Z_INTERNAL_FORMAT_D32;
        default:
                unreachable("Unsupported depth/stencil format.");
        }
}
#endif

#endif /* PAN_ARCH */

#endif /* __PAN_ENCODER_H */