/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FD6_EMIT_H
#define FD6_EMIT_H

#include "pipe/p_context.h"

#include "fd6_context.h"
#include "fd6_format.h"
#include "fd6_program.h"
#include "freedreno_context.h"
#include "ir3_gallium.h"

struct fd_ringbuffer;
/* To collect all the state objects to emit in a single CP_SET_DRAW_STATE
 * packet, the emit tracks a collection of however many state_groups need
 * to be emitted.
 */
enum fd6_state_id {
   FD6_GROUP_PROG_CONFIG,
   FD6_GROUP_PROG,
   FD6_GROUP_PROG_BINNING,
   FD6_GROUP_PROG_INTERP,
   FD6_GROUP_PROG_FB_RAST,
   FD6_GROUP_LRZ,
   FD6_GROUP_LRZ_BINNING,
   FD6_GROUP_VTXSTATE,
   FD6_GROUP_VBO,
   FD6_GROUP_CONST,
   FD6_GROUP_VS_DRIVER_PARAMS,
   FD6_GROUP_PRIMITIVE_PARAMS,
   FD6_GROUP_VS_TEX,
   FD6_GROUP_HS_TEX,
   FD6_GROUP_DS_TEX,
   FD6_GROUP_GS_TEX,
   FD6_GROUP_FS_TEX,
   FD6_GROUP_RASTERIZER,
   FD6_GROUP_ZSA,
   FD6_GROUP_BLEND,
   FD6_GROUP_SCISSOR,
   FD6_GROUP_BLEND_COLOR,
   FD6_GROUP_SO,
   FD6_GROUP_IBO,
   FD6_GROUP_NON_GROUP, /* placeholder group for state emit in IB2, keep last */
};

#define ENABLE_ALL                                                             \
   (CP_SET_DRAW_STATE__0_BINNING | CP_SET_DRAW_STATE__0_GMEM |                 \
    CP_SET_DRAW_STATE__0_SYSMEM)
#define ENABLE_DRAW (CP_SET_DRAW_STATE__0_GMEM | CP_SET_DRAW_STATE__0_SYSMEM)

struct fd6_state_group {
   struct fd_ringbuffer *stateobj;
   enum fd6_state_id group_id;
   /* enable_mask controls in which passes the stateobj is evaluated:
    * b0 is the binning pass, b1 and/or b2 the draw pass
    */
   uint32_t enable_mask;
};

/* grouped together emit-state for prog/vertex/state emit: */
struct fd6_emit {
   struct fd_context *ctx;
   const struct fd_vertex_state *vtx;
   const struct pipe_draw_info *info;
   unsigned drawid_offset;
   const struct pipe_draw_indirect_info *indirect;
   const struct pipe_draw_start_count_bias *draw;
   struct ir3_cache_key key;
   enum fd_dirty_3d_state dirty;
   uint32_t dirty_groups;

   uint32_t sprite_coord_enable; /* bitmask */
   bool sprite_coord_mode;
   bool rasterflat;
   bool primitive_restart;
   uint8_t patch_vertices;

   /* cached to avoid repeated lookups: */
   const struct fd6_program_state *prog;

   struct ir3_shader_variant *bs;
   struct ir3_shader_variant *vs;
   struct ir3_shader_variant *hs;
   struct ir3_shader_variant *ds;
   struct ir3_shader_variant *gs;
   struct ir3_shader_variant *fs;

   unsigned streamout_mask;

   struct fd6_state_group groups[32];
   unsigned num_groups;
};

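/* Look up (and cache) the program state for the current shader key, so
 * repeated queries during a single draw do not hit the ir3 shader cache
 * again.
 */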
static inline const struct fd6_program_state *
fd6_emit_get_prog(struct fd6_emit *emit)
{
   if (!emit->prog) {
      struct ir3_program_state *s = ir3_cache_lookup(
         emit->ctx->shader_cache, &emit->key, &emit->ctx->debug);
      emit->prog = fd6_program_state(s);
   }
   return emit->prog;
}

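/* Append a state group to be emitted with this draw, taking ownership of
 * the caller's stateobj reference.
 */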
static inline void
fd6_emit_take_group(struct fd6_emit *emit, struct fd_ringbuffer *stateobj,
                    enum fd6_state_id group_id, unsigned enable_mask)
{
   debug_assert(emit->num_groups < ARRAY_SIZE(emit->groups));
   struct fd6_state_group *g = &emit->groups[emit->num_groups++];
   g->stateobj = stateobj;
   g->group_id = group_id;
   g->enable_mask = enable_mask;
}

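/* Like fd6_emit_take_group(), but grabs a new reference on the stateobj so
 * the caller keeps its own, e.g. for long-lived state objects that are
 * re-used across draws.
 */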
static inline void
fd6_emit_add_group(struct fd6_emit *emit, struct fd_ringbuffer *stateobj,
                   enum fd6_state_id group_id, unsigned enable_mask)
{
   fd6_emit_take_group(emit, fd_ringbuffer_ref(stateobj), group_id,
                       enable_mask);
}

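/* Emit a CP_EVENT_WRITE.  If 'timestamp' is requested, the event also writes
 * an incrementing sequence number to the per-context control buffer, which is
 * returned so the caller can wait on it.
 */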
static inline unsigned
fd6_event_write(struct fd_batch *batch, struct fd_ringbuffer *ring,
                enum vgt_event_type evt, bool timestamp)
{
   unsigned seqno = 0;

   fd_reset_wfi(batch);

   OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
   OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
   if (timestamp) {
      struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
      seqno = ++fd6_ctx->seqno;
      OUT_RELOC(ring, control_ptr(fd6_ctx, seqno)); /* ADDR_LO/HI */
      OUT_RING(ring, seqno);
   }

   return seqno;
}

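/* Emit cache-invalidate events (CCU color/depth plus CACHE_INVALIDATE),
 * without waiting for them to complete.
 */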
static inline void
fd6_cache_inv(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
   fd6_event_write(batch, ring, PC_CCU_INVALIDATE_DEPTH, false);
   fd6_event_write(batch, ring, CACHE_INVALIDATE, false);
}

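/* Flush caches and stall:  wait for the RB_DONE_TS timestamp to land, then
 * for the CACHE_FLUSH_TS timestamp, using the per-context seqno as the fence
 * value in each case.
 */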
static inline void
fd6_cache_flush(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
   unsigned seqno;

   seqno = fd6_event_write(batch, ring, RB_DONE_TS, true);

   OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
   OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL_MEMORY);
   OUT_RELOC(ring, control_ptr(fd6_ctx, seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
   OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   seqno = fd6_event_write(batch, ring, CACHE_FLUSH_TS, true);

   OUT_PKT7(ring, CP_WAIT_MEM_GTE, 4);
   OUT_RING(ring, CP_WAIT_MEM_GTE_0_RESERVED(0));
   OUT_RELOC(ring, control_ptr(fd6_ctx, seqno));
   OUT_RING(ring, CP_WAIT_MEM_GTE_3_REF(seqno));
}

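/* Emit a BLIT event, bracketed by debug markers (used for GMEM tile
 * load/store blits).
 */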
static inline void
fd6_emit_blit(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   emit_marker6(ring, 7);
   fd6_event_write(batch, ring, BLIT, false);
   emit_marker6(ring, 7);
}

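/* Flush the LRZ (low-resolution Z) cache. */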
static inline void
fd6_emit_lrz_flush(struct fd_ringbuffer *ring)
{
   OUT_PKT7(ring, CP_EVENT_WRITE, 1);
   OUT_RING(ring, LRZ_FLUSH);
}

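/* Returns true for geometry-pipeline stages (VS/HS/DS/GS), false for
 * fragment and compute stages.
 */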
static inline bool
fd6_geom_stage(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY:
      return true;
   case MESA_SHADER_FRAGMENT:
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return false;
   default:
      unreachable("bad shader type");
   }
}

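/* Select the CP_LOAD_STATE6 opcode variant for a given shader stage. */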
static inline uint32_t
fd6_stage2opcode(gl_shader_stage type)
{
   return fd6_geom_stage(type) ? CP_LOAD_STATE6_GEOM : CP_LOAD_STATE6_FRAG;
}

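/* Map a shader stage to the corresponding a6xx shader state block. */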
static inline enum a6xx_state_block
fd6_stage2shadersb(gl_shader_stage type)
{
   switch (type) {
   case MESA_SHADER_VERTEX:
      return SB6_VS_SHADER;
   case MESA_SHADER_TESS_CTRL:
      return SB6_HS_SHADER;
   case MESA_SHADER_TESS_EVAL:
      return SB6_DS_SHADER;
   case MESA_SHADER_GEOMETRY:
      return SB6_GS_SHADER;
   case MESA_SHADER_FRAGMENT:
      return SB6_FS_SHADER;
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_KERNEL:
      return SB6_CS_SHADER;
   default:
      unreachable("bad shader type");
      return ~0;
   }
}

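/* Convert the mesa tessellation-spacing enum to the a6xx equivalent. */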
static inline enum a6xx_tess_spacing
fd6_gl2spacing(enum gl_tess_spacing spacing)
{
   switch (spacing) {
   case TESS_SPACING_EQUAL:
      return TESS_EQUAL;
   case TESS_SPACING_FRACTIONAL_ODD:
      return TESS_FRACTIONAL_ODD;
   case TESS_SPACING_FRACTIONAL_EVEN:
      return TESS_FRACTIONAL_EVEN;
   case TESS_SPACING_UNSPECIFIED:
   default:
      unreachable("spacing must be specified");
   }
}

bool fd6_emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
                       enum pipe_shader_type type,
                       struct fd_texture_stateobj *tex, unsigned bcolor_offset,
                       const struct ir3_shader_variant *v) assert_dt;

void fd6_emit_state(struct fd_ringbuffer *ring,
                    struct fd6_emit *emit) assert_dt;

void fd6_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
                       struct ir3_shader_variant *cp) assert_dt;

void fd6_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);

void fd6_emit_init_screen(struct pipe_screen *pscreen);
void fd6_emit_init(struct pipe_context *pctx);

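/* Emit an IB (indirect branch) into a secondary command stream / stateobj,
 * bracketed by debug markers.
 */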
static inline void
fd6_emit_ib(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
{
   emit_marker6(ring, 6);
   __OUT_IB5(ring, target);
   emit_marker6(ring, 6);
}

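/* Convenience macro to write a single register via a PKT4 packet; assumes a
 * ringbuffer named 'ring' is in scope.
 */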
#define WRITE(reg, val)                                                        \
   do {                                                                        \
      OUT_PKT4(ring, reg, 1);                                                  \
      OUT_RING(ring, val);                                                     \
   } while (0)

#endif /* FD6_EMIT_H */