1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software Foundation,
14 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * The Original Code is Copyright (C) 2016 by Mike Erwin.
17 * All rights reserved.
18 */
19
20 /** \file
21 * \ingroup gpu
22 *
23 * GPU geometry batch
24 * Contains VAOs + VBOs + Shader representing a drawable entity.
25 */
26
27 #pragma once
28
#include <string.h> /* memset, used by GPU_BATCH_CLEAR_SAFE. */

#include "BLI_utildefines.h"

#include "GPU_index_buffer.h"
#include "GPU_shader.h"
#include "GPU_vertex_buffer.h"
34
/** Maximum number of vertex buffers per batch (size of #GPUBatch.verts). */
#define GPU_BATCH_VBO_MAX_LEN 6
/** Maximum number of per-instance vertex buffers (size of #GPUBatch.inst). */
#define GPU_BATCH_INST_VBO_MAX_LEN 2
/* NOTE(review): the two VAO constants are not referenced in this header — presumably
 * backend (VAO cache) tuning values; confirm against the GL backend implementation. */
#define GPU_BATCH_VAO_STATIC_LEN 3
#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16
39
40 typedef enum eGPUBatchFlag {
41 /** Invalid default state. */
42 GPU_BATCH_INVALID = 0,
43
44 /** GPUVertBuf ownership. (One bit per vbo) */
45 GPU_BATCH_OWNS_VBO = (1 << 0),
46 GPU_BATCH_OWNS_VBO_MAX = (GPU_BATCH_OWNS_VBO << (GPU_BATCH_VBO_MAX_LEN - 1)),
47 GPU_BATCH_OWNS_VBO_ANY = ((GPU_BATCH_OWNS_VBO << GPU_BATCH_VBO_MAX_LEN) - 1),
48 /** Instance GPUVertBuf ownership. (One bit per vbo) */
49 GPU_BATCH_OWNS_INST_VBO = (GPU_BATCH_OWNS_VBO_MAX << 1),
50 GPU_BATCH_OWNS_INST_VBO_MAX = (GPU_BATCH_OWNS_INST_VBO << (GPU_BATCH_INST_VBO_MAX_LEN - 1)),
51 GPU_BATCH_OWNS_INST_VBO_ANY = ((GPU_BATCH_OWNS_INST_VBO << GPU_BATCH_INST_VBO_MAX_LEN) - 1) &
52 ~GPU_BATCH_OWNS_VBO_ANY,
53 /** GPUIndexBuf ownership. */
54 GPU_BATCH_OWNS_INDEX = (GPU_BATCH_OWNS_INST_VBO_MAX << 1),
55
56 /** Has been initialized. At least one VBO is set. */
57 GPU_BATCH_INIT = (1 << 16),
58 /** Batch is initialized but its VBOs are still being populated. (optional) */
59 GPU_BATCH_BUILDING = (1 << 16),
60 /** Cached data need to be rebuild. (VAO, PSO, ...) */
61 GPU_BATCH_DIRTY = (1 << 17),
62 } eGPUBatchFlag;
63
64 #define GPU_BATCH_OWNS_NONE GPU_BATCH_INVALID
65
66 BLI_STATIC_ASSERT(GPU_BATCH_OWNS_INDEX < GPU_BATCH_INIT,
67 "eGPUBatchFlag: Error: status flags are shadowed by the ownership bits!")
68
ENUM_OPERATORS(eGPUBatchFlag, GPU_BATCH_DIRTY)
70
71 #ifdef __cplusplus
72 extern "C" {
73 #endif
74
75 /**
76 * IMPORTANT: Do not allocate manually as the real struct is bigger (i.e: GLBatch). This is only
77 * the common and "public" part of the struct. Use the provided allocator.
78 * TODO(fclem): Make the content of this struct hidden and expose getters/setters.
79 **/
typedef struct GPUBatch {
  /** verts[0] is required, others can be NULL */
  GPUVertBuf *verts[GPU_BATCH_VBO_MAX_LEN];
  /** Instance attributes. */
  GPUVertBuf *inst[GPU_BATCH_INST_VBO_MAX_LEN];
  /** NULL if element list not needed */
  GPUIndexBuf *elem;
  /** Bookkeeping: combination of #eGPUBatchFlag bits (buffer ownership + status). */
  eGPUBatchFlag flag;
  /** Type of geometry to draw. */
  GPUPrimType prim_type;
  /** Current assigned shader. DEPRECATED. Here only for uniform binding. */
  struct GPUShader *shader;
} GPUBatch;
94
/** Allocate a zeroed batch through the GPU module's allocator — never allocate a
 * #GPUBatch manually, the real backend struct is bigger (see note above). */
GPUBatch *GPU_batch_calloc(void);
/**
 * Create a batch drawing primitives of type \a prim from \a vert (and optionally \a elem).
 * \param owns_flag: #eGPUBatchFlag ownership bits for the passed buffers.
 */
GPUBatch *GPU_batch_create_ex(GPUPrimType prim,
                              GPUVertBuf *vert,
                              GPUIndexBuf *elem,
                              eGPUBatchFlag owns_flag);
/** Same as #GPU_batch_create_ex but initializes a caller-provided \a batch in place. */
void GPU_batch_init_ex(GPUBatch *batch,
                       GPUPrimType prim,
                       GPUVertBuf *vert,
                       GPUIndexBuf *elem,
                       eGPUBatchFlag owns_flag);
/* NOTE(review): presumably a shallow copy that does not transfer buffer ownership to
 * batch_dst — confirm against the implementation. */
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src);

/* Convenience wrappers: create/init without taking ownership of any buffer
 * (0 == GPU_BATCH_OWNS_NONE). */
#define GPU_batch_create(prim, verts, elem) GPU_batch_create_ex(prim, verts, elem, 0)
#define GPU_batch_init(batch, prim, verts, elem) GPU_batch_init_ex(batch, prim, verts, elem, 0)
109
/* Same as discard but does not free. (does not call free callback). */
void GPU_batch_clear(GPUBatch *);

void GPU_batch_discard(GPUBatch *); /* verts & elem are not discarded */

/** Set the instancing buffer; \a own_vbo transfers ownership of the VBO to the batch. */
void GPU_batch_instbuf_set(GPUBatch *, GPUVertBuf *, bool own_vbo); /* Instancing */
/** Set (replace) the index buffer; \a own_ibo transfers ownership of the IBO to the batch. */
void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo);

/* NOTE(review): return value is presumably the slot index the buffer was stored in
 * (bounded by GPU_BATCH_INST_VBO_MAX_LEN / GPU_BATCH_VBO_MAX_LEN) — confirm in impl. */
int GPU_batch_instbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);
int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);

/** Append a vertex buffer without transferring ownership. */
#define GPU_batch_vertbuf_add(batch, verts) GPU_batch_vertbuf_add_ex(batch, verts, false)

/** Assign \a shader to the batch (used by the uniform-binding macros below). */
void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader);
void GPU_batch_program_set_imm_shader(GPUBatch *batch);
void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id);
void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
                                               eGPUBuiltinShader shader_id,
                                               eGPUShaderConfig sh_cfg);
129
/* Will only work after setting the batch program. */
/* TODO(fclem): Theses needs to be replaced by GPU_shader_uniform_* with explicit shader. */
/* NOTE(review): each macro expands with a trailing ';'. An unbraced
 * `if (...) GPU_batch_uniform_1i(...); else ...` will therefore not compile —
 * always brace call sites. Removing the ';' here could break callers that omit
 * their own, so it is only flagged, not changed. */
#define GPU_batch_uniform_1i(batch, name, x) GPU_shader_uniform_1i((batch)->shader, name, x);
#define GPU_batch_uniform_1b(batch, name, x) GPU_shader_uniform_1b((batch)->shader, name, x);
#define GPU_batch_uniform_1f(batch, name, x) GPU_shader_uniform_1f((batch)->shader, name, x);
#define GPU_batch_uniform_2f(batch, name, x, y) GPU_shader_uniform_2f((batch)->shader, name, x, y);
#define GPU_batch_uniform_3f(batch, name, x, y, z) \
  GPU_shader_uniform_3f((batch)->shader, name, x, y, z);
#define GPU_batch_uniform_4f(batch, name, x, y, z, w) \
  GPU_shader_uniform_4f((batch)->shader, name, x, y, z, w);
#define GPU_batch_uniform_2fv(batch, name, val) GPU_shader_uniform_2fv((batch)->shader, name, val);
#define GPU_batch_uniform_3fv(batch, name, val) GPU_shader_uniform_3fv((batch)->shader, name, val);
#define GPU_batch_uniform_4fv(batch, name, val) GPU_shader_uniform_4fv((batch)->shader, name, val);
#define GPU_batch_uniform_2fv_array(batch, name, len, val) \
  GPU_shader_uniform_2fv_array((batch)->shader, name, len, val);
#define GPU_batch_uniform_4fv_array(batch, name, len, val) \
  GPU_shader_uniform_4fv_array((batch)->shader, name, len, val);
#define GPU_batch_uniform_mat4(batch, name, val) \
  GPU_shader_uniform_mat4((batch)->shader, name, val);
#define GPU_batch_texture_bind(batch, name, tex) \
  GPU_texture_bind(tex, GPU_shader_get_texture_binding((batch)->shader, name));
151
/** Draw the whole batch with its currently assigned shader. */
void GPU_batch_draw(GPUBatch *batch);
/** Draw a sub-range: \a v_count vertices starting at \a v_first. */
void GPU_batch_draw_range(GPUBatch *batch, int v_first, int v_count);
/** Draw the batch \a i_count times (instanced). */
void GPU_batch_draw_instanced(GPUBatch *batch, int i_count);

/* This does not bind/unbind shader and does not call GPU_matrix_bind() */
void GPU_batch_draw_advanced(GPUBatch *, int v_first, int v_count, int i_first, int i_count);
158
159 #if 0 /* future plans */
160
161 /* Can multiple batches share a GPUVertBuf? Use ref count? */
162
163 /* We often need a batch with its own data, to be created and discarded together. */
164 /* WithOwn variants reduce number of system allocations. */
165
166 typedef struct BatchWithOwnVertexBuffer {
167 GPUBatch batch;
168 GPUVertBuf verts; /* link batch.verts to this */
169 } BatchWithOwnVertexBuffer;
170
171 typedef struct BatchWithOwnElementList {
172 GPUBatch batch;
173 GPUIndexBuf elem; /* link batch.elem to this */
174 } BatchWithOwnElementList;
175
176 typedef struct BatchWithOwnVertexBufferAndElementList {
177 GPUBatch batch;
178 GPUIndexBuf elem; /* link batch.elem to this */
179 GPUVertBuf verts; /* link batch.verts to this */
180 } BatchWithOwnVertexBufferAndElementList;
181
182 GPUBatch *create_BatchWithOwnVertexBuffer(GPUPrimType, GPUVertFormat *, uint v_len, GPUIndexBuf *);
183 GPUBatch *create_BatchWithOwnElementList(GPUPrimType, GPUVertBuf *, uint prim_len);
184 GPUBatch *create_BatchWithOwnVertexBufferAndElementList(GPUPrimType,
185 GPUVertFormat *,
186 uint v_len,
187 uint prim_len);
188 /* verts: shared, own */
189 /* elem: none, shared, own */
190 GPUBatch *create_BatchInGeneral(GPUPrimType, VertexBufferStuff, ElementListStuff);
191
192 #endif /* future plans */
193
/* Module lifecycle — lower-case naming suggests internal (GPU-module-only) API,
 * presumably called once at GPU subsystem startup/shutdown; confirm call sites. */
void gpu_batch_init(void);
void gpu_batch_exit(void);
196
197 /* Macros */
198
/**
 * Discard \a batch and reset the pointer to NULL (no-op when already NULL).
 * Nulling the pointer defends against use-after-free / double-discard.
 * Macro hygiene fix: argument uses are now parenthesized so expressions like
 * `GPU_BATCH_DISCARD_SAFE(cond ? a : b)` expand correctly.
 */
#define GPU_BATCH_DISCARD_SAFE(batch) \
  do { \
    if ((batch) != NULL) { \
      GPU_batch_discard(batch); \
      (batch) = NULL; \
    } \
  } while (0)
206
/**
 * Clear \a batch and zero the struct, keeping the allocation (no-op when NULL).
 * Requires <string.h> for memset.
 * Macro hygiene fix: argument uses are now parenthesized.
 */
#define GPU_BATCH_CLEAR_SAFE(batch) \
  do { \
    if ((batch) != NULL) { \
      GPU_batch_clear(batch); \
      memset((batch), 0, sizeof(*(batch))); \
    } \
  } while (0)
214
/**
 * Discard every batch pointer in \a _batch_array (length \a _len), then free the
 * array itself with MEM_freeN. No-op when the array pointer is NULL.
 * Macro hygiene fix: argument uses are now parenthesized; note \a _len is still
 * evaluated once per loop iteration — pass a side-effect-free expression.
 */
#define GPU_BATCH_DISCARD_ARRAY_SAFE(_batch_array, _len) \
  do { \
    if ((_batch_array) != NULL) { \
      BLI_assert((_len) > 0); \
      for (int _i = 0; _i < (_len); _i++) { \
        GPU_BATCH_DISCARD_SAFE((_batch_array)[_i]); \
      } \
      MEM_freeN(_batch_array); \
    } \
  } while (0)
225
226 #ifdef __cplusplus
227 }
228 #endif
229