/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 by Mike Erwin.
 * All rights reserved.
 */

/** \file
 * \ingroup gpu
 *
 * GPU geometry batch
 * Contains VAOs + VBOs + Shader representing a drawable entity.
 */
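
/* Typical usage of this API, as a minimal illustrative sketch only (the vertex
 * format, data and shader choice below are assumptions, not taken from this file):
 *
 * \code{.c}
 * GPUVertFormat format = {0};
 * uint pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
 *
 * GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
 * GPU_vertbuf_data_alloc(vbo, 3);
 * // ... fill the three vertices with GPU_vertbuf_attr_fill(vbo, pos, ...) ...
 *
 * GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
 * GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
 * GPU_batch_draw(batch);
 * GPU_batch_discard(batch); // Also frees `vbo` because of GPU_BATCH_OWNS_VBO.
 * \endcode
 */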

#include "MEM_guardedalloc.h"

#include "BLI_math_base.h"

#include "GPU_batch.h"
#include "GPU_batch_presets.h"
#include "GPU_matrix.h"
#include "GPU_platform.h"
#include "GPU_shader.h"

#include "gpu_backend.hh"
#include "gpu_context_private.hh"
#include "gpu_index_buffer_private.hh"
#include "gpu_shader_private.hh"
#include "gpu_vertex_buffer_private.hh"

#include "gpu_batch_private.hh"

#include <string.h>

using namespace blender::gpu;

/* -------------------------------------------------------------------- */
/** \name Creation & Deletion
 * \{ */

GPUBatch *GPU_batch_calloc(void)
{
  GPUBatch *batch = GPUBackend::get()->batch_alloc();
  memset(batch, 0, sizeof(*batch));
  return batch;
}

GPUBatch *GPU_batch_create_ex(GPUPrimType prim_type,
                              GPUVertBuf *verts,
                              GPUIndexBuf *elem,
                              eGPUBatchFlag owns_flag)
{
  GPUBatch *batch = GPU_batch_calloc();
  GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
  return batch;
}
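
/* Ownership sketch (illustrative; `vbo` and `ibo` are assumed to be created by
 * the caller): passing GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX hands both
 * buffers over to the batch, so GPU_batch_discard() will free them; passing 0
 * keeps ownership with the caller.
 *
 * \code{.c}
 * GPUBatch *batch = GPU_batch_create_ex(
 *     GPU_PRIM_TRIS, vbo, ibo, GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
 * \endcode
 */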

void GPU_batch_init_ex(GPUBatch *batch,
                       GPUPrimType prim_type,
                       GPUVertBuf *verts,
                       GPUIndexBuf *elem,
                       eGPUBatchFlag owns_flag)
{
  BLI_assert(verts != NULL);
  /* Do not pass any other flag */
  BLI_assert((owns_flag & ~(GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX)) == 0);

  batch->verts[0] = verts;
  for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; v++) {
    batch->verts[v] = NULL;
  }
  for (int v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
    batch->inst[v] = NULL;
  }
  batch->elem = elem;
  batch->prim_type = prim_type;
  batch->flag = owns_flag | GPU_BATCH_INIT | GPU_BATCH_DIRTY;
  batch->shader = NULL;
}

/* This will share the VBOs with the new batch. */
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
{
  GPU_batch_init_ex(
      batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, GPU_BATCH_INVALID);

  batch_dst->prim_type = batch_src->prim_type;
  for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; v++) {
    batch_dst->verts[v] = batch_src->verts[v];
  }
}

void GPU_batch_clear(GPUBatch *batch)
{
  if (batch->flag & GPU_BATCH_OWNS_INDEX) {
    GPU_indexbuf_discard(batch->elem);
  }
  if (batch->flag & GPU_BATCH_OWNS_VBO_ANY) {
    for (int v = 0; (v < GPU_BATCH_VBO_MAX_LEN) && batch->verts[v]; v++) {
      if (batch->flag & (GPU_BATCH_OWNS_VBO << v)) {
        GPU_VERTBUF_DISCARD_SAFE(batch->verts[v]);
      }
    }
  }
  if (batch->flag & GPU_BATCH_OWNS_INST_VBO_ANY) {
    for (int v = 0; (v < GPU_BATCH_INST_VBO_MAX_LEN) && batch->inst[v]; v++) {
      if (batch->flag & (GPU_BATCH_OWNS_INST_VBO << v)) {
        GPU_VERTBUF_DISCARD_SAFE(batch->inst[v]);
      }
    }
  }
  batch->flag = GPU_BATCH_INVALID;
}

void GPU_batch_discard(GPUBatch *batch)
{
  GPU_batch_clear(batch);

  delete static_cast<Batch *>(batch);
}
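
/* Lifetime sketch (illustrative; `other_vbo` is a hypothetical vertex buffer):
 * GPU_batch_clear() only releases the buffers the batch owns and marks it
 * invalid so it can be re-initialized in place, while GPU_batch_discard()
 * additionally frees the GPUBatch itself.
 *
 * \code{.c}
 * GPU_batch_clear(batch); // Keeps the batch allocation, flags it GPU_BATCH_INVALID.
 * GPU_batch_init_ex(batch, GPU_PRIM_LINES, other_vbo, NULL, GPU_BATCH_OWNS_VBO);
 * GPU_batch_discard(batch); // Frees owned buffers and the batch itself.
 * \endcode
 */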

/** \} */

/* -------------------------------------------------------------------- */
/** \name Buffers Management
 * \{ */

/* NOTE: Override ONLY the first instance VBO (and free it if owned). */
void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
{
  BLI_assert(inst);
  batch->flag |= GPU_BATCH_DIRTY;

  if (batch->inst[0] && (batch->flag & GPU_BATCH_OWNS_INST_VBO)) {
    GPU_vertbuf_discard(batch->inst[0]);
  }
  batch->inst[0] = inst;

  SET_FLAG_FROM_TEST(batch->flag, own_vbo, GPU_BATCH_OWNS_INST_VBO);
}
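
/* Instancing sketch (illustrative; `inst_vbo` is a hypothetical vertex buffer
 * holding per-instance attributes). With `own_vbo` set to true the batch takes
 * ownership of it:
 *
 * \code{.c}
 * GPU_batch_instbuf_set(batch, inst_vbo, true);
 * GPU_batch_draw(batch); // Draws one instance per vertex in `inst_vbo`.
 * \endcode
 */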

/* NOTE: Override any previously assigned elem (and free it if owned). */
void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo)
{
  BLI_assert(elem);
  batch->flag |= GPU_BATCH_DIRTY;

  if (batch->elem && (batch->flag & GPU_BATCH_OWNS_INDEX)) {
    GPU_indexbuf_discard(batch->elem);
  }
  batch->elem = elem;

  SET_FLAG_FROM_TEST(batch->flag, own_ibo, GPU_BATCH_OWNS_INDEX);
}

int GPU_batch_instbuf_add_ex(GPUBatch *batch, GPUVertBuf *insts, bool own_vbo)
{
  BLI_assert(insts);
  batch->flag |= GPU_BATCH_DIRTY;

  for (uint v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
    if (batch->inst[v] == NULL) {
      /* For now, all VertexBuffers must have the same vertex_len. */
      if (batch->inst[0]) {
        /* Allow for different sizes of vertex buffers (the smallest number of verts is used). */
        // BLI_assert(insts->vertex_len == batch->inst[0]->vertex_len);
      }

      batch->inst[v] = insts;
      SET_FLAG_FROM_TEST(batch->flag, own_vbo, (eGPUBatchFlag)(GPU_BATCH_OWNS_INST_VBO << v));
      return v;
    }
  }
  /* We only reach this point if there is no room for another GPUVertBuf. */
  BLI_assert(0 && "Not enough instance VBO slots in batch");
  return -1;
}

/* Returns the index of verts in the batch. */
int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
{
  BLI_assert(verts);
  batch->flag |= GPU_BATCH_DIRTY;

  for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
    if (batch->verts[v] == NULL) {
      /* For now, all VertexBuffers must have the same vertex_len. */
      if (batch->verts[0] != NULL) {
        /* This is an issue for the HACK inside DRW_vbo_request(). */
        // BLI_assert(verts->vertex_len == batch->verts[0]->vertex_len);
      }
      batch->verts[v] = verts;
      SET_FLAG_FROM_TEST(batch->flag, own_vbo, (eGPUBatchFlag)(GPU_BATCH_OWNS_VBO << v));
      return v;
    }
  }
  /* We only reach this point if there is no room for another GPUVertBuf. */
  BLI_assert(0 && "Not enough VBO slots in batch");
  return -1;
}
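
/* Multi-VBO sketch (illustrative; `color_vbo` is a hypothetical vertex buffer
 * providing an extra attribute with the same vertex count as the batch):
 *
 * \code{.c}
 * int slot = GPU_batch_vertbuf_add_ex(batch, color_vbo, true);
 * BLI_assert(slot != -1); // -1 means all GPU_BATCH_VBO_MAX_LEN slots are in use.
 * \endcode
 */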

/** \} */

/* -------------------------------------------------------------------- */
/** \name Uniform setters
 *
 * TODO(fclem): port this to GPUShader.
 * \{ */

void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader)
{
  batch->shader = shader;
  GPU_shader_bind(batch->shader);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Drawing / Drawcall functions
 * \{ */

void GPU_batch_draw(GPUBatch *batch)
{
  GPU_shader_bind(batch->shader);
  GPU_batch_draw_advanced(batch, 0, 0, 0, 0);
}

void GPU_batch_draw_range(GPUBatch *batch, int v_first, int v_count)
{
  GPU_shader_bind(batch->shader);
  GPU_batch_draw_advanced(batch, v_first, v_count, 0, 0);
}

/* Draw multiple instances of a batch without having any instance attributes. */
void GPU_batch_draw_instanced(GPUBatch *batch, int i_count)
{
  BLI_assert(batch->inst[0] == NULL);

  GPU_shader_bind(batch->shader);
  GPU_batch_draw_advanced(batch, 0, 0, 0, i_count);
}

void GPU_batch_draw_advanced(
    GPUBatch *gpu_batch, int v_first, int v_count, int i_first, int i_count)
{
  BLI_assert(Context::get()->shader != NULL);
  Batch *batch = static_cast<Batch *>(gpu_batch);

  if (v_count == 0) {
    if (batch->elem) {
      v_count = batch->elem_()->index_len_get();
    }
    else {
      v_count = batch->verts_(0)->vertex_len;
    }
  }
  if (i_count == 0) {
    i_count = (batch->inst[0]) ? batch->inst_(0)->vertex_len : 1;
    /* Meh. This is to be able to use different numbers of verts in instance VBOs. */
    if (batch->inst[1] != NULL) {
      i_count = min_ii(i_count, batch->inst_(1)->vertex_len);
    }
  }

  if (v_count == 0 || i_count == 0) {
    /* Nothing to draw. */
    return;
  }

  batch->draw(v_first, v_count, i_first, i_count);
}
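
/* Draw-call sketch (illustrative): passing 0 for `v_count` or `i_count` means
 * "use the full buffer length", as resolved in GPU_batch_draw_advanced() above.
 *
 * \code{.c}
 * GPU_batch_draw_range(batch, 6, 12); // Binds batch->shader, draws verts/indices [6, 18).
 * GPU_batch_draw_instanced(batch, 4); // Binds batch->shader, draws 4 instances.
 * \endcode
 */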

/** \} */

/* -------------------------------------------------------------------- */
/** \name Utilities
 * \{ */

void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
                                               eGPUBuiltinShader shader_id,
                                               eGPUShaderConfig sh_cfg)
{
  GPUShader *shader = GPU_shader_get_builtin_shader_with_config(shader_id, sh_cfg);
  GPU_batch_set_shader(batch, shader);
}

void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id)
{
  GPU_batch_program_set_builtin_with_config(batch, shader_id, GPU_SHADER_CFG_DEFAULT);
}
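
/* Builtin-shader sketch (illustrative; the clipped configuration is only
 * meaningful when the caller sets up clipping planes):
 *
 * \code{.c}
 * GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
 * // Or pick a configuration explicitly:
 * GPU_batch_program_set_builtin_with_config(
 *     batch, GPU_SHADER_3D_UNIFORM_COLOR, GPU_SHADER_CFG_CLIPPED);
 * \endcode
 */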

/* Bind the program bound to IMM to the batch.
 * XXX Use this with great care. Drawing with the GPUBatch API is not compatible with IMM.
 * DO NOT DRAW WITH THE BATCH BEFORE CALLING immUnbindProgram. */
void GPU_batch_program_set_imm_shader(GPUBatch *batch)
{
  GPU_batch_set_shader(batch, immGetShader());
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Init/Exit
 * \{ */

void gpu_batch_init(void)
{
  gpu_batch_presets_init();
}

void gpu_batch_exit(void)
{
  gpu_batch_presets_exit();
}

/** \} */