1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software Foundation,
14 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * The Original Code is Copyright (C) 2005 Blender Foundation.
17 * All rights reserved.
18 */
19
20 /** \file
21 * \ingroup gpu
22 *
23 * Mesh drawing using OpenGL VBO (Vertex Buffer Objects)
24 */
25
26 #include <limits.h>
27 #include <stddef.h>
28 #include <string.h>
29
30 #include "MEM_guardedalloc.h"
31
32 #include "BLI_bitmap.h"
33 #include "BLI_ghash.h"
34 #include "BLI_hash.h"
35 #include "BLI_math.h"
36 #include "BLI_math_color.h"
37 #include "BLI_math_color_blend.h"
38 #include "BLI_utildefines.h"
39
40 #include "DNA_meshdata_types.h"
41 #include "DNA_userdef_types.h"
42
43 #include "BKE_DerivedMesh.h"
44 #include "BKE_ccg.h"
45 #include "BKE_mesh.h"
46 #include "BKE_paint.h"
47 #include "BKE_pbvh.h"
48 #include "BKE_subdiv_ccg.h"
49
50 #include "GPU_batch.h"
51 #include "GPU_buffers.h"
52
53 #include "gpu_private.h"
54
55 #include "bmesh.h"
56
57 /* XXX: the rest of the code in this file is used for optimized PBVH
58 * drawing and doesn't interact at all with the buffer code above */
59
/* Per-PBVH-node container of GPU draw data: vertex/index buffers, the
 * batches built from them, plus CPU-side mesh/grid pointers kept so the
 * buffers can be (re)filled later. */
struct GPU_PBVH_Buffers {
  /* Triangle and wire-frame index buffers; the `_fast` variants are
   * coarse versions used for multires grids (corner quads only). */
  GPUIndexBuf *index_buf, *index_buf_fast;
  GPUIndexBuf *index_lines_buf, *index_lines_buf_fast;
  GPUVertBuf *vert_buf;

  GPUBatch *lines;
  GPUBatch *lines_fast;
  GPUBatch *triangles;
  GPUBatch *triangles_fast;

  /* mesh pointers in case buffer allocation fails */
  const MPoly *mpoly;
  const MLoop *mloop;
  const MLoopTri *looptri;
  const MVert *mvert;

  const int *face_indices;
  int face_indices_len;

  /* grid pointers */
  CCGKey gridkey;
  CCGElem **grids;
  const DMFlagMat *grid_flag_mats;
  BLI_bitmap *const *grid_hidden;
  const int *grid_indices;
  int totgrid;

  bool use_bmesh;
  /* When set, the BMesh-backed buffers are cleared on the next flush. */
  bool clear_bmesh_on_flush;

  uint tot_tri, tot_quad;

  /* Material of the first face in this node; used for batch sorting. */
  short material_index;

  /* The PBVH ensures that either all faces in the node are
   * smooth-shaded or all faces are flat-shaded */
  bool smooth;

  /* True when a mask or non-default face set is present, i.e. the
   * sculpt overlay has something to show for this node. */
  bool show_overlay;
};
100
/* File-local vertex format shared by all PBVH vertex buffers, along with
 * the attribute ids returned by GPU_vertformat_attr_add() for position,
 * normal, mask, color and face-set color. Lazily filled in gpu_pbvh_init(). */
static struct {
  GPUVertFormat format;
  uint pos, nor, msk, col, fset;
} g_vbo_id = {{0}};
105
106 /** \} */
107
108 /* -------------------------------------------------------------------- */
109 /** \name PBVH Utils
110 * \{ */
111
/* Lazily create the shared PBVH vertex format (#g_vbo_id). Safe to call
 * repeatedly: attributes are only added on the first call, when the
 * format is still empty. */
void gpu_pbvh_init()
{
  /* Initialize vertex buffer (match 'VertexBufferFormat'). */
  if (g_vbo_id.format.attr_len == 0) {
    g_vbo_id.pos = GPU_vertformat_attr_add(
        &g_vbo_id.format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    g_vbo_id.nor = GPU_vertformat_attr_add(
        &g_vbo_id.format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
    /* TODO: Do not allocate these `.msk` and `.col` when they are not used. */
    g_vbo_id.msk = GPU_vertformat_attr_add(
        &g_vbo_id.format, "msk", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
    g_vbo_id.col = GPU_vertformat_attr_add(
        &g_vbo_id.format, "ac", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
    g_vbo_id.fset = GPU_vertformat_attr_add(
        &g_vbo_id.format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
  }
}
129
/* Counterpart of gpu_pbvh_init(). Intentionally empty: the shared vertex
 * format lives in static storage and needs no teardown. */
void gpu_pbvh_exit()
{
  /* Nothing to do. */
}
134
/* Allocates a non-initialized buffer to be sent to the GPU.
 * A false return value indicates that the memory map failed. */
/* Ensure `buffers->vert_buf` exists and has room for `vert_len` vertices
 * (allocating or re-allocating as needed). Returns true when the buffer's
 * CPU-side data pointer is valid and ready to be filled. */
static bool gpu_pbvh_vert_buf_data_set(GPU_PBVH_Buffers *buffers, uint vert_len)
{
  /* Keep so we can test #GPU_USAGE_DYNAMIC buffer use.
   * Note that format initialization matches in both blocks.
   * Do this to keep braces balanced - otherwise indentation breaks. */
#if 0
  if (buffers->vert_buf == NULL) {
    /* Initialize vertex buffer (match 'VertexBufferFormat'). */
    buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&g_vbo_id.format, GPU_USAGE_DYNAMIC);
    GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
  }
  else if (vert_len != buffers->vert_buf->vertex_len) {
    GPU_vertbuf_data_resize(buffers->vert_buf, vert_len);
  }
#else
  if (buffers->vert_buf == NULL) {
    /* Initialize vertex buffer (match 'VertexBufferFormat'). */
    buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&g_vbo_id.format, GPU_USAGE_STATIC);
  }
  if (GPU_vertbuf_get_data(buffers->vert_buf) == NULL ||
      GPU_vertbuf_get_vertex_len(buffers->vert_buf) != vert_len) {
    /* Allocate buffer if not allocated yet or size changed. */
    GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
  }
#endif

  return GPU_vertbuf_get_data(buffers->vert_buf) != NULL;
}
165
/* Create any batches that are still missing from the buffer's vertex and
 * index buffers. Existing batches are left untouched. */
static void gpu_pbvh_batch_init(GPU_PBVH_Buffers *buffers, GPUPrimType prim)
{
  GPUVertBuf *vbo = buffers->vert_buf;

  if (buffers->triangles == NULL) {
    /* The index buffer can be NULL if the node is empty. */
    buffers->triangles = GPU_batch_create(prim, vbo, buffers->index_buf);
  }

  if (buffers->triangles_fast == NULL && buffers->index_buf_fast != NULL) {
    buffers->triangles_fast = GPU_batch_create(prim, vbo, buffers->index_buf_fast);
  }

  if (buffers->lines == NULL) {
    /* The line index buffer can be NULL if the node is empty. */
    buffers->lines = GPU_batch_create(GPU_PRIM_LINES, vbo, buffers->index_lines_buf);
  }

  if (buffers->lines_fast == NULL && buffers->index_lines_buf_fast != NULL) {
    buffers->lines_fast = GPU_batch_create(GPU_PRIM_LINES, vbo, buffers->index_lines_buf_fast);
  }
}
191
192 /** \} */
193
194 /* -------------------------------------------------------------------- */
195 /** \name Mesh PBVH
196 * \{ */
197
gpu_pbvh_is_looptri_visible(const MLoopTri * lt,const MVert * mvert,const MLoop * mloop,const int * sculpt_face_sets)198 static bool gpu_pbvh_is_looptri_visible(const MLoopTri *lt,
199 const MVert *mvert,
200 const MLoop *mloop,
201 const int *sculpt_face_sets)
202 {
203 return (!paint_is_face_hidden(lt, mvert, mloop) && sculpt_face_sets &&
204 sculpt_face_sets[lt->poly] > SCULPT_FACE_SET_NONE);
205 }
206
207 /* Threaded - do not call any functions that use OpenGL calls! */
/* Fill this node's vertex buffer from the mesh arrays (positions, normals,
 * mask, vertex colors and face-set overlay colors), then create any missing
 * batches. Emits `tot_tri * 3` unshared vertices, one per triangle corner.
 * Index buffers must already have been built by GPU_pbvh_mesh_buffers_build(). */
void GPU_pbvh_mesh_buffers_update(GPU_PBVH_Buffers *buffers,
                                  const MVert *mvert,
                                  const float *vmask,
                                  const MLoopCol *vcol,
                                  const int *sculpt_face_sets,
                                  const int face_sets_color_seed,
                                  const int face_sets_color_default,
                                  const MPropCol *vtcol,
                                  const int update_flags)
{
  const bool show_mask = vmask && (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
  const bool show_face_sets = sculpt_face_sets &&
                              (update_flags & GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS) != 0;
  const bool show_vcol = (vcol || (vtcol && U.experimental.use_sculpt_vertex_colors)) &&
                         (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
  bool empty_mask = true;
  bool default_face_set = true;

  {
    const int totelem = buffers->tot_tri * 3;

    /* Build VBO */
    if (gpu_pbvh_vert_buf_data_set(buffers, totelem)) {
      GPUVertBufRaw pos_step = {0};
      GPUVertBufRaw nor_step = {0};
      GPUVertBufRaw msk_step = {0};
      GPUVertBufRaw fset_step = {0};
      GPUVertBufRaw col_step = {0};

      GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.pos, &pos_step);
      GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.nor, &nor_step);
      GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.msk, &msk_step);
      GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.fset, &fset_step);
      if (show_vcol) {
        GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.col, &col_step);
      }

      /* calculate normal for each polygon only once */
      uint mpoly_prev = UINT_MAX;
      short no[3] = {0, 0, 0};

      for (uint i = 0; i < buffers->face_indices_len; i++) {
        const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
        const uint vtri[3] = {
            buffers->mloop[lt->tri[0]].v,
            buffers->mloop[lt->tri[1]].v,
            buffers->mloop[lt->tri[2]].v,
        };

        /* Hidden faces were excluded from the index buffers too, so the
         * raw steppers stay in sync with the indices. */
        if (!gpu_pbvh_is_looptri_visible(lt, mvert, buffers->mloop, sculpt_face_sets)) {
          continue;
        }

        /* Face normal and mask */
        if (lt->poly != mpoly_prev && !buffers->smooth) {
          const MPoly *mp = &buffers->mpoly[lt->poly];
          float fno[3];
          BKE_mesh_calc_poly_normal(mp, &buffers->mloop[mp->loopstart], mvert, fno);
          normal_float_to_short_v3(no, fno);
          mpoly_prev = lt->poly;
        }

        uchar face_set_color[4] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
        if (show_face_sets) {
          const int fset = abs(sculpt_face_sets[lt->poly]);
          /* Skip for the default color Face Set to render it white. */
          if (fset != face_sets_color_default) {
            BKE_paint_face_set_overlay_color_get(fset, face_sets_color_seed, face_set_color);
            default_face_set = false;
          }
        }

        /* Flat shading: one averaged mask value for the whole triangle. */
        float fmask = 0.0f;
        uchar cmask = 0;
        if (show_mask && !buffers->smooth) {
          fmask = (vmask[vtri[0]] + vmask[vtri[1]] + vmask[vtri[2]]) / 3.0f;
          cmask = (uchar)(fmask * 255);
        }

        for (uint j = 0; j < 3; j++) {
          const MVert *v = &mvert[vtri[j]];
          copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), v->co);

          if (buffers->smooth) {
            copy_v3_v3_short(no, v->no);
          }
          copy_v3_v3_short(GPU_vertbuf_raw_step(&nor_step), no);

          /* Smooth shading: per-vertex mask value. */
          if (show_mask && buffers->smooth) {
            cmask = (uchar)(vmask[vtri[j]] * 255);
          }

          *(uchar *)GPU_vertbuf_raw_step(&msk_step) = cmask;
          empty_mask = empty_mask && (cmask == 0);
          /* Vertex Colors. */
          if (show_vcol) {
            ushort scol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
            if (vtcol && U.experimental.use_sculpt_vertex_colors) {
              /* Sculpt vertex colors: already linear floats, clamp to ushort. */
              scol[0] = unit_float_to_ushort_clamp(vtcol[vtri[j]].color[0]);
              scol[1] = unit_float_to_ushort_clamp(vtcol[vtri[j]].color[1]);
              scol[2] = unit_float_to_ushort_clamp(vtcol[vtri[j]].color[2]);
              scol[3] = unit_float_to_ushort_clamp(vtcol[vtri[j]].color[3]);
              memcpy(GPU_vertbuf_raw_step(&col_step), scol, sizeof(scol));
            }
            else {
              /* Loop colors: byte sRGB, convert to linear via lookup table
               * (alpha stays linear). */
              const uint loop_index = lt->tri[j];
              const MLoopCol *mcol = &vcol[loop_index];
              scol[0] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol->r]);
              scol[1] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol->g]);
              scol[2] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol->b]);
              scol[3] = unit_float_to_ushort_clamp(mcol->a * (1.0f / 255.0f));
              memcpy(GPU_vertbuf_raw_step(&col_step), scol, sizeof(scol));
            }
          }
          /* Face Sets. */
          memcpy(GPU_vertbuf_raw_step(&fset_step), face_set_color, sizeof(uchar[3]));
        }
      }
    }

    gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
  }

  /* Get material index from the first face of this buffer. */
  const MLoopTri *lt = &buffers->looptri[buffers->face_indices[0]];
  const MPoly *mp = &buffers->mpoly[lt->poly];
  buffers->material_index = mp->mat_nr;

  /* Only draw the overlay when there is something to show. */
  buffers->show_overlay = !empty_mask || !default_face_set;
  buffers->mvert = mvert;
}
339
340 /* Threaded - do not call any functions that use OpenGL calls! */
/* Allocate a GPU_PBVH_Buffers for a regular-mesh node and build its wire
 * line index buffer. The vertex buffer and triangle batches are created
 * later by GPU_pbvh_mesh_buffers_update(). The returned buffers keep
 * (non-owning) pointers into the caller's mesh arrays. */
GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const MPoly *mpoly,
                                              const MLoop *mloop,
                                              const MLoopTri *looptri,
                                              const MVert *mvert,
                                              const int *face_indices,
                                              const int *sculpt_face_sets,
                                              const int face_indices_len,
                                              const struct Mesh *mesh)
{
  GPU_PBVH_Buffers *buffers;
  int i, tottri;
  int tot_real_edges = 0;

  buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");

  /* smooth or flat for all */
  buffers->smooth = mpoly[looptri[face_indices[0]].poly].flag & ME_SMOOTH;

  buffers->show_overlay = false;

  /* Count the number of visible triangles */
  for (i = 0, tottri = 0; i < face_indices_len; i++) {
    const MLoopTri *lt = &looptri[face_indices[i]];
    if (gpu_pbvh_is_looptri_visible(lt, mvert, mloop, sculpt_face_sets)) {
      int r_edges[3];
      /* Only edges of the original polygon are drawn; triangulation
       * edges report -1 and are skipped. */
      BKE_mesh_looptri_get_real_edges(mesh, lt, r_edges);
      for (int j = 0; j < 3; j++) {
        if (r_edges[j] != -1) {
          tot_real_edges++;
        }
      }
      tottri++;
    }
  }

  if (tottri == 0) {
    /* Fully hidden node: keep the mesh pointers but build nothing. */
    buffers->tot_tri = 0;

    buffers->mpoly = mpoly;
    buffers->mloop = mloop;
    buffers->looptri = looptri;
    buffers->face_indices = face_indices;
    buffers->face_indices_len = 0;

    return buffers;
  }

  /* Fill only the line index buffer here; the triangle index buffer is
   * implicit (unshared vertices, see GPU_pbvh_mesh_buffers_update). */
  GPUIndexBufBuilder elb_lines;
  GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tot_real_edges, INT_MAX);
  int vert_idx = 0;

  for (i = 0; i < face_indices_len; i++) {
    const MLoopTri *lt = &looptri[face_indices[i]];

    /* Skip hidden faces */
    if (!gpu_pbvh_is_looptri_visible(lt, mvert, mloop, sculpt_face_sets)) {
      continue;
    }

    int r_edges[3];
    BKE_mesh_looptri_get_real_edges(mesh, lt, r_edges);
    if (r_edges[0] != -1) {
      GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 0, vert_idx * 3 + 1);
    }
    if (r_edges[1] != -1) {
      GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 1, vert_idx * 3 + 2);
    }
    if (r_edges[2] != -1) {
      GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 2, vert_idx * 3 + 0);
    }

    vert_idx++;
  }
  buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);

  buffers->tot_tri = tottri;

  buffers->mpoly = mpoly;
  buffers->mloop = mloop;
  buffers->looptri = looptri;

  buffers->face_indices = face_indices;
  buffers->face_indices_len = face_indices_len;

  return buffers;
}
428
429 /** \} */
430
431 /* -------------------------------------------------------------------- */
432 /** \name Grid PBVH
433 * \{ */
434
/* Build all four index buffers (triangles, wires, and their coarse `_fast`
 * variants) for a multires-grid node, skipping hidden grid faces. The
 * vertex layout differs per shading mode: smooth uses gridsize^2 shared
 * vertices per grid; flat uses 4 unshared vertices per quad. */
static void gpu_pbvh_grid_fill_index_buffers(GPU_PBVH_Buffers *buffers,
                                             SubdivCCG *UNUSED(subdiv_ccg),
                                             const int *UNUSED(face_sets),
                                             const int *grid_indices,
                                             uint visible_quad_len,
                                             int totgrid,
                                             int gridsize)
{
  GPUIndexBufBuilder elb, elb_lines;
  GPUIndexBufBuilder elb_fast, elb_lines_fast;

  GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2 * visible_quad_len, INT_MAX);
  GPU_indexbuf_init(&elb_fast, GPU_PRIM_TRIS, 2 * totgrid, INT_MAX);
  GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, 2 * totgrid * gridsize * (gridsize - 1), INT_MAX);
  GPU_indexbuf_init(&elb_lines_fast, GPU_PRIM_LINES, 4 * totgrid, INT_MAX);

  if (buffers->smooth) {
    /* Smooth: vertices are shared inside a grid (one per grid point). */
    uint offset = 0;
    const uint grid_vert_len = gridsize * gridsize;
    for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
      uint v0, v1, v2, v3;
      bool grid_visible = false;

      BLI_bitmap *gh = buffers->grid_hidden[grid_indices[i]];

      for (int j = 0; j < gridsize - 1; j++) {
        for (int k = 0; k < gridsize - 1; k++) {
          /* Skip hidden grid face */
          if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
            continue;
          }
          /* Indices in a Clockwise QUAD disposition. */
          v0 = offset + j * gridsize + k;
          v1 = v0 + 1;
          v2 = v1 + gridsize;
          v3 = v2 - 1;

          GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
          GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);

          GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
          GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);

          /* Close the far edge on the last row of quads. */
          if (j + 2 == gridsize) {
            GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
          }
          grid_visible = true;
        }

        /* NOTE(review): v1/v2 here come from the last visible quad seen so
         * far, not necessarily from this row — confirm intended when rows
         * are fully hidden. */
        if (grid_visible) {
          GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
        }
      }

      if (grid_visible) {
        /* Grid corners */
        v0 = offset;
        v1 = offset + gridsize - 1;
        v2 = offset + grid_vert_len - 1;
        v3 = offset + grid_vert_len - gridsize;

        GPU_indexbuf_add_tri_verts(&elb_fast, v0, v2, v1);
        GPU_indexbuf_add_tri_verts(&elb_fast, v0, v3, v2);

        GPU_indexbuf_add_line_verts(&elb_lines_fast, v0, v1);
        GPU_indexbuf_add_line_verts(&elb_lines_fast, v1, v2);
        GPU_indexbuf_add_line_verts(&elb_lines_fast, v2, v3);
        GPU_indexbuf_add_line_verts(&elb_lines_fast, v3, v0);
      }
    }
  }
  else {
    /* Flat: 4 unshared vertices per quad so each quad gets its own normal. */
    uint offset = 0;
    const uint grid_vert_len = square_uint(gridsize - 1) * 4;
    for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
      bool grid_visible = false;
      BLI_bitmap *gh = buffers->grid_hidden[grid_indices[i]];

      uint v0, v1, v2, v3;
      for (int j = 0; j < gridsize - 1; j++) {
        for (int k = 0; k < gridsize - 1; k++) {
          /* Skip hidden grid face */
          if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
            continue;
          }
          /* VBO data are in a Clockwise QUAD disposition. */
          v0 = offset + (j * (gridsize - 1) + k) * 4;
          v1 = v0 + 1;
          v2 = v0 + 2;
          v3 = v0 + 3;

          GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
          GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);

          GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
          GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);

          if (j + 2 == gridsize) {
            GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
          }
          grid_visible = true;
        }

        if (grid_visible) {
          GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
        }
      }

      if (grid_visible) {
        /* Grid corners */
        v0 = offset;
        v1 = offset + (gridsize - 1) * 4 - 3;
        v2 = offset + grid_vert_len - 2;
        v3 = offset + grid_vert_len - (gridsize - 1) * 4 + 3;

        GPU_indexbuf_add_tri_verts(&elb_fast, v0, v2, v1);
        GPU_indexbuf_add_tri_verts(&elb_fast, v0, v3, v2);

        GPU_indexbuf_add_line_verts(&elb_lines_fast, v0, v1);
        GPU_indexbuf_add_line_verts(&elb_lines_fast, v1, v2);
        GPU_indexbuf_add_line_verts(&elb_lines_fast, v2, v3);
        GPU_indexbuf_add_line_verts(&elb_lines_fast, v3, v0);
      }
    }
  }

  buffers->index_buf = GPU_indexbuf_build(&elb);
  buffers->index_buf_fast = GPU_indexbuf_build(&elb_fast);
  buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
  buffers->index_lines_buf_fast = GPU_indexbuf_build(&elb_lines_fast);
}
566
/* Discard grid batches/index buffers when the shading mode of the node's
 * first grid changed, since the vertex layout differs between smooth and
 * flat shading. */
void GPU_pbvh_grid_buffers_update_free(GPU_PBVH_Buffers *buffers,
                                       const struct DMFlagMat *grid_flag_mats,
                                       const int *grid_indices)
{
  const bool smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;

  if (buffers->smooth == smooth) {
    /* Shading mode unchanged: existing buffers remain valid. */
    return;
  }

  buffers->smooth = smooth;

  GPU_BATCH_DISCARD_SAFE(buffers->triangles);
  GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
  GPU_BATCH_DISCARD_SAFE(buffers->lines);
  GPU_BATCH_DISCARD_SAFE(buffers->lines_fast);

  GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
  GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
  GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
  GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf_fast);
}
586
587 /* Threaded - do not call any functions that use OpenGL calls! */
/* Fill this node's vertex buffer from CCG (multires) grids, rebuilding the
 * index buffers first if needed, then create any missing batches.
 * Smooth shading writes one vertex per grid point; flat shading writes
 * 4 unshared vertices per quad with a per-quad normal. */
void GPU_pbvh_grid_buffers_update(GPU_PBVH_Buffers *buffers,
                                  SubdivCCG *subdiv_ccg,
                                  CCGElem **grids,
                                  const struct DMFlagMat *grid_flag_mats,
                                  int *grid_indices,
                                  int totgrid,
                                  const int *sculpt_face_sets,
                                  const int face_sets_color_seed,
                                  const int face_sets_color_default,
                                  const struct CCGKey *key,
                                  const int update_flags)
{
  const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
  const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
  const bool show_face_sets = sculpt_face_sets &&
                              (update_flags & GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS) != 0;
  bool empty_mask = true;
  bool default_face_set = true;

  int i, j, k, x, y;

  /* Build VBO */
  const int has_mask = key->has_mask;

  buffers->smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;

  /* Per-grid vertex count depends on shading mode (see header comment). */
  uint vert_per_grid = (buffers->smooth) ? key->grid_area : (square_i(key->grid_size - 1) * 4);
  uint vert_count = totgrid * vert_per_grid;

  if (buffers->index_buf == NULL) {
    uint visible_quad_len = BKE_pbvh_count_grid_quads(
        (BLI_bitmap **)buffers->grid_hidden, grid_indices, totgrid, key->grid_size);

    /* totally hidden node, return here to avoid BufferData with zero below. */
    if (visible_quad_len == 0) {
      return;
    }

    gpu_pbvh_grid_fill_index_buffers(buffers,
                                     subdiv_ccg,
                                     sculpt_face_sets,
                                     grid_indices,
                                     visible_quad_len,
                                     totgrid,
                                     key->grid_size);
  }

  uint vbo_index_offset = 0;
  /* Build VBO */
  if (gpu_pbvh_vert_buf_data_set(buffers, vert_count)) {
    GPUIndexBufBuilder elb_lines;

    /* NOTE(review): `elb_lines` is initialized here but never filled or
     * built within this function — looks like dead/leftover code; confirm
     * against upstream history. */
    if (buffers->index_lines_buf == NULL) {
      GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, totgrid * key->grid_area * 2, vert_count);
    }

    for (i = 0; i < totgrid; i++) {
      const int grid_index = grid_indices[i];
      CCGElem *grid = grids[grid_index];
      int vbo_index = vbo_index_offset;

      uchar face_set_color[4] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};

      if (show_face_sets && subdiv_ccg && sculpt_face_sets) {
        /* All quads of one grid belong to the same base face, so the
         * face-set color is resolved once per grid. */
        const int face_index = BKE_subdiv_ccg_grid_to_face_index(subdiv_ccg, grid_index);

        const int fset = abs(sculpt_face_sets[face_index]);
        /* Skip for the default color Face Set to render it white. */
        if (fset != face_sets_color_default) {
          BKE_paint_face_set_overlay_color_get(fset, face_sets_color_seed, face_set_color);
          default_face_set = false;
        }
      }

      if (buffers->smooth) {
        /* One shared vertex per grid point, with the CCG vertex normal. */
        for (y = 0; y < key->grid_size; y++) {
          for (x = 0; x < key->grid_size; x++) {
            CCGElem *elem = CCG_grid_elem(key, grid, x, y);
            GPU_vertbuf_attr_set(
                buffers->vert_buf, g_vbo_id.pos, vbo_index, CCG_elem_co(key, elem));

            short no_short[3];
            normal_float_to_short_v3(no_short, CCG_elem_no(key, elem));
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no_short);

            if (has_mask && show_mask) {
              float fmask = *CCG_elem_mask(key, elem);
              uchar cmask = (uchar)(fmask * 255);
              GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index, &cmask);
              empty_mask = empty_mask && (cmask == 0);
            }

            if (show_vcol) {
              /* Grids have no vertex colors; write opaque white. */
              const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
              GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index, &vcol);
            }

            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index, &face_set_color);

            vbo_index += 1;
          }
        }
        vbo_index_offset += key->grid_area;
      }
      else {
        /* Flat shading: 4 unshared vertices per quad sharing one quad normal. */
        for (j = 0; j < key->grid_size - 1; j++) {
          for (k = 0; k < key->grid_size - 1; k++) {
            CCGElem *elems[4] = {
                CCG_grid_elem(key, grid, k, j),
                CCG_grid_elem(key, grid, k + 1, j),
                CCG_grid_elem(key, grid, k + 1, j + 1),
                CCG_grid_elem(key, grid, k, j + 1),
            };
            float *co[4] = {
                CCG_elem_co(key, elems[0]),
                CCG_elem_co(key, elems[1]),
                CCG_elem_co(key, elems[2]),
                CCG_elem_co(key, elems[3]),
            };

            float fno[3];
            short no_short[3];
            /* Note: Clockwise indices ordering, that's why we invert order here. */
            normal_quad_v3(fno, co[3], co[2], co[1], co[0]);
            normal_float_to_short_v3(no_short, fno);

            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 0, co[0]);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 0, no_short);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 1, co[1]);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 1, no_short);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 2, co[2]);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 2, no_short);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 3, co[3]);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 3, no_short);

            if (has_mask && show_mask) {
              /* Average the four corner masks for the flat quad. */
              float fmask = (*CCG_elem_mask(key, elems[0]) + *CCG_elem_mask(key, elems[1]) +
                             *CCG_elem_mask(key, elems[2]) + *CCG_elem_mask(key, elems[3])) *
                            0.25f;
              uchar cmask = (uchar)(fmask * 255);
              GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 0, &cmask);
              GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 1, &cmask);
              GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 2, &cmask);
              GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 3, &cmask);
              empty_mask = empty_mask && (cmask == 0);
            }

            const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index + 0, &vcol);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index + 1, &vcol);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index + 2, &vcol);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index + 3, &vcol);

            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index + 0, &face_set_color);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index + 1, &face_set_color);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index + 2, &face_set_color);
            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index + 3, &face_set_color);

            vbo_index += 4;
          }
        }
        vbo_index_offset += square_i(key->grid_size - 1) * 4;
      }
    }

    gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
  }

  /* Get material index from the first face of this buffer. */
  buffers->material_index = grid_flag_mats[grid_indices[0]].mat_nr;

  buffers->grids = grids;
  buffers->grid_indices = grid_indices;
  buffers->totgrid = totgrid;
  buffers->grid_flag_mats = grid_flag_mats;
  buffers->gridkey = *key;
  buffers->show_overlay = !empty_mask || !default_face_set;
}
766
767 /* Threaded - do not call any functions that use OpenGL calls! */
GPU_pbvh_grid_buffers_build(int totgrid,BLI_bitmap ** grid_hidden)768 GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build(int totgrid, BLI_bitmap **grid_hidden)
769 {
770 GPU_PBVH_Buffers *buffers;
771
772 buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
773 buffers->grid_hidden = grid_hidden;
774 buffers->totgrid = totgrid;
775
776 buffers->show_overlay = false;
777
778 return buffers;
779 }
780
781 #undef FILL_QUAD_BUFFER
782
783 /** \} */
784
785 /* -------------------------------------------------------------------- */
786 /** \name BMesh PBVH
787 * \{ */
788
789 /* Output a BMVert into a VertexBufferFormat array at v_index. */
/* Output a BMVert into a VertexBufferFormat array at v_index.
 * `fno`/`fmask` override the vertex normal/mask when non-NULL (used for
 * flat shading); otherwise the vertex's own normal and the CD_PAINT_MASK
 * custom-data value are used. `empty_mask` is and-ed with "mask is zero"
 * so the caller can tell whether the whole node's mask is empty. */
static void gpu_bmesh_vert_to_buffer_copy(BMVert *v,
                                          GPUVertBuf *vert_buf,
                                          int v_index,
                                          const float fno[3],
                                          const float *fmask,
                                          const int cd_vert_mask_offset,
                                          const bool show_mask,
                                          const bool show_vcol,
                                          bool *empty_mask)
{
  /* Vertex should always be visible if it's used by a visible face. */
  BLI_assert(!BM_elem_flag_test(v, BM_ELEM_HIDDEN));

  /* Set coord, normal, and mask */
  GPU_vertbuf_attr_set(vert_buf, g_vbo_id.pos, v_index, v->co);

  short no_short[3];
  normal_float_to_short_v3(no_short, fno ? fno : v->no);
  GPU_vertbuf_attr_set(vert_buf, g_vbo_id.nor, v_index, no_short);

  if (show_mask) {
    float effective_mask = fmask ? *fmask : BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset);
    uchar cmask = (uchar)(effective_mask * 255);
    GPU_vertbuf_attr_set(vert_buf, g_vbo_id.msk, v_index, &cmask);
    *empty_mask = *empty_mask && (cmask == 0);
  }

  if (show_vcol) {
    /* BMesh has no vertex colors here; write opaque white. */
    const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
    GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col, v_index, &vcol);
  }

  /* Add default face sets color to avoid artifacts. */
  const uchar face_set[3] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
  GPU_vertbuf_attr_set(vert_buf, g_vbo_id.fset, v_index, &face_set);
}
826
827 /* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
/* Return the total number of vertices across both sets that don't have
 * BM_ELEM_HIDDEN set. */
static int gpu_bmesh_vert_visible_count(GSet *bm_unique_verts, GSet *bm_other_verts)
{
  GSet *vert_sets[2] = {bm_unique_verts, bm_other_verts};
  int totvert = 0;

  for (int s = 0; s < 2; s++) {
    GSetIterator gs_iter;
    GSET_ITER (gs_iter, vert_sets[s]) {
      BMVert *vert = BLI_gsetIterator_getKey(&gs_iter);
      if (!BM_elem_flag_test(vert, BM_ELEM_HIDDEN)) {
        totvert++;
      }
    }
  }

  return totvert;
}
848
849 /* Return the total number of visible faces */
/* Return the number of faces in the set without BM_ELEM_HIDDEN set. */
static int gpu_bmesh_face_visible_count(GSet *bm_faces)
{
  int totface = 0;
  GSetIterator gs_iter;

  GSET_ITER (gs_iter, bm_faces) {
    BMFace *face = BLI_gsetIterator_getKey(&gs_iter);

    if (BM_elem_flag_test(face, BM_ELEM_HIDDEN)) {
      continue;
    }
    totface++;
  }

  return totface;
}
865
/* Free the BMesh-node GPU resources that must be rebuilt on update.
 * The wire batch/line index buffer are always discarded; smooth shading
 * additionally rebuilds the triangle index buffer, so its batch and index
 * buffer are invalidated too. */
void GPU_pbvh_bmesh_buffers_update_free(GPU_PBVH_Buffers *buffers)
{
  GPU_BATCH_DISCARD_SAFE(buffers->lines);
  GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);

  if (buffers->smooth) {
    /* Smooth needs to recreate the triangle index buffer, so we have to
     * invalidate the batch built on top of it as well. */
    GPU_BATCH_DISCARD_SAFE(buffers->triangles);
    GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
  }
}
880
/* Creates a vertex buffer (coordinate, normal, color) and, if smooth
 * shading, an element index buffer.
 *
 * Smooth shading shares vertices between triangles (one vertex-buffer entry
 * per visible BMVert, plus a triangle index buffer); flat shading duplicates
 * vertices (three per triangle, no triangle index buffer needed).
 *
 * Threaded - do not call any functions that use OpenGL calls! */
void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
                                   BMesh *bm,
                                   GSet *bm_faces,
                                   GSet *bm_unique_verts,
                                   GSet *bm_other_verts,
                                   const int update_flags)
{
  const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
  const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
  int tottri, totvert;
  bool empty_mask = true;
  /* Kept after the loops: the material index is read from the last face iterated. */
  BMFace *f = NULL;

  /* Count visible triangles */
  tottri = gpu_bmesh_face_visible_count(bm_faces);

  if (buffers->smooth) {
    /* Count visible vertices */
    totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
  }
  else {
    /* Flat shading duplicates vertices: three per visible triangle. */
    totvert = tottri * 3;
  }

  if (!tottri) {
    if (BLI_gset_len(bm_faces) != 0) {
      /* Node is just hidden. */
    }
    else {
      /* Node is truly empty; schedule its GPU buffers for release on flush. */
      buffers->clear_bmesh_on_flush = true;
    }
    buffers->tot_tri = 0;
    return;
  }

  /* TODO, make mask layer optional for bmesh buffer */
  const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);

  /* Fill vertex buffer */
  if (!gpu_pbvh_vert_buf_data_set(buffers, totvert)) {
    /* Memory map failed */
    return;
  }

  /* Next free slot in the vertex buffer. */
  int v_index = 0;

  if (buffers->smooth) {
    /* Fill the vertex and triangle buffer in one pass over faces. */
    GPUIndexBufBuilder elb, elb_lines;
    GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, totvert);
    GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, totvert);

    /* Maps each BMVert to its slot in the vertex buffer so shared vertices
     * are copied only once. */
    GHash *bm_vert_to_index = BLI_ghash_int_new_ex("bm_vert_to_index", totvert);

    GSetIterator gs_iter;
    GSET_ITER (gs_iter, bm_faces) {
      f = BLI_gsetIterator_getKey(&gs_iter);

      if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
        BMVert *v[3];
        BM_face_as_array_vert_tri(f, v);

        uint idx[3];
        for (int i = 0; i < 3; i++) {
          void **idx_p;
          if (!BLI_ghash_ensure_p(bm_vert_to_index, v[i], &idx_p)) {
            /* Add vertex to the vertex buffer each time a new one is encountered */
            *idx_p = POINTER_FROM_UINT(v_index);

            /* NULL normal/mask: smooth shading uses the per-vertex data. */
            gpu_bmesh_vert_to_buffer_copy(v[i],
                                          buffers->vert_buf,
                                          v_index,
                                          NULL,
                                          NULL,
                                          cd_vert_mask_offset,
                                          show_mask,
                                          show_vcol,
                                          &empty_mask);

            idx[i] = v_index;
            v_index++;
          }
          else {
            /* Vertex already in the vertex buffer, just get the index. */
            idx[i] = POINTER_AS_UINT(*idx_p);
          }
        }

        GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[1], idx[2]);

        /* Wireframe edges of the triangle. */
        GPU_indexbuf_add_line_verts(&elb_lines, idx[0], idx[1]);
        GPU_indexbuf_add_line_verts(&elb_lines, idx[1], idx[2]);
        GPU_indexbuf_add_line_verts(&elb_lines, idx[2], idx[0]);
      }
    }

    BLI_ghash_free(bm_vert_to_index, NULL, NULL);

    buffers->tot_tri = tottri;
    if (buffers->index_buf == NULL) {
      buffers->index_buf = GPU_indexbuf_build(&elb);
    }
    else {
      /* Reuse the existing index buffer's storage. */
      GPU_indexbuf_build_in_place(&elb, buffers->index_buf);
    }
    buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
  }
  else {
    GSetIterator gs_iter;

    GPUIndexBufBuilder elb_lines;
    GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, tottri * 3);

    GSET_ITER (gs_iter, bm_faces) {
      f = BLI_gsetIterator_getKey(&gs_iter);

      /* PBVH BMesh nodes are expected to contain only triangles. */
      BLI_assert(f->len == 3);

      if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
        BMVert *v[3];
        float fmask = 0.0f;
        int i;

        BM_face_as_array_vert_tri(f, v);

        /* Average mask value */
        for (i = 0; i < 3; i++) {
          fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
        }
        fmask /= 3.0f;

        GPU_indexbuf_add_line_verts(&elb_lines, v_index + 0, v_index + 1);
        GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
        GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);

        for (i = 0; i < 3; i++) {
          /* Flat shading: every corner gets the face normal and the averaged mask. */
          gpu_bmesh_vert_to_buffer_copy(v[i],
                                        buffers->vert_buf,
                                        v_index++,
                                        f->no,
                                        &fmask,
                                        cd_vert_mask_offset,
                                        show_mask,
                                        show_vcol,
                                        &empty_mask);
        }
      }
    }

    buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
    buffers->tot_tri = tottri;
  }

  /* Get material index from the last face we iterated on. */
  buffers->material_index = (f) ? f->mat_nr : 0;

  buffers->show_overlay = !empty_mask;

  gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
}
1044
1045 /** \} */
1046
1047 /* -------------------------------------------------------------------- */
1048 /** \name Generic
1049 * \{ */
1050
1051 /* Threaded - do not call any functions that use OpenGL calls! */
GPU_pbvh_bmesh_buffers_build(bool smooth_shading)1052 GPU_PBVH_Buffers *GPU_pbvh_bmesh_buffers_build(bool smooth_shading)
1053 {
1054 GPU_PBVH_Buffers *buffers;
1055
1056 buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
1057 buffers->use_bmesh = true;
1058 buffers->smooth = smooth_shading;
1059 buffers->show_overlay = true;
1060
1061 return buffers;
1062 }
1063
GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers * buffers,bool fast,bool wires)1064 GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires)
1065 {
1066 if (wires) {
1067 return (fast && buffers->lines_fast) ? buffers->lines_fast : buffers->lines;
1068 }
1069
1070 return (fast && buffers->triangles_fast) ? buffers->triangles_fast : buffers->triangles;
1071 }
1072
GPU_pbvh_buffers_has_overlays(GPU_PBVH_Buffers * buffers)1073 bool GPU_pbvh_buffers_has_overlays(GPU_PBVH_Buffers *buffers)
1074 {
1075 return buffers->show_overlay;
1076 }
1077
/* Material slot index recorded when the buffers were last filled. */
short GPU_pbvh_buffers_material_index_get(GPU_PBVH_Buffers *buffers)
{
  return buffers->material_index;
}
1082
/* Discard every GPU resource owned by the buffers struct.
 * Each member is independent, so the discard order does not matter. */
static void gpu_pbvh_buffers_clear(GPU_PBVH_Buffers *buffers)
{
  /* Batches first. */
  GPU_BATCH_DISCARD_SAFE(buffers->triangles);
  GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
  GPU_BATCH_DISCARD_SAFE(buffers->lines);
  GPU_BATCH_DISCARD_SAFE(buffers->lines_fast);

  /* Then index buffers. */
  GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
  GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
  GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
  GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf_fast);

  /* Finally the vertex buffer. */
  GPU_VERTBUF_DISCARD_SAFE(buffers->vert_buf);
}
1095
void GPU_pbvh_buffers_update_flush(GPU_PBVH_Buffers *buffers)
{
  /* Free empty bmesh node buffers. */
  if (buffers->clear_bmesh_on_flush) {
    buffers->clear_bmesh_on_flush = false;
    gpu_pbvh_buffers_clear(buffers);
  }

  /* Force flushing to the GPU. */
  GPUVertBuf *vbo = buffers->vert_buf;
  if (vbo && GPU_vertbuf_get_data(vbo)) {
    GPU_vertbuf_use(vbo);
  }
}
1109
/* Release all GPU resources and the struct itself. Safe to call with NULL. */
void GPU_pbvh_buffers_free(GPU_PBVH_Buffers *buffers)
{
  if (buffers == NULL) {
    return;
  }

  gpu_pbvh_buffers_clear(buffers);
  MEM_freeN(buffers);
}
1117
1118 /** \} */
1119