1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software Foundation,
14 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * The Original Code is Copyright (C) 2017 by Blender Foundation.
17 * All rights reserved.
18 */
19
20 /** \file
21 * \ingroup draw
22 *
23 * \brief Curve API for render engines
24 */
25
26 #include "MEM_guardedalloc.h"
27
28 #include "BLI_listbase.h"
29 #include "BLI_math_vector.h"
30 #include "BLI_utildefines.h"
31
32 #include "DNA_curve_types.h"
33
34 #include "BKE_curve.h"
35 #include "BKE_displist.h"
36 #include "BKE_font.h"
37
38 #include "GPU_batch.h"
39 #include "GPU_material.h"
40 #include "GPU_texture.h"
41
42 #include "UI_resources.h"
43
44 #include "DRW_render.h"
45
46 #include "draw_cache_inline.h"
47
48 #include "draw_cache_impl.h" /* own include */
49
50 /* See: edit_curve_point_vert.glsl for duplicate includes. */
/* Parenthesize the shifted values so the macros expand safely inside any
 * expression context (e.g. arithmetic or further macro expansion). */
#define SELECT 1
#define ACTIVE_NURB (1 << 2)
#define BEZIER_HANDLE (1 << 3)
#define EVEN_U_BIT (1 << 4) /* Alternate this bit for every U vert. */
#define COLOR_SHIFT 5
56
57 /* Used as values of `color_id` in `edit_curve_overlay_handle_geom.glsl` */
enum {
  /* Color id for NURBS U-lines, placed after the bezier-handle theme colors.
   * NOTE(review): the `+ 2` presumably reserves slots for selected-handle
   * variants -- confirm against `edit_curve_overlay_handle_geom.glsl`. */
  COLOR_NURB_ULINE_ID = TH_HANDLE_AUTOCLAMP - TH_HANDLE_FREE + 2,

  /* Total number of handle color ids (keep last). */
  TOT_HANDLE_COL,
};
63
64 /**
65 * TODO
66 * - Ensure `CurveCache`, `SEQUENCER_DAG_WORKAROUND`.
67 * - Check number of verts/edges to see if cache is valid.
68 * - Check if 'overlay.edges' can use single attribute per edge, not 2 (for selection drawing).
69 */
70
71 static void curve_batch_cache_clear(Curve *cu);
72
73 /* ---------------------------------------------------------------------- */
74 /* Curve Interface, direct access to basic data. */
75
/**
 * Count edit-mode overlay vertices and edges for all nurbs in \a lb.
 * Either output pointer may be NULL (but not both).
 */
static void curve_render_overlay_verts_edges_len_get(ListBase *lb,
                                                     int *r_vert_len,
                                                     int *r_edge_len)
{
  BLI_assert(r_vert_len || r_edge_len);
  int verts = 0;
  int edges = 0;
  LISTBASE_FOREACH (Nurb *, nu, lb) {
    if (nu->bezt) {
      /* Each bezier triple draws the knot plus its two handles. */
      verts += 3 * nu->pntsu;
      /* Two handle edges per control point. */
      edges += nu->pntsu * 2;
    }
    else if (nu->bp) {
      const int u = nu->pntsu;
      const int v = nu->pntsv;
      verts += u * v;
      /* Grid segments between points, along U then along V. */
      edges += (u - 1) * v;
      edges += (v - 1) * u;
    }
  }
  if (r_vert_len) {
    *r_vert_len = verts;
  }
  if (r_edge_len) {
    *r_edge_len = edges;
  }
}
103
curve_render_wire_verts_edges_len_get(const CurveCache * ob_curve_cache,int * r_curve_len,int * r_vert_len,int * r_edge_len)104 static void curve_render_wire_verts_edges_len_get(const CurveCache *ob_curve_cache,
105 int *r_curve_len,
106 int *r_vert_len,
107 int *r_edge_len)
108 {
109 BLI_assert(r_vert_len || r_edge_len);
110 int vert_len = 0;
111 int edge_len = 0;
112 int curve_len = 0;
113 LISTBASE_FOREACH (const BevList *, bl, &ob_curve_cache->bev) {
114 if (bl->nr > 0) {
115 const bool is_cyclic = bl->poly != -1;
116 edge_len += (is_cyclic) ? bl->nr : bl->nr - 1;
117 vert_len += bl->nr;
118 curve_len += 1;
119 }
120 }
121 LISTBASE_FOREACH (const DispList *, dl, &ob_curve_cache->disp) {
122 if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
123 BLI_assert(dl->parts == 1);
124 const bool is_cyclic = dl->type == DL_POLY;
125 edge_len += (is_cyclic) ? dl->nr : dl->nr - 1;
126 vert_len += dl->nr;
127 curve_len += 1;
128 }
129 }
130 if (r_vert_len) {
131 *r_vert_len = vert_len;
132 }
133 if (r_edge_len) {
134 *r_edge_len = edge_len;
135 }
136 if (r_curve_len) {
137 *r_curve_len = curve_len;
138 }
139 }
140
curve_render_normal_len_get(const ListBase * lb,const CurveCache * ob_curve_cache)141 static int curve_render_normal_len_get(const ListBase *lb, const CurveCache *ob_curve_cache)
142 {
143 int normal_len = 0;
144 const BevList *bl;
145 const Nurb *nu;
146 for (bl = ob_curve_cache->bev.first, nu = lb->first; nu && bl; bl = bl->next, nu = nu->next) {
147 int nr = bl->nr;
148 int skip = nu->resolu / 16;
149 #if 0
150 while (nr-- > 0) { /* accounts for empty bevel lists */
151 normal_len += 1;
152 nr -= skip;
153 }
154 #else
155 /* Same as loop above */
156 normal_len += (nr / (skip + 1)) + ((nr % (skip + 1)) != 0);
157 #endif
158 }
159 return normal_len;
160 }
161
162 /* ---------------------------------------------------------------------- */
163 /* Curve Interface, indirect, partially cached access to complex data. */
164
/* Transient render data extracted from a Curve and its evaluated CurveCache.
 * Only the counters matching the requested `types` bits are filled in. */
typedef struct CurveRenderData {
  /* Bitmask of CU_DATATYPE_* flags this data was built for. */
  int types;

  /* Edit-mode overlay counts (control points and handle edges). */
  struct {
    int vert_len;
    int edge_len;
  } overlay;

  /* Evaluated wire center-line counts. */
  struct {
    int curve_len;
    int vert_len;
    int edge_len;
  } wire;

  /* Edit-mode normals. */
  struct {
    /* 'edge_len == len * 2'
     * 'vert_len == len * 3'
     * NOTE(review): `curve_create_edit_curves_nor` allocates `len * 2` verts,
     * so the `len * 3` note above may be stale -- verify. */
    int len;
  } normal;

  struct {
    EditFont *edit_font;
  } text;

  /* borrow from 'Object' (not owned, never freed here) */
  CurveCache *ob_curve_cache;

  /* borrow from 'Curve' (not owned, never freed here) */
  ListBase *nurbs;

  /* edit, index in nurb list */
  int actnu;
  /* edit, index in active nurb (BPoint or BezTriple) */
  int actvert;
} CurveRenderData;
201
/* Flags for `curve_render_data_create`: which data to extract/count. */
enum {
  /* Wire center-line */
  CU_DATATYPE_WIRE = 1 << 0,
  /* Edit-mode verts and optionally handles */
  CU_DATATYPE_OVERLAY = 1 << 1,
  /* Edit-mode normals */
  CU_DATATYPE_NORMAL = 1 << 2,
  /* Geometry */
  CU_DATATYPE_SURFACE = 1 << 3,
  /* Text */
  CU_DATATYPE_TEXT_SELECT = 1 << 4,
};
214
215 /*
216 * ob_curve_cache can be NULL, only needed for CU_DATATYPE_WIRE
217 */
curve_render_data_create(Curve * cu,CurveCache * ob_curve_cache,const int types)218 static CurveRenderData *curve_render_data_create(Curve *cu,
219 CurveCache *ob_curve_cache,
220 const int types)
221 {
222 CurveRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
223 rdata->types = types;
224 ListBase *nurbs;
225
226 rdata->actnu = cu->actnu;
227 rdata->actvert = cu->actvert;
228
229 rdata->ob_curve_cache = ob_curve_cache;
230
231 if (types & CU_DATATYPE_WIRE) {
232 curve_render_wire_verts_edges_len_get(rdata->ob_curve_cache,
233 &rdata->wire.curve_len,
234 &rdata->wire.vert_len,
235 &rdata->wire.edge_len);
236 }
237
238 if (cu->editnurb) {
239 EditNurb *editnurb = cu->editnurb;
240 nurbs = &editnurb->nurbs;
241
242 if (types & CU_DATATYPE_OVERLAY) {
243 curve_render_overlay_verts_edges_len_get(
244 nurbs, &rdata->overlay.vert_len, &rdata->overlay.edge_len);
245
246 rdata->actnu = cu->actnu;
247 rdata->actvert = cu->actvert;
248 }
249 if (types & CU_DATATYPE_NORMAL) {
250 rdata->normal.len = curve_render_normal_len_get(nurbs, rdata->ob_curve_cache);
251 }
252 }
253 else {
254 nurbs = &cu->nurb;
255 }
256
257 rdata->nurbs = nurbs;
258
259 rdata->text.edit_font = cu->editfont;
260
261 return rdata;
262 }
263
/**
 * Free render data created by #curve_render_data_create.
 * Borrowed pointers (`nurbs`, `ob_curve_cache`) are intentionally not freed.
 * (Removed a dead `#if 0` block referencing a nonexistent `loose_verts` field.)
 */
static void curve_render_data_free(CurveRenderData *rdata)
{
  MEM_freeN(rdata);
}
273
/* Number of edit-mode overlay vertices; requires CU_DATATYPE_OVERLAY. */
static int curve_render_data_overlay_verts_len_get(const CurveRenderData *rdata)
{
  BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
  return rdata->overlay.vert_len;
}
279
/* Number of edit-mode overlay edges; requires CU_DATATYPE_OVERLAY. */
static int curve_render_data_overlay_edges_len_get(const CurveRenderData *rdata)
{
  BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
  return rdata->overlay.edge_len;
}
285
/* Number of evaluated wire vertices; requires CU_DATATYPE_WIRE. */
static int curve_render_data_wire_verts_len_get(const CurveRenderData *rdata)
{
  BLI_assert(rdata->types & CU_DATATYPE_WIRE);
  return rdata->wire.vert_len;
}
291
/* Number of evaluated wire edges; requires CU_DATATYPE_WIRE. */
static int curve_render_data_wire_edges_len_get(const CurveRenderData *rdata)
{
  BLI_assert(rdata->types & CU_DATATYPE_WIRE);
  return rdata->wire.edge_len;
}
297
/* Number of distinct wire curves (strips); requires CU_DATATYPE_WIRE. */
static int curve_render_data_wire_curve_len_get(const CurveRenderData *rdata)
{
  BLI_assert(rdata->types & CU_DATATYPE_WIRE);
  return rdata->wire.curve_len;
}
303
/* Number of edit-mode normals to draw; requires CU_DATATYPE_NORMAL. */
static int curve_render_data_normal_len_get(const CurveRenderData *rdata)
{
  BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
  return rdata->normal.len;
}
309
/**
 * Accumulate into \a cd_layers the CustomData masks required by the
 * attributes of the given materials. Only unnamed layers are considered
 * (curves cannot have named layers).
 */
static void curve_cd_calc_used_gpu_layers(CustomDataMask *cd_layers,
                                          struct GPUMaterial **gpumat_array,
                                          int gpumat_array_len)
{
  for (int i = 0; i < gpumat_array_len; i++) {
    struct GPUMaterial *gpumat = gpumat_array[i];
    if (gpumat == NULL) {
      continue;
    }

    ListBase gpu_attrs = GPU_material_attributes(gpumat);
    LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
      const char *name = gpu_attr->name;
      int type = gpu_attr->type;

      /* Curves cannot have named layers.
       * Note: We could relax this assumption later. */
      if (name[0] != '\0') {
        continue;
      }

      /* Unnamed auto attributes resolve to UVs for curves. */
      if (type == CD_AUTO_FROM_NAME) {
        type = CD_MTFACE;
      }

      switch (type) {
        case CD_MTFACE:
          *cd_layers |= CD_MASK_MLOOPUV;
          break;
        case CD_TANGENT:
          *cd_layers |= CD_MASK_TANGENT;
          break;
        case CD_MCOL:
          /* Curve object don't have Color data. */
          break;
        case CD_ORCO:
          *cd_layers |= CD_MASK_ORCO;
          break;
        default:
          /* Other attribute types are not used for curve drawing. */
          break;
      }
    }
  }
}
352
353 /* ---------------------------------------------------------------------- */
354 /* Curve GPUBatch Cache */
355
/* Per-Curve GPU batch cache, stored in `Curve.batch_cache`.
 *
 * IMPORTANT: `curve_batch_cache_clear` iterates the `ordered`, `edit`,
 * `ibo` and `batch` sub-structs as flat arrays of pointers, so they must
 * contain pointer members only. */
typedef struct CurveBatchCache {
  /* Vertex buffers in evaluated (render) order. */
  struct {
    GPUVertBuf *pos_nor;
    GPUVertBuf *edge_fac;
    GPUVertBuf *curves_pos;

    GPUVertBuf *loop_pos_nor;
    GPUVertBuf *loop_uv;
    GPUVertBuf *loop_tan;
  } ordered;

  struct {
    /* Curve points. Aligned with ordered.pos_nor */
    GPUVertBuf *curves_nor;
    GPUVertBuf *curves_weight; /* TODO. */
    /* Edit points (beztriples and bpoints) */
    GPUVertBuf *pos;
    GPUVertBuf *data;
  } edit;

  /* Index buffers shared by the batches below. */
  struct {
    GPUIndexBuf *surfaces_tris;
    GPUIndexBuf *surfaces_lines;
    GPUIndexBuf *curves_lines;
    GPUIndexBuf *edges_adj_lines;
    /* Edit mode */
    GPUIndexBuf *edit_verts;
    GPUIndexBuf *edit_lines;
  } ibo;

  struct {
    GPUBatch *surfaces;
    GPUBatch *surfaces_edges;
    GPUBatch *curves;
    /* control handles and vertices */
    GPUBatch *edit_edges;
    GPUBatch *edit_verts;
    GPUBatch *edit_normals;
    GPUBatch *edge_detection;
  } batch;

  /* Per-material surface index buffers/batches, both `mat_len` long. */
  GPUIndexBuf **surf_per_mat_tris;
  GPUBatch **surf_per_mat;
  int mat_len;
  /* CustomData layers currently in the VBOs vs. layers materials request. */
  CustomDataMask cd_used, cd_needed;

  /* settings to determine if cache is invalid */
  bool is_dirty;
  bool is_editmode;

  /* Valid only if edge_detection is up to date. */
  bool is_manifold;
} CurveBatchCache;
409
410 /* GPUBatch cache management. */
411
curve_batch_cache_valid(Curve * cu)412 static bool curve_batch_cache_valid(Curve *cu)
413 {
414 CurveBatchCache *cache = cu->batch_cache;
415
416 if (cache == NULL) {
417 return false;
418 }
419
420 if (cache->mat_len != DRW_curve_material_count_get(cu)) {
421 return false;
422 }
423
424 if (cache->is_dirty) {
425 return false;
426 }
427
428 if (cache->is_editmode != ((cu->editnurb != NULL) || (cu->editfont != NULL))) {
429 return false;
430 }
431
432 if (cache->is_editmode) {
433 if (cu->editfont) {
434 /* TODO */
435 }
436 }
437
438 return true;
439 }
440
/**
 * (Re)initialize the batch cache for \a cu: allocate it if missing,
 * otherwise zero it in place, then set up per-material arrays.
 * (Removed a dead `#if 0` nurbs-selection block that was never used.)
 */
static void curve_batch_cache_init(Curve *cu)
{
  CurveBatchCache *cache = cu->batch_cache;

  if (!cache) {
    cache = cu->batch_cache = MEM_callocN(sizeof(*cache), __func__);
  }
  else {
    memset(cache, 0, sizeof(*cache));
  }

  cache->cd_used = 0;
  cache->mat_len = DRW_curve_material_count_get(cu);
  cache->surf_per_mat_tris = MEM_callocN(sizeof(*cache->surf_per_mat_tris) * cache->mat_len,
                                         __func__);
  cache->surf_per_mat = MEM_callocN(sizeof(*cache->surf_per_mat) * cache->mat_len, __func__);

  cache->is_editmode = (cu->editnurb != NULL) || (cu->editfont != NULL);

  cache->is_dirty = false;
}
473
/* Ensure the batch cache exists and matches the curve's current state. */
void DRW_curve_batch_cache_validate(Curve *cu)
{
  if (curve_batch_cache_valid(cu)) {
    return;
  }
  curve_batch_cache_clear(cu);
  curve_batch_cache_init(cu);
}
481
/* Fetch the cache; assumes DRW_curve_batch_cache_validate() already ran. */
static CurveBatchCache *curve_batch_cache_get(Curve *cu)
{
  return cu->batch_cache;
}
486
/* Tag (parts of) the batch cache for rebuild. A select-only change discards
 * just the per-vertex flag data and the edit batches; everything else stays. */
void DRW_curve_batch_cache_dirty_tag(Curve *cu, int mode)
{
  CurveBatchCache *cache = cu->batch_cache;
  if (cache == NULL) {
    /* Nothing cached yet, nothing to invalidate. */
    return;
  }
  switch (mode) {
    case BKE_CURVE_BATCH_DIRTY_ALL:
      cache->is_dirty = true;
      break;
    case BKE_CURVE_BATCH_DIRTY_SELECT:
      /* Selection only affects the per-vertex flag VBO and its batches. */
      GPU_VERTBUF_DISCARD_SAFE(cache->edit.data);

      GPU_BATCH_DISCARD_SAFE(cache->batch.edit_edges);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edit_verts);
      break;
    default:
      BLI_assert(0);
  }
}
507
/* Discard all GPU buffers/batches held by the cache (the cache struct itself
 * is kept; see DRW_curve_batch_cache_free for full destruction). */
static void curve_batch_cache_clear(Curve *cu)
{
  CurveBatchCache *cache = cu->batch_cache;
  if (!cache) {
    return;
  }

  /* The sub-structs are treated as arrays of pointers; this relies on them
   * containing only GPUVertBuf*/GPUIndexBuf*/GPUBatch* members. */
  for (int i = 0; i < sizeof(cache->ordered) / sizeof(void *); i++) {
    GPUVertBuf **vbo = (GPUVertBuf **)&cache->ordered;
    GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
  }
  for (int i = 0; i < sizeof(cache->edit) / sizeof(void *); i++) {
    GPUVertBuf **vbo = (GPUVertBuf **)&cache->edit;
    GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
  }
  for (int i = 0; i < sizeof(cache->ibo) / sizeof(void *); i++) {
    GPUIndexBuf **ibo = (GPUIndexBuf **)&cache->ibo;
    GPU_INDEXBUF_DISCARD_SAFE(ibo[i]);
  }
  for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
    GPUBatch **batch = (GPUBatch **)&cache->batch;
    GPU_BATCH_DISCARD_SAFE(batch[i]);
  }

  for (int i = 0; i < cache->mat_len; i++) {
    GPU_INDEXBUF_DISCARD_SAFE(cache->surf_per_mat_tris[i]);
    GPU_BATCH_DISCARD_SAFE(cache->surf_per_mat[i]);
  }
  MEM_SAFE_FREE(cache->surf_per_mat_tris);
  MEM_SAFE_FREE(cache->surf_per_mat);
  cache->mat_len = 0;
  cache->cd_used = 0;
}
541
/* Free all GPU data and the cache struct itself (sets the pointer to NULL). */
void DRW_curve_batch_cache_free(Curve *cu)
{
  curve_batch_cache_clear(cu);
  MEM_SAFE_FREE(cu->batch_cache);
}
547
548 /* -------------------------------------------------------------------- */
549 /** \name Private Curve Cache API
550 * \{ */
551
552 /* GPUBatch cache usage. */
/* Fill `vbo_curves_pos` with evaluated wire positions: all bevel-list points
 * first, then DL_SEGM/DL_POLY display-list verts. The order must match the
 * counting in curve_render_wire_verts_edges_len_get(). */
static void curve_create_curves_pos(CurveRenderData *rdata, GPUVertBuf *vbo_curves_pos)
{
  BLI_assert(rdata->ob_curve_cache != NULL);

  /* Format is static: built once, reused for every curve. */
  static GPUVertFormat format = {0};
  static struct {
    uint pos;
  } attr_id;
  if (format.attr_len == 0) {
    attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  }

  const int vert_len = curve_render_data_wire_verts_len_get(rdata);
  GPU_vertbuf_init_with_format(vbo_curves_pos, &format);
  GPU_vertbuf_data_alloc(vbo_curves_pos, vert_len);

  int v_idx = 0;
  LISTBASE_FOREACH (const BevList *, bl, &rdata->ob_curve_cache->bev) {
    if (bl->nr <= 0) {
      /* Skip empty bevel lists (not counted either). */
      continue;
    }
    const int i_end = v_idx + bl->nr;
    for (const BevPoint *bevp = bl->bevpoints; v_idx < i_end; v_idx++, bevp++) {
      GPU_vertbuf_attr_set(vbo_curves_pos, attr_id.pos, v_idx, bevp->vec);
    }
  }
  LISTBASE_FOREACH (const DispList *, dl, &rdata->ob_curve_cache->disp) {
    if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
      for (int i = 0; i < dl->nr; v_idx++, i++) {
        GPU_vertbuf_attr_set(vbo_curves_pos, attr_id.pos, v_idx, &((float(*)[3])dl->verts)[i]);
      }
    }
  }
  /* Every allocated vertex must have been written. */
  BLI_assert(v_idx == vert_len);
}
588
/* Build the line-strip index buffer for the wire center-lines. Vertex order
 * must match curve_create_curves_pos(). Each curve becomes one strip,
 * terminated by a primitive restart; cyclic curves start at their last
 * vertex so the strip closes the loop. */
static void curve_create_curves_lines(CurveRenderData *rdata, GPUIndexBuf *ibo_curve_lines)
{
  BLI_assert(rdata->ob_curve_cache != NULL);

  const int vert_len = curve_render_data_wire_verts_len_get(rdata);
  const int edge_len = curve_render_data_wire_edges_len_get(rdata);
  const int curve_len = curve_render_data_wire_curve_len_get(rdata);
  /* Count the last vertex or each strip and the primitive restart. */
  const int index_len = edge_len + curve_len * 2;

  GPUIndexBufBuilder elb;
  GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, index_len, vert_len);

  int v_idx = 0;
  LISTBASE_FOREACH (const BevList *, bl, &rdata->ob_curve_cache->bev) {
    if (bl->nr <= 0) {
      continue;
    }
    const bool is_cyclic = bl->poly != -1;
    if (is_cyclic) {
      /* Prepend the last vertex to close the loop. */
      GPU_indexbuf_add_generic_vert(&elb, v_idx + (bl->nr - 1));
    }
    for (int i = 0; i < bl->nr; i++) {
      GPU_indexbuf_add_generic_vert(&elb, v_idx + i);
    }
    GPU_indexbuf_add_primitive_restart(&elb);
    v_idx += bl->nr;
  }
  LISTBASE_FOREACH (const DispList *, dl, &rdata->ob_curve_cache->disp) {
    if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
      const bool is_cyclic = dl->type == DL_POLY;
      if (is_cyclic) {
        GPU_indexbuf_add_generic_vert(&elb, v_idx + (dl->nr - 1));
      }
      for (int i = 0; i < dl->nr; i++) {
        GPU_indexbuf_add_generic_vert(&elb, v_idx + i);
      }
      GPU_indexbuf_add_primitive_restart(&elb);
      v_idx += dl->nr;
    }
  }
  GPU_indexbuf_build_in_place(&elb, ibo_curve_lines);
}
632
/* Fill `vbo_curves_nor` with edit-mode normal display data: two vertices per
 * shown normal (pos/rad/nor/tan on the first, pos only on the second).
 * Point count must match curve_render_normal_len_get() * 2. */
static void curve_create_edit_curves_nor(CurveRenderData *rdata, GPUVertBuf *vbo_curves_nor)
{
  static GPUVertFormat format = {0};
  static struct {
    uint pos, nor, tan, rad;
  } attr_id;
  if (format.attr_len == 0) {
    /* initialize vertex formats */
    attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    attr_id.rad = GPU_vertformat_attr_add(&format, "rad", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
    attr_id.nor = GPU_vertformat_attr_add(
        &format, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
    attr_id.tan = GPU_vertformat_attr_add(
        &format, "tan", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
  }

  int verts_len_capacity = curve_render_data_normal_len_get(rdata) * 2;
  int vbo_len_used = 0;

  GPU_vertbuf_init_with_format(vbo_curves_nor, &format);
  GPU_vertbuf_data_alloc(vbo_curves_nor, verts_len_capacity);

  const BevList *bl;
  const Nurb *nu;

  /* Walk the bevel lists and nurbs in lock-step (same pairing as the
   * counting function). */
  for (bl = rdata->ob_curve_cache->bev.first, nu = rdata->nurbs->first; nu && bl;
       bl = bl->next, nu = nu->next) {
    const BevPoint *bevp = bl->bevpoints;
    int nr = bl->nr;
    int skip = nu->resolu / 16;

    while (nr-- > 0) { /* accounts for empty bevel lists */
      /* Normal is the local +X axis rotated by the bevel point orientation. */
      float nor[3] = {1.0f, 0.0f, 0.0f};
      mul_qt_v3(bevp->quat, nor);

      GPUPackedNormal pnor = GPU_normal_convert_i10_v3(nor);
      GPUPackedNormal ptan = GPU_normal_convert_i10_v3(bevp->dir);

      /* Only set attributes for one vertex. */
      GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.pos, vbo_len_used, bevp->vec);
      GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.rad, vbo_len_used, &bevp->radius);
      GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.nor, vbo_len_used, &pnor);
      GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.tan, vbo_len_used, &ptan);
      vbo_len_used++;

      /* Skip the other vertex (it does not need to be offsetted). */
      GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.pos, vbo_len_used, bevp->vec);
      vbo_len_used++;

      /* Advance past the skipped bevel points. */
      bevp += skip + 1;
      nr -= skip;
    }
  }
  BLI_assert(vbo_len_used == verts_len_capacity);
}
688
/**
 * Pack selection/active/handle state plus a color id into a single byte for
 * the edit overlay `data` VBO. The low COLOR_SHIFT bits hold flags, the
 * remaining high bits hold \a col_id.
 */
static uint8_t beztriple_vflag_get(CurveRenderData *rdata,
                                   uint8_t flag,
                                   uint8_t col_id,
                                   int v_idx,
                                   int nu_id,
                                   bool handle_point,
                                   const bool handle_selected)
{
  const bool is_active_nurb = (nu_id == rdata->actnu);
  const bool is_active_vert = is_active_nurb && (v_idx == rdata->actvert);

  uint8_t vflag = 0;
  SET_FLAG_FROM_TEST(vflag, (flag & SELECT), VFLAG_VERT_SELECTED);
  SET_FLAG_FROM_TEST(vflag, is_active_vert, VFLAG_VERT_ACTIVE);
  SET_FLAG_FROM_TEST(vflag, is_active_nurb, ACTIVE_NURB);
  SET_FLAG_FROM_TEST(vflag, handle_point, BEZIER_HANDLE);
  SET_FLAG_FROM_TEST(vflag, handle_selected, VFLAG_VERT_SELECTED_BEZT_HANDLE);
  /* Flags spilling into the color bits would corrupt the color id. */
  BLI_assert((vflag >> COLOR_SHIFT) == 0);
  /* handle color id */
  vflag |= col_id << COLOR_SHIFT;
  return vflag;
}
709
/**
 * Pack selection/active state for a BPoint into a byte, alternating
 * EVEN_U_BIT per U column and fixing the color id to the NURBS U-line color.
 */
static uint8_t bpoint_vflag_get(CurveRenderData *rdata, uint8_t flag, int v_idx, int nu_id, int u)
{
  const bool is_active_nurb = (nu_id == rdata->actnu);
  const bool is_active_vert = is_active_nurb && (v_idx == rdata->actvert);

  uint8_t vflag = 0;
  SET_FLAG_FROM_TEST(vflag, (flag & SELECT), VFLAG_VERT_SELECTED);
  SET_FLAG_FROM_TEST(vflag, is_active_vert, VFLAG_VERT_ACTIVE);
  SET_FLAG_FROM_TEST(vflag, is_active_nurb, ACTIVE_NURB);
  SET_FLAG_FROM_TEST(vflag, ((u % 2) == 0), EVEN_U_BIT);
  /* Flags spilling into the color bits would corrupt the color id. */
  BLI_assert((vflag >> COLOR_SHIFT) == 0);
  vflag |= COLOR_NURB_ULINE_ID << COLOR_SHIFT;
  return vflag;
}
722
/* Build edit-mode overlay buffers: positions (`vbo_pos`), per-vertex flags
 * (`vbo_data`), point indices (`ibo_edit_verts_points`) and handle/grid edge
 * indices (`ibo_edit_lines`). Each output is optional; the DRW_TEST_ASSIGN_*
 * macros decide which ones this call actually fills.
 *
 * Indexing notes (grounded in the loops below):
 * - Hidden BezTriples are skipped entirely (no vertex slots consumed).
 * - Hidden BPoints still consume a vertex slot (the loop increment advances
 *   `vbo_len_used` even on `continue`), keeping grid neighbor offsets
 *   (`+1`, `+pntsu`) valid. */
static void curve_create_edit_data_and_handles(CurveRenderData *rdata,
                                               GPUVertBuf *vbo_pos,
                                               GPUVertBuf *vbo_data,
                                               GPUIndexBuf *ibo_edit_verts_points,
                                               GPUIndexBuf *ibo_edit_lines)
{
  static GPUVertFormat format_pos = {0};
  static GPUVertFormat format_data = {0};
  static struct {
    uint pos, data;
  } attr_id;
  if (format_pos.attr_len == 0) {
    /* initialize vertex formats */
    attr_id.pos = GPU_vertformat_attr_add(&format_pos, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    attr_id.data = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
  }

  int verts_len_capacity = curve_render_data_overlay_verts_len_get(rdata);
  int edges_len_capacity = curve_render_data_overlay_edges_len_get(rdata) * 2;
  int vbo_len_used = 0;

  if (DRW_TEST_ASSIGN_VBO(vbo_pos)) {
    GPU_vertbuf_init_with_format(vbo_pos, &format_pos);
    GPU_vertbuf_data_alloc(vbo_pos, verts_len_capacity);
  }
  if (DRW_TEST_ASSIGN_VBO(vbo_data)) {
    GPU_vertbuf_init_with_format(vbo_data, &format_data);
    GPU_vertbuf_data_alloc(vbo_data, verts_len_capacity);
  }

  GPUIndexBufBuilder elb_verts, *elbp_verts = NULL;
  GPUIndexBufBuilder elb_lines, *elbp_lines = NULL;
  if (DRW_TEST_ASSIGN_IBO(ibo_edit_verts_points)) {
    elbp_verts = &elb_verts;
    GPU_indexbuf_init(elbp_verts, GPU_PRIM_POINTS, verts_len_capacity, verts_len_capacity);
  }
  if (DRW_TEST_ASSIGN_IBO(ibo_edit_lines)) {
    elbp_lines = &elb_lines;
    GPU_indexbuf_init(elbp_lines, GPU_PRIM_LINES, edges_len_capacity, verts_len_capacity);
  }

  int nu_id = 0;
  for (Nurb *nu = rdata->nurbs->first; nu; nu = nu->next, nu_id++) {
    const BezTriple *bezt = nu->bezt;
    const BPoint *bp = nu->bp;

    if (bezt) {
      for (int a = 0; a < nu->pntsu; a++, bezt++) {
        if (bezt->hide == true) {
          continue;
        }
        const bool handle_selected = BEZT_ISSEL_ANY(bezt);

        /* Three verts per triple: handle 1, knot, handle 2. */
        if (elbp_verts) {
          GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used + 0);
          GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used + 1);
          GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used + 2);
        }
        if (elbp_lines) {
          /* Handle edges radiate from the knot (index + 1). */
          GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used + 1, vbo_len_used + 0);
          GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used + 1, vbo_len_used + 2);
        }
        if (vbo_data) {
          const uint8_t vflag[3] = {
              beztriple_vflag_get(rdata, bezt->f1, bezt->h1, a, nu_id, true, handle_selected),
              beztriple_vflag_get(rdata, bezt->f2, bezt->h1, a, nu_id, false, handle_selected),
              beztriple_vflag_get(rdata, bezt->f3, bezt->h2, a, nu_id, true, handle_selected),
          };
          for (int j = 0; j < 3; j++) {
            GPU_vertbuf_attr_set(vbo_data, attr_id.data, vbo_len_used + j, &vflag[j]);
          }
        }
        if (vbo_pos) {
          for (int j = 0; j < 3; j++) {
            GPU_vertbuf_attr_set(vbo_pos, attr_id.pos, vbo_len_used + j, bezt->vec[j]);
          }
        }
        vbo_len_used += 3;
      }
    }
    else if (bp) {
      int pt_len = nu->pntsu * nu->pntsv;
      for (int a = 0; a < pt_len; a++, bp++, vbo_len_used += 1) {
        if (bp->hide == true) {
          continue;
        }
        int u = (a % nu->pntsu);
        int v = (a / nu->pntsu);
        /* Use indexed rendering for bezier.
         * Specify all points and use indices to hide/show. */
        if (elbp_verts) {
          GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used);
        }
        if (elbp_lines) {
          /* Connect to the next visible point in U and in V. */
          const BPoint *bp_next_u = (u < (nu->pntsu - 1)) ? &nu->bp[a + 1] : NULL;
          const BPoint *bp_next_v = (v < (nu->pntsv - 1)) ? &nu->bp[a + nu->pntsu] : NULL;
          if (bp_next_u && (bp_next_u->hide == false)) {
            GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used, vbo_len_used + 1);
          }
          if (bp_next_v && (bp_next_v->hide == false)) {
            GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used, vbo_len_used + nu->pntsu);
          }
        }
        if (vbo_data) {
          uint8_t vflag = bpoint_vflag_get(rdata, bp->f1, a, nu_id, u);
          GPU_vertbuf_attr_set(vbo_data, attr_id.data, vbo_len_used, &vflag);
        }
        if (vbo_pos) {
          GPU_vertbuf_attr_set(vbo_pos, attr_id.pos, vbo_len_used, bp->vec);
        }
      }
    }
  }

  /* Resize & Finish */
  if (elbp_verts != NULL) {
    GPU_indexbuf_build_in_place(elbp_verts, ibo_edit_verts_points);
  }
  if (elbp_lines != NULL) {
    GPU_indexbuf_build_in_place(elbp_lines, ibo_edit_lines);
  }
  /* Hidden bezier triples mean fewer verts than allocated; shrink to fit. */
  if (vbo_len_used != verts_len_capacity) {
    if (vbo_pos != NULL) {
      GPU_vertbuf_data_resize(vbo_pos, vbo_len_used);
    }
    if (vbo_data != NULL) {
      GPU_vertbuf_data_resize(vbo_data, vbo_len_used);
    }
  }
}
853
854 /** \} */
855
856 /* -------------------------------------------------------------------- */
857 /** \name Public Object/Curve API
858 * \{ */
859
/* Request the batch drawing the evaluated curve center-line (line-strip). */
GPUBatch *DRW_curve_batch_cache_get_wire_edge(Curve *cu)
{
  CurveBatchCache *cache = curve_batch_cache_get(cu);
  return DRW_batch_request(&cache->batch.curves);
}
865
/* Request the batch drawing edit-mode normals (line pairs). */
GPUBatch *DRW_curve_batch_cache_get_normal_edge(Curve *cu)
{
  CurveBatchCache *cache = curve_batch_cache_get(cu);
  return DRW_batch_request(&cache->batch.edit_normals);
}
871
/* Request the batch drawing edit-mode handle/grid edges. */
GPUBatch *DRW_curve_batch_cache_get_edit_edges(Curve *cu)
{
  CurveBatchCache *cache = curve_batch_cache_get(cu);
  return DRW_batch_request(&cache->batch.edit_edges);
}
877
/* Request the batch drawing edit-mode control points. */
GPUBatch *DRW_curve_batch_cache_get_edit_verts(Curve *cu)
{
  CurveBatchCache *cache = curve_batch_cache_get(cu);
  return DRW_batch_request(&cache->batch.edit_verts);
}
883
/* Request the triangulated surface batch (all materials combined). */
GPUBatch *DRW_curve_batch_cache_get_triangles_with_normals(struct Curve *cu)
{
  CurveBatchCache *cache = curve_batch_cache_get(cu);
  return DRW_batch_request(&cache->batch.surfaces);
}
889
/* Request per-material surface batches, recording which CustomData layers
 * the materials need (consumed later by the cache-creation pass).
 * Returns an array of `cache->mat_len` batch pointers (owned by the cache). */
GPUBatch **DRW_curve_batch_cache_get_surface_shaded(struct Curve *cu,
                                                    struct GPUMaterial **gpumat_array,
                                                    uint gpumat_array_len)
{
  CurveBatchCache *cache = curve_batch_cache_get(cu);

  BLI_assert(gpumat_array_len == cache->mat_len);

  curve_cd_calc_used_gpu_layers(&cache->cd_needed, gpumat_array, gpumat_array_len);

  for (int i = 0; i < cache->mat_len; i++) {
    DRW_batch_request(&cache->surf_per_mat[i]);
  }
  return cache->surf_per_mat;
}
905
/* Request the position/normal loop VBO directly (owned by the cache). */
GPUVertBuf *DRW_curve_batch_cache_pos_vertbuf_get(struct Curve *cu)
{
  CurveBatchCache *cache = curve_batch_cache_get(cu);
  /* Request surface to trigger the vbo filling. Otherwise it may do nothing. */
  DRW_batch_request(&cache->batch.surfaces);

  DRW_vbo_request(NULL, &cache->ordered.loop_pos_nor);
  return cache->ordered.loop_pos_nor;
}
915
/* Request the batch drawing surface wireframe edges. */
GPUBatch *DRW_curve_batch_cache_get_wireframes_face(Curve *cu)
{
  CurveBatchCache *cache = curve_batch_cache_get(cu);
  return DRW_batch_request(&cache->batch.surfaces_edges);
}
921
/* Request the adjacency-lines batch used for edge detection (e.g. shadows).
 * \param r_is_manifold: optional; receives the cached manifold state. */
GPUBatch *DRW_curve_batch_cache_get_edge_detection(Curve *cu, bool *r_is_manifold)
{
  CurveBatchCache *cache = curve_batch_cache_get(cu);
  /* Even if is_manifold is not correct (not updated),
   * the default (not manifold) is just the worst case. */
  if (r_is_manifold) {
    *r_is_manifold = cache->is_manifold;
  }
  return DRW_batch_request(&cache->batch.edge_detection);
}
932
/* Number of material slots to allocate batches for (at least 1). */
int DRW_curve_material_count_get(Curve *cu)
{
  return max_ii(1, cu->totcol);
}
937
938 /** \} */
939
940 /* -------------------------------------------------------------------- */
941 /** \name Grouped batch generation
942 * \{ */
943
/**
 * Resolve all batch requests for a curve/surface/text object.
 *
 * Earlier draw code marks batches in the cache as "requested"
 * (see #DRW_batch_request); this function performs the deferred work in
 * three passes:
 *  1. Attach the VBO/IBO requests each requested batch needs.
 *  2. Accumulate the #CurveRenderData flags implied by those requests.
 *  3. Fill the requested buffers from the evaluated #CurveCache display list.
 *
 * \param ob: Evaluated object; must be a curve, surface or font object.
 */
void DRW_curve_batch_cache_create_requested(Object *ob)
{
  BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF, OB_FONT));

  Curve *cu = ob->data;
  CurveBatchCache *cache = curve_batch_cache_get(cu);

  /* Verify that all surface batches have needed attribute layers. */
  /* TODO(fclem): We could be a bit smarter here and only do it per material. */
  if ((cache->cd_used & cache->cd_needed) != cache->cd_needed) {
    for (int i = 0; i < cache->mat_len; i++) {
      /* We can't discard batches at this point as they have been
       * referenced for drawing. Just clear them in place. */
      GPU_BATCH_CLEAR_SAFE(cache->surf_per_mat[i]);
    }

    cache->cd_used |= cache->cd_needed;
    cache->cd_needed = 0;
  }

  /* Init batches and request VBOs & IBOs */
  if (DRW_batch_requested(cache->batch.surfaces, GPU_PRIM_TRIS)) {
    DRW_vbo_request(cache->batch.surfaces, &cache->ordered.loop_pos_nor);
  }
  if (DRW_batch_requested(cache->batch.surfaces_edges, GPU_PRIM_LINES)) {
    DRW_ibo_request(cache->batch.surfaces_edges, &cache->ibo.surfaces_lines);
    DRW_vbo_request(cache->batch.surfaces_edges, &cache->ordered.pos_nor);
    DRW_vbo_request(cache->batch.surfaces_edges, &cache->ordered.edge_fac);
  }
  if (DRW_batch_requested(cache->batch.curves, GPU_PRIM_LINE_STRIP)) {
    DRW_ibo_request(cache->batch.curves, &cache->ibo.curves_lines);
    DRW_vbo_request(cache->batch.curves, &cache->ordered.curves_pos);
  }
  if (DRW_batch_requested(cache->batch.edge_detection, GPU_PRIM_LINES_ADJ)) {
    DRW_ibo_request(cache->batch.edge_detection, &cache->ibo.edges_adj_lines);
    DRW_vbo_request(cache->batch.edge_detection, &cache->ordered.pos_nor);
  }

  /* Edit mode */
  if (DRW_batch_requested(cache->batch.edit_edges, GPU_PRIM_LINES)) {
    DRW_ibo_request(cache->batch.edit_edges, &cache->ibo.edit_lines);
    DRW_vbo_request(cache->batch.edit_edges, &cache->edit.pos);
    DRW_vbo_request(cache->batch.edit_edges, &cache->edit.data);
  }
  if (DRW_batch_requested(cache->batch.edit_verts, GPU_PRIM_POINTS)) {
    DRW_ibo_request(cache->batch.edit_verts, &cache->ibo.edit_verts);
    DRW_vbo_request(cache->batch.edit_verts, &cache->edit.pos);
    DRW_vbo_request(cache->batch.edit_verts, &cache->edit.data);
  }
  if (DRW_batch_requested(cache->batch.edit_normals, GPU_PRIM_LINES)) {
    DRW_vbo_request(cache->batch.edit_normals, &cache->edit.curves_nor);
  }
  for (int i = 0; i < cache->mat_len; i++) {
    if (DRW_batch_requested(cache->surf_per_mat[i], GPU_PRIM_TRIS)) {
      /* With a single material the shared `surfaces` triangles suffice;
       * a per-material index buffer is only needed when splitting. */
      if (cache->mat_len > 1) {
        DRW_ibo_request(cache->surf_per_mat[i], &cache->surf_per_mat_tris[i]);
      }
      if (cache->cd_used & CD_MASK_MLOOPUV) {
        DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_uv);
      }
      if (cache->cd_used & CD_MASK_TANGENT) {
        DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_tan);
      }
      DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_pos_nor);
    }
  }

#ifdef DRW_DEBUG_MESH_CACHE_REQUEST
  printf("-- %s %s --\n", __func__, ob->id.name + 2);
#endif

  /* Generate MeshRenderData flags */
  int mr_flag = 0;
  DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.pos_nor, CU_DATATYPE_SURFACE);
  DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.edge_fac, CU_DATATYPE_SURFACE);
  DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.curves_pos, CU_DATATYPE_WIRE);
  DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_pos_nor, CU_DATATYPE_SURFACE);
  DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_uv, CU_DATATYPE_SURFACE);
  DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_tan, CU_DATATYPE_SURFACE);
  DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_tris, CU_DATATYPE_SURFACE);
  DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_lines, CU_DATATYPE_SURFACE);
  DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.curves_lines, CU_DATATYPE_WIRE);
  DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edges_adj_lines, CU_DATATYPE_SURFACE);

  DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.pos, CU_DATATYPE_OVERLAY);
  DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.data, CU_DATATYPE_OVERLAY);
  DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.curves_nor, CU_DATATYPE_NORMAL);
  DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.curves_weight, CU_DATATYPE_OVERLAY);
  DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_verts, CU_DATATYPE_OVERLAY);
  DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_lines, CU_DATATYPE_OVERLAY);

  for (int i = 0; i < cache->mat_len; i++) {
    DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->surf_per_mat_tris[i], CU_DATATYPE_SURFACE);
  }

#ifdef DRW_DEBUG_MESH_CACHE_REQUEST
  printf("  mr_flag %d\n\n", mr_flag);
#endif

  CurveRenderData *rdata = curve_render_data_create(cu, ob->runtime.curve_cache, mr_flag);

  /* DispLists */
  ListBase *lb = &rdata->ob_curve_cache->disp;

  /* Generate VBOs */
  if (DRW_vbo_requested(cache->ordered.pos_nor)) {
    DRW_displist_vertbuf_create_pos_and_nor(lb, cache->ordered.pos_nor);
  }
  if (DRW_vbo_requested(cache->ordered.edge_fac)) {
    DRW_displist_vertbuf_create_wiredata(lb, cache->ordered.edge_fac);
  }
  if (DRW_vbo_requested(cache->ordered.curves_pos)) {
    curve_create_curves_pos(rdata, cache->ordered.curves_pos);
  }

  if (DRW_vbo_requested(cache->ordered.loop_pos_nor) ||
      DRW_vbo_requested(cache->ordered.loop_uv) || DRW_vbo_requested(cache->ordered.loop_tan)) {
    DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(
        lb, cache->ordered.loop_pos_nor, cache->ordered.loop_uv, cache->ordered.loop_tan);
  }

  if (DRW_ibo_requested(cache->surf_per_mat_tris[0])) {
    DRW_displist_indexbuf_create_triangles_loop_split_by_material(
        lb, cache->surf_per_mat_tris, cache->mat_len);
  }

  if (DRW_ibo_requested(cache->ibo.curves_lines)) {
    curve_create_curves_lines(rdata, cache->ibo.curves_lines);
  }
  if (DRW_ibo_requested(cache->ibo.surfaces_tris)) {
    DRW_displist_indexbuf_create_triangles_in_order(lb, cache->ibo.surfaces_tris);
  }
  if (DRW_ibo_requested(cache->ibo.surfaces_lines)) {
    DRW_displist_indexbuf_create_lines_in_order(lb, cache->ibo.surfaces_lines);
  }
  if (DRW_ibo_requested(cache->ibo.edges_adj_lines)) {
    DRW_displist_indexbuf_create_edges_adjacency_lines(
        lb, cache->ibo.edges_adj_lines, &cache->is_manifold);
  }

  if (DRW_vbo_requested(cache->edit.pos) || DRW_vbo_requested(cache->edit.data) ||
      DRW_ibo_requested(cache->ibo.edit_verts) || DRW_ibo_requested(cache->ibo.edit_lines)) {
    curve_create_edit_data_and_handles(
        rdata, cache->edit.pos, cache->edit.data, cache->ibo.edit_verts, cache->ibo.edit_lines);
  }
  if (DRW_vbo_requested(cache->edit.curves_nor)) {
    curve_create_edit_curves_nor(rdata, cache->edit.curves_nor);
  }

  curve_render_data_free(rdata);

#ifdef DEBUG
  /* Make sure all requested batches have been setup. */
  for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
    BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], 0));
  }
#endif
}
1102
1103 /** \} */
1104