/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2017 by Blender Foundation.
 * All rights reserved.
 */

/** \file
 * \ingroup draw
 *
 * \brief Mesh API for render engines
 */

#include "MEM_guardedalloc.h"

#include "BLI_alloca.h"
#include "BLI_bitmap.h"
#include "BLI_buffer.h"
#include "BLI_edgehash.h"
#include "BLI_listbase.h"
#include "BLI_math_bits.h"
#include "BLI_math_vector.h"
#include "BLI_string.h"
#include "BLI_task.h"
#include "BLI_utildefines.h"

#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_object_types.h"
#include "DNA_scene_types.h"

#include "BKE_customdata.h"
#include "BKE_deform.h"
#include "BKE_editmesh.h"
#include "BKE_editmesh_cache.h"
#include "BKE_editmesh_tangent.h"
#include "BKE_mesh.h"
#include "BKE_mesh_runtime.h"
#include "BKE_mesh_tangent.h"
#include "BKE_modifier.h"
#include "BKE_object_deform.h"
#include "BKE_paint.h"
#include "BKE_pbvh.h"

#include "atomic_ops.h"

#include "bmesh.h"

#include "GPU_batch.h"
#include "GPU_material.h"

#include "DRW_render.h"

#include "ED_mesh.h"
#include "ED_uvedit.h"

#include "draw_cache_extract.h"
#include "draw_cache_inline.h"

#include "draw_cache_impl.h" /* own include */

static void mesh_batch_cache_clear(Mesh *me);

/* Return true if all layers in _b_ are inside _a_. */
BLI_INLINE bool mesh_cd_layers_type_overlap(DRW_MeshCDMask a, DRW_MeshCDMask b)
{
  return (*((uint64_t *)&a) & *((uint64_t *)&b)) == *((uint64_t *)&b);
}
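/* Example (illustrative): the mask behaves as a plain bit-set. With
 * `a.uv = 0x3` (UV layers 0 and 1) and `b.uv = 0x1` (layer 0 only),
 * `mesh_cd_layers_type_overlap(a, b)` is true because every bit of `b` is
 * also set in `a`; with the arguments swapped it is false. */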

BLI_INLINE bool mesh_cd_layers_type_equal(DRW_MeshCDMask a, DRW_MeshCDMask b)
{
  return *((uint64_t *)&a) == *((uint64_t *)&b);
}

BLI_INLINE void mesh_cd_layers_type_merge(DRW_MeshCDMask *a, DRW_MeshCDMask b)
{
  uint32_t *a_p = (uint32_t *)a;
  uint32_t *b_p = (uint32_t *)&b;
  atomic_fetch_and_or_uint32(a_p, *b_p);
  atomic_fetch_and_or_uint32(a_p + 1, *(b_p + 1));
}
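/* Note: the 64-bit mask is merged as two 32-bit halves since only a 32-bit
 * atomic OR is used here; this keeps concurrent layer requests from different
 * threads from losing bits (the `uint64_t` casts above assume the struct is
 * exactly 64 bits). */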

BLI_INLINE void mesh_cd_layers_type_clear(DRW_MeshCDMask *a)
{
  *((uint64_t *)a) = 0;
}

BLI_INLINE const Mesh *editmesh_final_or_this(const Mesh *me)
{
  return (me->edit_mesh && me->edit_mesh->mesh_eval_final) ? me->edit_mesh->mesh_eval_final : me;
}

static void mesh_cd_calc_edit_uv_layer(const Mesh *UNUSED(me), DRW_MeshCDMask *cd_used)
{
  cd_used->edit_uv = 1;
}

BLI_INLINE const CustomData *mesh_cd_ldata_get_from_mesh(const Mesh *me)
{
  switch ((eMeshWrapperType)me->runtime.wrapper_type) {
    case ME_WRAPPER_TYPE_MDATA:
      return &me->ldata;
      break;
    case ME_WRAPPER_TYPE_BMESH:
      return &me->edit_mesh->bm->ldata;
      break;
  }

  BLI_assert(0);
  return &me->ldata;
}

BLI_INLINE const CustomData *mesh_cd_vdata_get_from_mesh(const Mesh *me)
{
  switch ((eMeshWrapperType)me->runtime.wrapper_type) {
    case ME_WRAPPER_TYPE_MDATA:
      return &me->vdata;
      break;
    case ME_WRAPPER_TYPE_BMESH:
      return &me->edit_mesh->bm->vdata;
      break;
  }

  BLI_assert(0);
  return &me->vdata;
}

static void mesh_cd_calc_active_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
{
  const Mesh *me_final = editmesh_final_or_this(me);
  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
  int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
  if (layer != -1) {
    cd_used->uv |= (1 << layer);
  }
}

static void mesh_cd_calc_active_mask_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
{
  const Mesh *me_final = editmesh_final_or_this(me);
  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
  int layer = CustomData_get_stencil_layer(cd_ldata, CD_MLOOPUV);
  if (layer != -1) {
    cd_used->uv |= (1 << layer);
  }
}

static void mesh_cd_calc_active_vcol_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
{
  const Mesh *me_final = editmesh_final_or_this(me);
  const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);

  int layer = CustomData_get_active_layer(cd_vdata, CD_PROP_COLOR);
  if (layer != -1) {
    cd_used->sculpt_vcol |= (1 << layer);
  }
}

static void mesh_cd_calc_active_mloopcol_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
{
  const Mesh *me_final = editmesh_final_or_this(me);
  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);

  int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPCOL);
  if (layer != -1) {
    cd_used->vcol |= (1 << layer);
  }
}

static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Mesh *me,
                                                   struct GPUMaterial **gpumat_array,
                                                   int gpumat_array_len)
{
  const Mesh *me_final = editmesh_final_or_this(me);
  const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
  const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);

  /* See: DM_vertex_attributes_from_gpu for similar logic */
  DRW_MeshCDMask cd_used;
  mesh_cd_layers_type_clear(&cd_used);

  for (int i = 0; i < gpumat_array_len; i++) {
    GPUMaterial *gpumat = gpumat_array[i];
    if (gpumat) {
      ListBase gpu_attrs = GPU_material_attributes(gpumat);
      LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
        const char *name = gpu_attr->name;
        int type = gpu_attr->type;
        int layer = -1;

        if (type == CD_AUTO_FROM_NAME) {
          /* We need to deduce which exact layer is used, based on the
           * specified name: try UV's first, then sculpt vertex colors,
           * then regular loop colors. */
          if (name[0] != '\0') {
            layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name);
            type = CD_MTFACE;

            if (layer == -1) {
              if (U.experimental.use_sculpt_vertex_colors) {
                layer = CustomData_get_named_layer(cd_vdata, CD_PROP_COLOR, name);
                type = CD_PROP_COLOR;
              }
            }

            if (layer == -1) {
              layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name);
              type = CD_MCOL;
            }

#if 0 /* Tangents are always from UV's - this will never happen. */
            if (layer == -1) {
              layer = CustomData_get_named_layer(cd_ldata, CD_TANGENT, name);
              type = CD_TANGENT;
            }
#endif
            if (layer == -1) {
              continue;
            }
          }
          else {
            /* Fall back to the UV layer, which matches old behavior. */
            type = CD_MTFACE;
          }
        }

        switch (type) {
          case CD_MTFACE: {
            if (layer == -1) {
              layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
                                          CustomData_get_render_layer(cd_ldata, CD_MLOOPUV);
            }
            if (layer != -1) {
              cd_used.uv |= (1 << layer);
            }
            break;
          }
          case CD_TANGENT: {
            if (layer == -1) {
              layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
                                          CustomData_get_render_layer(cd_ldata, CD_MLOOPUV);

              /* Only fallback to orco (below) when we have no UV layers, see: T56545 */
              if (layer == -1 && name[0] != '\0') {
                layer = CustomData_get_render_layer(cd_ldata, CD_MLOOPUV);
              }
            }
            if (layer != -1) {
              cd_used.tan |= (1 << layer);
            }
            else {
              /* no UV layers at all => requesting orco */
              cd_used.tan_orco = 1;
              cd_used.orco = 1;
            }
            break;
          }
          case CD_PROP_COLOR: {
            /* Sculpt Vertex Colors */
            bool use_mloop_cols = false;
            if (layer == -1) {
              layer = (name[0] != '\0') ?
                          CustomData_get_named_layer(cd_vdata, CD_PROP_COLOR, name) :
                          CustomData_get_render_layer(cd_vdata, CD_PROP_COLOR);
              /* Fallback to Vertex Color data */
              if (layer == -1) {
                layer = (name[0] != '\0') ?
                            CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name) :
                            CustomData_get_render_layer(cd_ldata, CD_MLOOPCOL);
                use_mloop_cols = true;
              }
            }
            if (layer != -1) {
              if (use_mloop_cols) {
                cd_used.vcol |= (1 << layer);
              }
              else {
                cd_used.sculpt_vcol |= (1 << layer);
              }
            }
            break;
          }
          case CD_MCOL: {
            /* Vertex Color Data */
            if (layer == -1) {
              layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name) :
                                          CustomData_get_render_layer(cd_ldata, CD_MLOOPCOL);
            }
            if (layer != -1) {
              cd_used.vcol |= (1 << layer);
            }

            break;
          }
          case CD_ORCO: {
            cd_used.orco = 1;
            break;
          }
        }
      }
    }
  }
  return cd_used;
}
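/* Note: the mask computed above is merged into `cache->cd_needed` by
 * `DRW_mesh_batch_cache_get_surface_shaded` and only turned into actual
 * VBO data later, in `DRW_mesh_batch_cache_create_requested`. */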

/** \} */

/* ---------------------------------------------------------------------- */
/** \name Vertex Group Selection
 * \{ */

/** Reset the selection structure, deallocating heap memory as appropriate. */
static void drw_mesh_weight_state_clear(struct DRW_MeshWeightState *wstate)
{
  MEM_SAFE_FREE(wstate->defgroup_sel);
  MEM_SAFE_FREE(wstate->defgroup_locked);
  MEM_SAFE_FREE(wstate->defgroup_unlocked);

  memset(wstate, 0, sizeof(*wstate));

  wstate->defgroup_active = -1;
}

/** Copy selection data from one structure to another, including heap memory. */
static void drw_mesh_weight_state_copy(struct DRW_MeshWeightState *wstate_dst,
                                       const struct DRW_MeshWeightState *wstate_src)
{
  MEM_SAFE_FREE(wstate_dst->defgroup_sel);
  MEM_SAFE_FREE(wstate_dst->defgroup_locked);
  MEM_SAFE_FREE(wstate_dst->defgroup_unlocked);

  memcpy(wstate_dst, wstate_src, sizeof(*wstate_dst));

  if (wstate_src->defgroup_sel) {
    wstate_dst->defgroup_sel = MEM_dupallocN(wstate_src->defgroup_sel);
  }
  if (wstate_src->defgroup_locked) {
    wstate_dst->defgroup_locked = MEM_dupallocN(wstate_src->defgroup_locked);
  }
  if (wstate_src->defgroup_unlocked) {
    wstate_dst->defgroup_unlocked = MEM_dupallocN(wstate_src->defgroup_unlocked);
  }
}

static bool drw_mesh_flags_equal(const bool *array1, const bool *array2, int size)
{
  return ((!array1 && !array2) ||
          (array1 && array2 && memcmp(array1, array2, size * sizeof(bool)) == 0));
}
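/* Note: two NULL arrays compare as equal here (both mean "no data"), which
 * lets `drw_mesh_weight_state_compare` treat an unallocated selection the same
 * as a cleared one. */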

/** Compare two selection structures. */
static bool drw_mesh_weight_state_compare(const struct DRW_MeshWeightState *a,
                                          const struct DRW_MeshWeightState *b)
{
  return a->defgroup_active == b->defgroup_active && a->defgroup_len == b->defgroup_len &&
         a->flags == b->flags && a->alert_mode == b->alert_mode &&
         a->defgroup_sel_count == b->defgroup_sel_count &&
         drw_mesh_flags_equal(a->defgroup_sel, b->defgroup_sel, a->defgroup_len) &&
         drw_mesh_flags_equal(a->defgroup_locked, b->defgroup_locked, a->defgroup_len) &&
         drw_mesh_flags_equal(a->defgroup_unlocked, b->defgroup_unlocked, a->defgroup_len);
}

static void drw_mesh_weight_state_extract(Object *ob,
                                          Mesh *me,
                                          const ToolSettings *ts,
                                          bool paint_mode,
                                          struct DRW_MeshWeightState *wstate)
{
  /* Extract complete vertex weight group selection state and mode flags. */
  memset(wstate, 0, sizeof(*wstate));

  wstate->defgroup_active = ob->actdef - 1;
  wstate->defgroup_len = BLI_listbase_count(&ob->defbase);

  wstate->alert_mode = ts->weightuser;

  if (paint_mode && ts->multipaint) {
    /* Multi-paint needs to know all selected bones, not just the active group.
     * This is actually a relatively expensive operation, but caching would be difficult. */
    wstate->defgroup_sel = BKE_object_defgroup_selected_get(
        ob, wstate->defgroup_len, &wstate->defgroup_sel_count);

    if (wstate->defgroup_sel_count > 1) {
      wstate->flags |= DRW_MESH_WEIGHT_STATE_MULTIPAINT |
                       (ts->auto_normalize ? DRW_MESH_WEIGHT_STATE_AUTO_NORMALIZE : 0);

      if (me->symmetry & ME_SYMMETRY_X) {
        BKE_object_defgroup_mirror_selection(ob,
                                             wstate->defgroup_len,
                                             wstate->defgroup_sel,
                                             wstate->defgroup_sel,
                                             &wstate->defgroup_sel_count);
      }
    }
    /* With only one selected bone, Multi-paint reverts to regular mode. */
    else {
      wstate->defgroup_sel_count = 0;
      MEM_SAFE_FREE(wstate->defgroup_sel);
    }
  }

  if (paint_mode && ts->wpaint_lock_relative) {
    /* Set of locked vertex groups for the lock relative mode. */
    wstate->defgroup_locked = BKE_object_defgroup_lock_flags_get(ob, wstate->defgroup_len);
    wstate->defgroup_unlocked = BKE_object_defgroup_validmap_get(ob, wstate->defgroup_len);

    /* Check that a deform group is active, and that none of the selected groups are locked. */
    if (BKE_object_defgroup_check_lock_relative(
            wstate->defgroup_locked, wstate->defgroup_unlocked, wstate->defgroup_active) &&
        BKE_object_defgroup_check_lock_relative_multi(wstate->defgroup_len,
                                                      wstate->defgroup_locked,
                                                      wstate->defgroup_sel,
                                                      wstate->defgroup_sel_count)) {
      wstate->flags |= DRW_MESH_WEIGHT_STATE_LOCK_RELATIVE;

      /* Compute the set of locked and unlocked deform vertex groups. */
      BKE_object_defgroup_split_locked_validmap(wstate->defgroup_len,
                                                wstate->defgroup_locked,
                                                wstate->defgroup_unlocked,
                                                wstate->defgroup_locked, /* out */
                                                wstate->defgroup_unlocked);
    }
    else {
      MEM_SAFE_FREE(wstate->defgroup_unlocked);
      MEM_SAFE_FREE(wstate->defgroup_locked);
    }
  }
}

/** \} */

/* ---------------------------------------------------------------------- */
/** \name Mesh GPUBatch Cache
 * \{ */

BLI_INLINE void mesh_batch_cache_add_request(MeshBatchCache *cache, DRWBatchFlag new_flag)
{
  atomic_fetch_and_or_uint32((uint32_t *)(&cache->batch_requested), *(uint32_t *)&new_flag);
}
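/* Note: requests may come from multiple threads, hence the atomic OR. The
 * accumulated flags are consumed (and reset) later in
 * `DRW_mesh_batch_cache_create_requested`. */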

/* GPUBatch cache management. */

static bool mesh_batch_cache_valid(Mesh *me)
{
  MeshBatchCache *cache = me->runtime.batch_cache;

  if (cache == NULL) {
    return false;
  }

  if (cache->is_editmode != (me->edit_mesh != NULL)) {
    return false;
  }

  if (cache->is_dirty) {
    return false;
  }

  if (cache->mat_len != mesh_render_mat_len_get(me)) {
    return false;
  }

  return true;
}

static void mesh_batch_cache_init(Mesh *me)
{
  MeshBatchCache *cache = me->runtime.batch_cache;

  if (!cache) {
    cache = me->runtime.batch_cache = MEM_callocN(sizeof(*cache), __func__);
  }
  else {
    memset(cache, 0, sizeof(*cache));
  }

  cache->is_editmode = me->edit_mesh != NULL;

  if (cache->is_editmode == false) {
    // cache->edge_len = mesh_render_edges_len_get(me);
    // cache->tri_len = mesh_render_looptri_len_get(me);
    // cache->poly_len = mesh_render_polys_len_get(me);
    // cache->vert_len = mesh_render_verts_len_get(me);
  }

  cache->mat_len = mesh_render_mat_len_get(me);
  cache->surface_per_mat = MEM_callocN(sizeof(*cache->surface_per_mat) * cache->mat_len, __func__);
  cache->final.tris_per_mat = MEM_callocN(sizeof(*cache->final.tris_per_mat) * cache->mat_len,
                                          __func__);

  cache->is_dirty = false;
  cache->batch_ready = 0;
  cache->batch_requested = 0;

  drw_mesh_weight_state_clear(&cache->weight_state);
}

void DRW_mesh_batch_cache_validate(Mesh *me)
{
  if (!mesh_batch_cache_valid(me)) {
    mesh_batch_cache_clear(me);
    mesh_batch_cache_init(me);
  }
}

static MeshBatchCache *mesh_batch_cache_get(Mesh *me)
{
  return me->runtime.batch_cache;
}

static void mesh_batch_cache_check_vertex_group(MeshBatchCache *cache,
                                                const struct DRW_MeshWeightState *wstate)
{
  if (!drw_mesh_weight_state_compare(&cache->weight_state, wstate)) {
    FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
      GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.weights);
    }
    GPU_BATCH_CLEAR_SAFE(cache->batch.surface_weights);

    cache->batch_ready &= ~MBC_SURFACE_WEIGHTS;

    drw_mesh_weight_state_clear(&cache->weight_state);
  }
}

static void mesh_batch_cache_request_surface_batches(MeshBatchCache *cache)
{
  mesh_batch_cache_add_request(cache, MBC_SURFACE);
  DRW_batch_request(&cache->batch.surface);
  for (int i = 0; i < cache->mat_len; i++) {
    DRW_batch_request(&cache->surface_per_mat[i]);
  }
}

static void mesh_batch_cache_discard_surface_batches(MeshBatchCache *cache)
{
  GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
  for (int i = 0; i < cache->mat_len; i++) {
    GPU_BATCH_DISCARD_SAFE(cache->surface_per_mat[i]);
  }
  cache->batch_ready &= ~MBC_SURFACE;
}

static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache *cache)
{
  FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.pos_nor);
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.uv);
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.tan);
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.vcol);
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.orco);
  }
  mesh_batch_cache_discard_surface_batches(cache);
  mesh_cd_layers_type_clear(&cache->cd_used);
}

static void mesh_batch_cache_discard_uvedit(MeshBatchCache *cache)
{
  FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.stretch_angle);
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.stretch_area);
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.uv);
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edituv_data);
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_uv);
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_edituv_data);
    GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_tris);
    GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_lines);
    GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_points);
    GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_fdots);
  }
  GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_area);
  GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_angle);
  GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces);
  GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_edges);
  GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_verts);
  GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_fdots);
  GPU_BATCH_DISCARD_SAFE(cache->batch.wire_loops_uvs);

  cache->tot_area = 0.0f;
  cache->tot_uv_area = 0.0f;

  cache->batch_ready &= ~MBC_EDITUV;

  /* We discarded the vbo.uv so we need to reset the cd_used flag. */
  cache->cd_used.uv = 0;
  cache->cd_used.edit_uv = 0;

  /* Discard other batches that use vbo.uv. */
  mesh_batch_cache_discard_surface_batches(cache);
}

static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache *cache)
{
  FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edituv_data);
    GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_edituv_data);
    GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_tris);
    GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_lines);
    GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_points);
    GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.edituv_fdots);
  }
  GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_area);
  GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_angle);
  GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces);
  GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_edges);
  GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_verts);
  GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_fdots);
  GPU_BATCH_DISCARD_SAFE(cache->batch.wire_loops_uvs);
  cache->batch_ready &= ~MBC_EDITUV;
}

void DRW_mesh_batch_cache_dirty_tag(Mesh *me, eMeshBatchDirtyMode mode)
{
  MeshBatchCache *cache = me->runtime.batch_cache;
  if (cache == NULL) {
    return;
  }
  switch (mode) {
    case BKE_MESH_BATCH_DIRTY_SELECT:
      FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
        GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edit_data);
        GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_nor);
      }
      GPU_BATCH_DISCARD_SAFE(cache->batch.edit_triangles);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edit_vertices);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edit_edges);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edit_fdots);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edit_selection_verts);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edit_selection_edges);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edit_selection_faces);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edit_selection_fdots);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edit_mesh_analysis);
      cache->batch_ready &= ~(MBC_EDIT_TRIANGLES | MBC_EDIT_VERTICES | MBC_EDIT_EDGES |
                              MBC_EDIT_FACEDOTS | MBC_EDIT_SELECTION_FACEDOTS |
                              MBC_EDIT_SELECTION_FACES | MBC_EDIT_SELECTION_EDGES |
                              MBC_EDIT_SELECTION_VERTS | MBC_EDIT_MESH_ANALYSIS);
      /* Because visible UVs depend on edit mode selection, discard topology. */
      mesh_batch_cache_discard_uvedit_select(cache);
      break;
    case BKE_MESH_BATCH_DIRTY_SELECT_PAINT:
      /* Paint mode selection flag is packed inside the nor attribute.
       * Note that it can be slow if auto smooth is enabled. (see T63946) */
      FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
        GPU_INDEXBUF_DISCARD_SAFE(mbufcache->ibo.lines_paint_mask);
        GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.pos_nor);
        GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.lnor);
      }
      GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
      GPU_BATCH_DISCARD_SAFE(cache->batch.wire_loops);
      GPU_BATCH_DISCARD_SAFE(cache->batch.wire_edges);
      mesh_batch_cache_discard_surface_batches(cache);
      cache->batch_ready &= ~(MBC_SURFACE | MBC_WIRE_EDGES | MBC_WIRE_LOOPS);
      break;
    case BKE_MESH_BATCH_DIRTY_ALL:
      cache->is_dirty = true;
      break;
    case BKE_MESH_BATCH_DIRTY_SHADING:
      mesh_batch_cache_discard_shaded_tri(cache);
      mesh_batch_cache_discard_uvedit(cache);
      break;
    case BKE_MESH_BATCH_DIRTY_UVEDIT_ALL:
      mesh_batch_cache_discard_uvedit(cache);
      break;
    case BKE_MESH_BATCH_DIRTY_UVEDIT_SELECT:
      FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
        GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edituv_data);
        GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_edituv_data);
      }
      GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_area);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_stretch_angle);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_edges);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_verts);
      GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_fdots);
      cache->batch_ready &= ~MBC_EDITUV;
      break;
    default:
      BLI_assert(0);
  }
}

static void mesh_batch_cache_clear(Mesh *me)
{
  MeshBatchCache *cache = me->runtime.batch_cache;
  if (!cache) {
    return;
  }
  FOREACH_MESH_BUFFER_CACHE (cache, mbufcache) {
    GPUVertBuf **vbos = (GPUVertBuf **)&mbufcache->vbo;
    GPUIndexBuf **ibos = (GPUIndexBuf **)&mbufcache->ibo;
    for (int i = 0; i < sizeof(mbufcache->vbo) / sizeof(void *); i++) {
      GPU_VERTBUF_DISCARD_SAFE(vbos[i]);
    }
    for (int i = 0; i < sizeof(mbufcache->ibo) / sizeof(void *); i++) {
      GPU_INDEXBUF_DISCARD_SAFE(ibos[i]);
    }
  }

  for (int i = 0; i < cache->mat_len; i++) {
    GPU_INDEXBUF_DISCARD_SAFE(cache->final.tris_per_mat[i]);
  }
  MEM_SAFE_FREE(cache->final.tris_per_mat);

  for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
    GPUBatch **batch = (GPUBatch **)&cache->batch;
    GPU_BATCH_DISCARD_SAFE(batch[i]);
  }

  mesh_batch_cache_discard_shaded_tri(cache);
  mesh_batch_cache_discard_uvedit(cache);
  MEM_SAFE_FREE(cache->surface_per_mat);
  cache->mat_len = 0;

  cache->batch_ready = 0;
  drw_mesh_weight_state_clear(&cache->weight_state);
}

void DRW_mesh_batch_cache_free(Mesh *me)
{
  mesh_batch_cache_clear(me);
  MEM_SAFE_FREE(me->runtime.batch_cache);
}

/** \} */

/* ---------------------------------------------------------------------- */
/** \name Public API
 * \{ */

static void texpaint_request_active_uv(MeshBatchCache *cache, Mesh *me)
{
  DRW_MeshCDMask cd_needed;
  mesh_cd_layers_type_clear(&cd_needed);
  mesh_cd_calc_active_uv_layer(me, &cd_needed);

  BLI_assert(cd_needed.uv != 0 &&
             "No uv layer available in texpaint, but batches requested anyway!");

  mesh_cd_calc_active_mask_uv_layer(me, &cd_needed);
  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}

static void texpaint_request_active_vcol(MeshBatchCache *cache, Mesh *me)
{
  DRW_MeshCDMask cd_needed;
  mesh_cd_layers_type_clear(&cd_needed);
  mesh_cd_calc_active_mloopcol_layer(me, &cd_needed);

  BLI_assert(cd_needed.vcol != 0 &&
             "No MLOOPCOL layer available in vertpaint, but batches requested anyway!");

  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}

static void sculpt_request_active_vcol(MeshBatchCache *cache, Mesh *me)
{
  DRW_MeshCDMask cd_needed;
  mesh_cd_layers_type_clear(&cd_needed);
  mesh_cd_calc_active_vcol_layer(me, &cd_needed);

  BLI_assert(cd_needed.sculpt_vcol != 0 &&
             "No MPropCol layer available in Sculpt, but batches requested anyway!");

  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}

GPUBatch *DRW_mesh_batch_cache_get_all_verts(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_ALL_VERTS);
  return DRW_batch_request(&cache->batch.all_verts);
}
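/* Usage sketch (illustrative, with a hypothetical shading group `shgrp`):
 * engines request batches while populating their passes, and the data is
 * only filled in once `DRW_mesh_batch_cache_create_requested` runs:
 *
 *   GPUBatch *verts = DRW_mesh_batch_cache_get_all_verts(me);
 *   DRW_shgroup_call(shgrp, verts, ob);
 */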

GPUBatch *DRW_mesh_batch_cache_get_all_edges(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_ALL_EDGES);
  return DRW_batch_request(&cache->batch.all_edges);
}

GPUBatch *DRW_mesh_batch_cache_get_surface(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_request_surface_batches(cache);
  return cache->batch.surface;
}

GPUBatch *DRW_mesh_batch_cache_get_loose_edges(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_LOOSE_EDGES);
  if (cache->no_loose_wire) {
    return NULL;
  }

  return DRW_batch_request(&cache->batch.loose_edges);
}

GPUBatch *DRW_mesh_batch_cache_get_surface_weights(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_SURFACE_WEIGHTS);
  return DRW_batch_request(&cache->batch.surface_weights);
}

GPUBatch *DRW_mesh_batch_cache_get_edge_detection(Mesh *me, bool *r_is_manifold)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDGE_DETECTION);
  /* Even if is_manifold is not correct (not updated),
   * the default (not manifold) is just the worst case. */
  if (r_is_manifold) {
    *r_is_manifold = cache->is_manifold;
  }
  return DRW_batch_request(&cache->batch.edge_detection);
}

GPUBatch *DRW_mesh_batch_cache_get_wireframes_face(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_WIRE_EDGES);
  return DRW_batch_request(&cache->batch.wire_edges);
}

GPUBatch *DRW_mesh_batch_cache_get_edit_mesh_analysis(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_MESH_ANALYSIS);
  return DRW_batch_request(&cache->batch.edit_mesh_analysis);
}

GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Mesh *me,
                                                   struct GPUMaterial **gpumat_array,
                                                   uint gpumat_array_len)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(me, gpumat_array, gpumat_array_len);

  BLI_assert(gpumat_array_len == cache->mat_len);

  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
  mesh_batch_cache_request_surface_batches(cache);
  return cache->surface_per_mat;
}

GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  texpaint_request_active_uv(cache, me);
  mesh_batch_cache_request_surface_batches(cache);
  return cache->surface_per_mat;
}

GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  texpaint_request_active_uv(cache, me);
  mesh_batch_cache_request_surface_batches(cache);
  return cache->batch.surface;
}

GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  texpaint_request_active_vcol(cache, me);
  mesh_batch_cache_request_surface_batches(cache);
  return cache->batch.surface;
}

GPUBatch *DRW_mesh_batch_cache_get_surface_sculpt(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  sculpt_request_active_vcol(cache, me);
  mesh_batch_cache_request_surface_batches(cache);
  return cache->batch.surface;
}

int DRW_mesh_material_count_get(Mesh *me)
{
  return mesh_render_mat_len_get(me);
}

GPUBatch *DRW_mesh_batch_cache_get_sculpt_overlays(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);

  cache->cd_needed.sculpt_overlays = 1;
  mesh_batch_cache_add_request(cache, MBC_SCULPT_OVERLAYS);
  DRW_batch_request(&cache->batch.sculpt_overlays);

  return cache->batch.sculpt_overlays;
}

/** \} */

/* ---------------------------------------------------------------------- */
/** \name Edit Mode API
 * \{ */

GPUVertBuf *DRW_mesh_batch_cache_pos_vertbuf_get(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  /* Request surface to trigger the vbo filling. Otherwise it may do nothing. */
  mesh_batch_cache_request_surface_batches(cache);

  DRW_vbo_request(NULL, &cache->final.vbo.pos_nor);
  return cache->final.vbo.pos_nor;
}

/** \} */

/* ---------------------------------------------------------------------- */
/** \name Edit Mode API
 * \{ */

GPUBatch *DRW_mesh_batch_cache_get_edit_triangles(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_TRIANGLES);
  return DRW_batch_request(&cache->batch.edit_triangles);
}

GPUBatch *DRW_mesh_batch_cache_get_edit_edges(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_EDGES);
  return DRW_batch_request(&cache->batch.edit_edges);
}

GPUBatch *DRW_mesh_batch_cache_get_edit_vertices(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_VERTICES);
  return DRW_batch_request(&cache->batch.edit_vertices);
}

GPUBatch *DRW_mesh_batch_cache_get_edit_vnors(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_VNOR);
  return DRW_batch_request(&cache->batch.edit_vnor);
}

GPUBatch *DRW_mesh_batch_cache_get_edit_lnors(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_LNOR);
  return DRW_batch_request(&cache->batch.edit_lnor);
}

GPUBatch *DRW_mesh_batch_cache_get_edit_facedots(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_FACEDOTS);
  return DRW_batch_request(&cache->batch.edit_fdots);
}

GPUBatch *DRW_mesh_batch_cache_get_edit_skin_roots(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_SKIN_ROOTS);
  return DRW_batch_request(&cache->batch.edit_skin_roots);
}

/** \} */

/* ---------------------------------------------------------------------- */
/** \name Edit Mode selection API
 * \{ */

GPUBatch *DRW_mesh_batch_cache_get_triangles_with_select_id(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_SELECTION_FACES);
  return DRW_batch_request(&cache->batch.edit_selection_faces);
}

GPUBatch *DRW_mesh_batch_cache_get_facedots_with_select_id(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_SELECTION_FACEDOTS);
  return DRW_batch_request(&cache->batch.edit_selection_fdots);
}

GPUBatch *DRW_mesh_batch_cache_get_edges_with_select_id(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_SELECTION_EDGES);
  return DRW_batch_request(&cache->batch.edit_selection_edges);
}

GPUBatch *DRW_mesh_batch_cache_get_verts_with_select_id(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  mesh_batch_cache_add_request(cache, MBC_EDIT_SELECTION_VERTS);
  return DRW_batch_request(&cache->batch.edit_selection_verts);
}

/** \} */

/* ---------------------------------------------------------------------- */
/** \name UV Image editor API
 * \{ */

static void edituv_request_active_uv(MeshBatchCache *cache, Mesh *me)
{
  DRW_MeshCDMask cd_needed;
  mesh_cd_layers_type_clear(&cd_needed);
  mesh_cd_calc_active_uv_layer(me, &cd_needed);
  mesh_cd_calc_edit_uv_layer(me, &cd_needed);

  BLI_assert(cd_needed.edit_uv != 0 &&
             "No uv layer available in edituv, but batches requested anyway!");

  mesh_cd_calc_active_mask_uv_layer(me, &cd_needed);
  mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}

/* Creates the GPUBatch for drawing the UV Stretching Area Overlay.
 * Optionally retrieves the total mesh area and the total UV area.
 *
 * The `cache->tot_area` and `cache->tot_uv_area` values are only valid after
 * calling `DRW_mesh_batch_cache_create_requested`. */
GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_area(Mesh *me,
                                                             float **tot_area,
                                                             float **tot_uv_area)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(cache, me);
  mesh_batch_cache_add_request(cache, MBC_EDITUV_FACES_STRETCH_AREA);

  if (tot_area != NULL) {
    *tot_area = &cache->tot_area;
  }
  if (tot_uv_area != NULL) {
    *tot_uv_area = &cache->tot_uv_area;
  }
  return DRW_batch_request(&cache->batch.edituv_faces_stretch_area);
}
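/* Usage sketch (illustrative): keep the returned pointers and read them only
 * after batch creation has run:
 *
 *   float *tot_area, *tot_uv_area;
 *   GPUBatch *batch = DRW_mesh_batch_cache_get_edituv_faces_stretch_area(
 *       me, &tot_area, &tot_uv_area);
 *   // ...after DRW_mesh_batch_cache_create_requested():
 *   // stretch ratio ~ *tot_uv_area / *tot_area
 */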

GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_angle(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(cache, me);
  mesh_batch_cache_add_request(cache, MBC_EDITUV_FACES_STRETCH_ANGLE);
  return DRW_batch_request(&cache->batch.edituv_faces_stretch_angle);
}

GPUBatch *DRW_mesh_batch_cache_get_edituv_faces(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(cache, me);
  mesh_batch_cache_add_request(cache, MBC_EDITUV_FACES);
  return DRW_batch_request(&cache->batch.edituv_faces);
}

GPUBatch *DRW_mesh_batch_cache_get_edituv_edges(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(cache, me);
  mesh_batch_cache_add_request(cache, MBC_EDITUV_EDGES);
  return DRW_batch_request(&cache->batch.edituv_edges);
}

GPUBatch *DRW_mesh_batch_cache_get_edituv_verts(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(cache, me);
  mesh_batch_cache_add_request(cache, MBC_EDITUV_VERTS);
  return DRW_batch_request(&cache->batch.edituv_verts);
}

GPUBatch *DRW_mesh_batch_cache_get_edituv_facedots(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(cache, me);
  mesh_batch_cache_add_request(cache, MBC_EDITUV_FACEDOTS);
  return DRW_batch_request(&cache->batch.edituv_fdots);
}

GPUBatch *DRW_mesh_batch_cache_get_uv_edges(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  edituv_request_active_uv(cache, me);
  mesh_batch_cache_add_request(cache, MBC_WIRE_LOOPS_UVS);
  return DRW_batch_request(&cache->batch.wire_loops_uvs);
}

GPUBatch *DRW_mesh_batch_cache_get_surface_edges(Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  texpaint_request_active_uv(cache, me);
  mesh_batch_cache_add_request(cache, MBC_WIRE_LOOPS);
  return DRW_batch_request(&cache->batch.wire_loops);
}

/** \} */

/* ---------------------------------------------------------------------- */
/** \name Grouped batch generation
 * \{ */

/* Thread safety needs to be assured by the caller. Don't call this during drawing.
 * Note: For now this only frees the shading batches / VBOs if any CD layers are
 * no longer needed. */
void DRW_mesh_batch_cache_free_old(Mesh *me, int ctime)
{
  MeshBatchCache *cache = me->runtime.batch_cache;

  if (cache == NULL) {
    return;
  }

  if (mesh_cd_layers_type_equal(cache->cd_used_over_time, cache->cd_used)) {
    cache->lastmatch = ctime;
  }

  if (ctime - cache->lastmatch > U.vbotimeout) {
    mesh_batch_cache_discard_shaded_tri(cache);
  }

  mesh_cd_layers_type_clear(&cache->cd_used_over_time);
}
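/* Note: `U.vbotimeout` is the user preference controlling how long (measured
 * in the same units as `ctime`) an unused shading VBO layout is kept before
 * being discarded above. */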

#ifdef DEBUG
/* Sanity check function to test if all requested batches are available. */
static void drw_mesh_batch_cache_check_available(struct TaskGraph *task_graph, Mesh *me)
{
  MeshBatchCache *cache = mesh_batch_cache_get(me);
  /* Make sure all requested batches have been set up. */
  /* Note: The next line creates a different scheduling than during release builds,
   * which can lead to some issues (see T77867 where we needed to disable this
   * function in order to debug what was happening in release builds). */
  BLI_task_graph_work_and_wait(task_graph);
  for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
    BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], 0));
  }
  for (int i = 0; i < sizeof(cache->final.vbo) / sizeof(void *); i++) {
    BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->final.vbo)[i]));
  }
  for (int i = 0; i < sizeof(cache->final.ibo) / sizeof(void *); i++) {
    BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->final.ibo)[i]));
  }
  for (int i = 0; i < sizeof(cache->cage.vbo) / sizeof(void *); i++) {
    BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->cage.vbo)[i]));
  }
  for (int i = 0; i < sizeof(cache->cage.ibo) / sizeof(void *); i++) {
    BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->cage.ibo)[i]));
  }
  for (int i = 0; i < sizeof(cache->uv_cage.vbo) / sizeof(void *); i++) {
    BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->uv_cage.vbo)[i]));
  }
  for (int i = 0; i < sizeof(cache->uv_cage.ibo) / sizeof(void *); i++) {
    BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->uv_cage.ibo)[i]));
  }
}
#endif
1171 
1172 /* Can be called for any surface type. Mesh *me is the final mesh. */
DRW_mesh_batch_cache_create_requested(struct TaskGraph * task_graph,Object * ob,Mesh * me,const Scene * scene,const bool is_paint_mode,const bool use_hide)1173 void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
1174                                            Object *ob,
1175                                            Mesh *me,
1176                                            const Scene *scene,
1177                                            const bool is_paint_mode,
1178                                            const bool use_hide)
1179 {
1180   BLI_assert(task_graph);
1181   const ToolSettings *ts = NULL;
1182   if (scene) {
1183     ts = scene->toolsettings;
1184   }
1185   MeshBatchCache *cache = mesh_batch_cache_get(me);
1186   bool cd_uv_update = false;
1187 
1188   /* Early out */
1189   if (cache->batch_requested == 0) {
1190 #ifdef DEBUG
1191     drw_mesh_batch_cache_check_available(task_graph, me);
1192 #endif
1193     return;
1194   }
1195 
1196   /* Sanity check. */
1197   if ((me->edit_mesh != NULL) && (ob->mode & OB_MODE_EDIT)) {
1198     BLI_assert(me->edit_mesh->mesh_eval_final != NULL);
1199   }
1200 
1201   /* Don't check `DRW_object_is_in_edit_mode(ob)` here because it means the same mesh
1202    * may draw with edit-mesh data and regular mesh data.
1203    * In this case the custom-data layers used wont always match in `me->runtime.batch_cache`.
1204    * If we want to display regular mesh data, we should have a separate cache for the edit-mesh.
1205    * See T77359. */
1206   const bool is_editmode = (me->edit_mesh != NULL) /* && DRW_object_is_in_edit_mode(ob) */;
1207 
1208   /* This could be set for paint mode too, currently it's only used for edit-mode. */
1209   const bool is_mode_active = is_editmode && DRW_object_is_in_edit_mode(ob);
1210 
1211   DRWBatchFlag batch_requested = cache->batch_requested;
1212   cache->batch_requested = 0;
1213 
1214   if (batch_requested & MBC_SURFACE_WEIGHTS) {
1215     /* Check vertex weights. */
1216     if ((cache->batch.surface_weights != NULL) && (ts != NULL)) {
1217       struct DRW_MeshWeightState wstate;
1218       BLI_assert(ob->type == OB_MESH);
1219       drw_mesh_weight_state_extract(ob, me, ts, is_paint_mode, &wstate);
1220       mesh_batch_cache_check_vertex_group(cache, &wstate);
1221       drw_mesh_weight_state_copy(&cache->weight_state, &wstate);
1222       drw_mesh_weight_state_clear(&wstate);
1223     }
1224   }
1225 
1226   if (batch_requested &
1227       (MBC_SURFACE | MBC_WIRE_LOOPS_UVS | MBC_EDITUV_FACES_STRETCH_AREA |
1228        MBC_EDITUV_FACES_STRETCH_ANGLE | MBC_EDITUV_FACES | MBC_EDITUV_EDGES | MBC_EDITUV_VERTS)) {
1229     /* Modifiers will only generate an orco layer if the mesh is deformed. */
1230     if (cache->cd_needed.orco != 0) {
1231       /* Orco is always extracted from final mesh. */
1232       Mesh *me_final = (me->edit_mesh) ? me->edit_mesh->mesh_eval_final : me;
1233       if (CustomData_get_layer(&me_final->vdata, CD_ORCO) == NULL) {
1234         /* Skip orco calculation */
1235         cache->cd_needed.orco = 0;
1236       }
1237     }
1238 
1239     /* Verify that all surface batches have needed attribute layers.
1240      */
1241     /* TODO(fclem): We could be a bit smarter here and only do it per
1242      * material. */
1243     bool cd_overlap = mesh_cd_layers_type_overlap(cache->cd_used, cache->cd_needed);
1244     if (cd_overlap == false) {
1245       FOREACH_MESH_BUFFER_CACHE (cache, mbuffercache) {
1246         if ((cache->cd_used.uv & cache->cd_needed.uv) != cache->cd_needed.uv) {
1247           GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.uv);
1248           cd_uv_update = true;
1249         }
1250         if ((cache->cd_used.tan & cache->cd_needed.tan) != cache->cd_needed.tan ||
1251             cache->cd_used.tan_orco != cache->cd_needed.tan_orco) {
1252           GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.tan);
1253         }
1254         if (cache->cd_used.orco != cache->cd_needed.orco) {
1255           GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.orco);
1256         }
1257         if (cache->cd_used.sculpt_overlays != cache->cd_needed.sculpt_overlays) {
1258           GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.sculpt_data);
1259         }
1260         if (((cache->cd_used.vcol & cache->cd_needed.vcol) != cache->cd_needed.vcol) ||
1261             ((cache->cd_used.sculpt_vcol & cache->cd_needed.sculpt_vcol) !=
1262              cache->cd_needed.sculpt_vcol)) {
1263           GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.vcol);
1264         }
1265       }
1266       /* We can't discard batches at this point as they have been
1267        * referenced for drawing. Just clear them in place. */
1268       for (int i = 0; i < cache->mat_len; i++) {
1269         GPU_BATCH_CLEAR_SAFE(cache->surface_per_mat[i]);
1270       }
1271       GPU_BATCH_CLEAR_SAFE(cache->batch.surface);
1272       cache->batch_ready &= ~(MBC_SURFACE);
1273 
1274       mesh_cd_layers_type_merge(&cache->cd_used, cache->cd_needed);
1275     }
1276     mesh_cd_layers_type_merge(&cache->cd_used_over_time, cache->cd_needed);
1277     mesh_cd_layers_type_clear(&cache->cd_needed);
1278   }
1279 
1280   if (batch_requested & MBC_EDITUV) {
1281     /* Discard UV batches if sync_selection changes */
1282     const bool is_uvsyncsel = ts && (ts->uv_flag & UV_SYNC_SELECTION);
1283     if (cd_uv_update || (cache->is_uvsyncsel != is_uvsyncsel)) {
1284       cache->is_uvsyncsel = is_uvsyncsel;
1285       FOREACH_MESH_BUFFER_CACHE (cache, mbuffercache) {
1286         GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.edituv_data);
1287         GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.fdots_uv);
1288         GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.fdots_edituv_data);
1289         GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_tris);
1290         GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_lines);
1291         GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_points);
1292         GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_fdots);
1293       }
1294       /* We only clear the batches as they may already have been
1295        * referenced. */
1296       GPU_BATCH_CLEAR_SAFE(cache->batch.wire_loops_uvs);
1297       GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_faces_stretch_area);
1298       GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_faces_stretch_angle);
1299       GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_faces);
1300       GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_edges);
1301       GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_verts);
1302       GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_fdots);
1303       cache->batch_ready &= ~MBC_EDITUV;
1304     }
1305   }
1306 
1307   /* Second chance to early out */
1308   if ((batch_requested & ~cache->batch_ready) == 0) {
1309 #ifdef DEBUG
1310     drw_mesh_batch_cache_check_available(task_graph, me);
1311 #endif
1312     return;
1313   }
1314 
1315   /* TODO(pablodp606): This always updates the sculpt normals for regular drawing (non-PBVH).
1316    * This makes tools that sample the surface per step get wrong normals until a redraw happens.
1317    * Normal updates should be part of the brush loop and only run during the stroke when the
1318    * brush needs to sample the surface. The drawing code should only update the normals
1319    * per redraw when smooth shading is enabled. */
1320   const bool do_update_sculpt_normals = ob->sculpt && ob->sculpt->pbvh;
1321   if (do_update_sculpt_normals) {
1322     Mesh *mesh = ob->data;
1323     BKE_pbvh_update_normals(ob->sculpt->pbvh, mesh->runtime.subdiv_ccg);
1324   }
1325 
1326   cache->batch_ready |= batch_requested;
1327 
1328   const bool do_cage = (is_editmode &&
1329                         (me->edit_mesh->mesh_eval_final != me->edit_mesh->mesh_eval_cage));
1330 
1331   const bool do_uvcage = is_editmode && !me->edit_mesh->mesh_eval_final->runtime.is_original;
1332 
1333   MeshBufferCache *mbufcache = &cache->final;
1334 
1335   /* Initialize batches and request VBO's & IBO's. */
  if (DRW_batch_requested(cache->batch.surface, GPU_PRIM_TRIS)) {
    DRW_ibo_request(cache->batch.surface, &mbufcache->ibo.tris);
    /* Order matters: attributes from VBOs requested first override those from later ones. */
    DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.lnor);
    DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.pos_nor);
    if (cache->cd_used.uv != 0) {
      DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.uv);
    }
    if (cache->cd_used.vcol != 0 || cache->cd_used.sculpt_vcol != 0) {
      DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.vcol);
    }
  }
  if (DRW_batch_requested(cache->batch.all_verts, GPU_PRIM_POINTS)) {
    DRW_vbo_request(cache->batch.all_verts, &mbufcache->vbo.pos_nor);
  }
  if (DRW_batch_requested(cache->batch.sculpt_overlays, GPU_PRIM_TRIS)) {
    DRW_ibo_request(cache->batch.sculpt_overlays, &mbufcache->ibo.tris);
    DRW_vbo_request(cache->batch.sculpt_overlays, &mbufcache->vbo.pos_nor);
    DRW_vbo_request(cache->batch.sculpt_overlays, &mbufcache->vbo.sculpt_data);
  }
  if (DRW_batch_requested(cache->batch.all_edges, GPU_PRIM_LINES)) {
    DRW_ibo_request(cache->batch.all_edges, &mbufcache->ibo.lines);
    DRW_vbo_request(cache->batch.all_edges, &mbufcache->vbo.pos_nor);
  }
  if (DRW_batch_requested(cache->batch.loose_edges, GPU_PRIM_LINES)) {
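    /* Request the full `lines` IBO with no owner so it still gets built;
     * `lines_loose` is presumably extracted as a sub-range of it. */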
    DRW_ibo_request(NULL, &mbufcache->ibo.lines);
    DRW_ibo_request(cache->batch.loose_edges, &mbufcache->ibo.lines_loose);
    DRW_vbo_request(cache->batch.loose_edges, &mbufcache->vbo.pos_nor);
  }
  if (DRW_batch_requested(cache->batch.edge_detection, GPU_PRIM_LINES_ADJ)) {
    DRW_ibo_request(cache->batch.edge_detection, &mbufcache->ibo.lines_adjacency);
    DRW_vbo_request(cache->batch.edge_detection, &mbufcache->vbo.pos_nor);
  }
  if (DRW_batch_requested(cache->batch.surface_weights, GPU_PRIM_TRIS)) {
    DRW_ibo_request(cache->batch.surface_weights, &mbufcache->ibo.tris);
    DRW_vbo_request(cache->batch.surface_weights, &mbufcache->vbo.pos_nor);
    DRW_vbo_request(cache->batch.surface_weights, &mbufcache->vbo.weights);
  }
  if (DRW_batch_requested(cache->batch.wire_loops, GPU_PRIM_LINES)) {
    DRW_ibo_request(cache->batch.wire_loops, &mbufcache->ibo.lines_paint_mask);
    /* Order matters: attributes from VBOs requested first override those from later ones. */
    DRW_vbo_request(cache->batch.wire_loops, &mbufcache->vbo.lnor);
    DRW_vbo_request(cache->batch.wire_loops, &mbufcache->vbo.pos_nor);
  }
  if (DRW_batch_requested(cache->batch.wire_edges, GPU_PRIM_LINES)) {
    DRW_ibo_request(cache->batch.wire_edges, &mbufcache->ibo.lines);
    DRW_vbo_request(cache->batch.wire_edges, &mbufcache->vbo.pos_nor);
    DRW_vbo_request(cache->batch.wire_edges, &mbufcache->vbo.edge_fac);
  }
  if (DRW_batch_requested(cache->batch.wire_loops_uvs, GPU_PRIM_LINES)) {
    DRW_ibo_request(cache->batch.wire_loops_uvs, &mbufcache->ibo.edituv_lines);
    /* Used by the paint overlay; the active UV layer should already have been queried. */
    if (cache->cd_used.uv != 0) {
      DRW_vbo_request(cache->batch.wire_loops_uvs, &mbufcache->vbo.uv);
    }
  }
  if (DRW_batch_requested(cache->batch.edit_mesh_analysis, GPU_PRIM_TRIS)) {
    DRW_ibo_request(cache->batch.edit_mesh_analysis, &mbufcache->ibo.tris);
    DRW_vbo_request(cache->batch.edit_mesh_analysis, &mbufcache->vbo.pos_nor);
    DRW_vbo_request(cache->batch.edit_mesh_analysis, &mbufcache->vbo.mesh_analysis);
  }

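  /* Each per-material batch shares the common VBOs but draws only its own
   * triangle range through `tris_per_mat[i]`. */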
  /* Per Material */
  for (int i = 0; i < cache->mat_len; i++) {
    if (DRW_batch_requested(cache->surface_per_mat[i], GPU_PRIM_TRIS)) {
      DRW_ibo_request(cache->surface_per_mat[i], &mbufcache->tris_per_mat[i]);
      /* Order matters: attributes from VBOs requested first override those from later ones. */
      DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.lnor);
      DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.pos_nor);
      if (cache->cd_used.uv != 0) {
        DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.uv);
      }
      if ((cache->cd_used.tan != 0) || (cache->cd_used.tan_orco != 0)) {
        DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.tan);
      }
      if (cache->cd_used.vcol != 0 || cache->cd_used.sculpt_vcol != 0) {
        DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.vcol);
      }
      if (cache->cd_used.orco != 0) {
        DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.orco);
      }
    }
  }

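  /* Edit-mode batches read from the cage buffers when a separate cage mesh
   * exists, and from the final mesh buffers otherwise. */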
  mbufcache = (do_cage) ? &cache->cage : &cache->final;

  /* Edit Mesh */
  if (DRW_batch_requested(cache->batch.edit_triangles, GPU_PRIM_TRIS)) {
    DRW_ibo_request(cache->batch.edit_triangles, &mbufcache->ibo.tris);
    DRW_vbo_request(cache->batch.edit_triangles, &mbufcache->vbo.pos_nor);
    DRW_vbo_request(cache->batch.edit_triangles, &mbufcache->vbo.edit_data);
  }
  if (DRW_batch_requested(cache->batch.edit_vertices, GPU_PRIM_POINTS)) {
    DRW_ibo_request(cache->batch.edit_vertices, &mbufcache->ibo.points);
    DRW_vbo_request(cache->batch.edit_vertices, &mbufcache->vbo.pos_nor);
    DRW_vbo_request(cache->batch.edit_vertices, &mbufcache->vbo.edit_data);
  }
  if (DRW_batch_requested(cache->batch.edit_edges, GPU_PRIM_LINES)) {
    DRW_ibo_request(cache->batch.edit_edges, &mbufcache->ibo.lines);
    DRW_vbo_request(cache->batch.edit_edges, &mbufcache->vbo.pos_nor);
    DRW_vbo_request(cache->batch.edit_edges, &mbufcache->vbo.edit_data);
  }
  if (DRW_batch_requested(cache->batch.edit_vnor, GPU_PRIM_POINTS)) {
    DRW_ibo_request(cache->batch.edit_vnor, &mbufcache->ibo.points);
    DRW_vbo_request(cache->batch.edit_vnor, &mbufcache->vbo.pos_nor);
  }
  if (DRW_batch_requested(cache->batch.edit_lnor, GPU_PRIM_POINTS)) {
    DRW_ibo_request(cache->batch.edit_lnor, &mbufcache->ibo.tris);
    DRW_vbo_request(cache->batch.edit_lnor, &mbufcache->vbo.pos_nor);
    DRW_vbo_request(cache->batch.edit_lnor, &mbufcache->vbo.lnor);
  }
  if (DRW_batch_requested(cache->batch.edit_fdots, GPU_PRIM_POINTS)) {
    DRW_ibo_request(cache->batch.edit_fdots, &mbufcache->ibo.fdots);
    DRW_vbo_request(cache->batch.edit_fdots, &mbufcache->vbo.fdots_pos);
    DRW_vbo_request(cache->batch.edit_fdots, &mbufcache->vbo.fdots_nor);
  }
  if (DRW_batch_requested(cache->batch.edit_skin_roots, GPU_PRIM_POINTS)) {
    DRW_vbo_request(cache->batch.edit_skin_roots, &mbufcache->vbo.skin_roots);
  }

  /* Selection */
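  /* The `*_idx` VBOs below carry original element indices so the selection
   * engine can map drawn primitives back to mesh elements. */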
  if (DRW_batch_requested(cache->batch.edit_selection_verts, GPU_PRIM_POINTS)) {
    DRW_ibo_request(cache->batch.edit_selection_verts, &mbufcache->ibo.points);
    DRW_vbo_request(cache->batch.edit_selection_verts, &mbufcache->vbo.pos_nor);
    DRW_vbo_request(cache->batch.edit_selection_verts, &mbufcache->vbo.vert_idx);
  }
  if (DRW_batch_requested(cache->batch.edit_selection_edges, GPU_PRIM_LINES)) {
    DRW_ibo_request(cache->batch.edit_selection_edges, &mbufcache->ibo.lines);
    DRW_vbo_request(cache->batch.edit_selection_edges, &mbufcache->vbo.pos_nor);
    DRW_vbo_request(cache->batch.edit_selection_edges, &mbufcache->vbo.edge_idx);
  }
  if (DRW_batch_requested(cache->batch.edit_selection_faces, GPU_PRIM_TRIS)) {
    DRW_ibo_request(cache->batch.edit_selection_faces, &mbufcache->ibo.tris);
    DRW_vbo_request(cache->batch.edit_selection_faces, &mbufcache->vbo.pos_nor);
    DRW_vbo_request(cache->batch.edit_selection_faces, &mbufcache->vbo.poly_idx);
  }
  if (DRW_batch_requested(cache->batch.edit_selection_fdots, GPU_PRIM_POINTS)) {
    DRW_ibo_request(cache->batch.edit_selection_fdots, &mbufcache->ibo.fdots);
    DRW_vbo_request(cache->batch.edit_selection_fdots, &mbufcache->vbo.fdots_pos);
    DRW_vbo_request(cache->batch.edit_selection_fdots, &mbufcache->vbo.fdot_idx);
  }

  /**
   * TODO: The code and data structures are ready to support modified UV display,
   * but the selection code for UVs needs to support it first. So for now, only
   * display the cage in all cases.
   */
  mbufcache = (do_uvcage) ? &cache->uv_cage : &cache->final;

  /* Edit UV */
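  /* The edit-UV batches below all share the `uv` VBO; `edituv_data` appears to
   * hold the per-element flags used by the UV editor overlays. */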
  if (DRW_batch_requested(cache->batch.edituv_faces, GPU_PRIM_TRIS)) {
    DRW_ibo_request(cache->batch.edituv_faces, &mbufcache->ibo.edituv_tris);
    DRW_vbo_request(cache->batch.edituv_faces, &mbufcache->vbo.uv);
    DRW_vbo_request(cache->batch.edituv_faces, &mbufcache->vbo.edituv_data);
  }
  if (DRW_batch_requested(cache->batch.edituv_faces_stretch_area, GPU_PRIM_TRIS)) {
    DRW_ibo_request(cache->batch.edituv_faces_stretch_area, &mbufcache->ibo.edituv_tris);
    DRW_vbo_request(cache->batch.edituv_faces_stretch_area, &mbufcache->vbo.uv);
    DRW_vbo_request(cache->batch.edituv_faces_stretch_area, &mbufcache->vbo.edituv_data);
    DRW_vbo_request(cache->batch.edituv_faces_stretch_area, &mbufcache->vbo.stretch_area);
  }
  if (DRW_batch_requested(cache->batch.edituv_faces_stretch_angle, GPU_PRIM_TRIS)) {
    DRW_ibo_request(cache->batch.edituv_faces_stretch_angle, &mbufcache->ibo.edituv_tris);
    DRW_vbo_request(cache->batch.edituv_faces_stretch_angle, &mbufcache->vbo.uv);
    DRW_vbo_request(cache->batch.edituv_faces_stretch_angle, &mbufcache->vbo.edituv_data);
    DRW_vbo_request(cache->batch.edituv_faces_stretch_angle, &mbufcache->vbo.stretch_angle);
  }
  if (DRW_batch_requested(cache->batch.edituv_edges, GPU_PRIM_LINES)) {
    DRW_ibo_request(cache->batch.edituv_edges, &mbufcache->ibo.edituv_lines);
    DRW_vbo_request(cache->batch.edituv_edges, &mbufcache->vbo.uv);
    DRW_vbo_request(cache->batch.edituv_edges, &mbufcache->vbo.edituv_data);
  }
  if (DRW_batch_requested(cache->batch.edituv_verts, GPU_PRIM_POINTS)) {
    DRW_ibo_request(cache->batch.edituv_verts, &mbufcache->ibo.edituv_points);
    DRW_vbo_request(cache->batch.edituv_verts, &mbufcache->vbo.uv);
    DRW_vbo_request(cache->batch.edituv_verts, &mbufcache->vbo.edituv_data);
  }
  if (DRW_batch_requested(cache->batch.edituv_fdots, GPU_PRIM_POINTS)) {
    DRW_ibo_request(cache->batch.edituv_fdots, &mbufcache->ibo.edituv_fdots);
    DRW_vbo_request(cache->batch.edituv_fdots, &mbufcache->vbo.fdots_uv);
    DRW_vbo_request(cache->batch.edituv_fdots, &mbufcache->vbo.fdots_edituv_data);
  }

  /* Meh, we lose Scene const correctness here, hence the cast. */
  const bool use_subsurf_fdots = scene ? BKE_modifiers_uses_subsurf_facedots((Scene *)scene, ob) :
                                         false;

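  /* All requests are registered; now extract the data. The UV cage and edit
   * cage buffers (when needed) are built before the final mesh buffers. */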
  if (do_uvcage) {
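    /* The boolean arguments below are, in order: do_final=false,
     * do_uvedit=true, use_subsurf_fdots=false, and use_hide=true (parameter
     * names assumed from the extractor's signature). */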
    mesh_buffer_cache_create_requested(task_graph,
                                       cache,
                                       cache->uv_cage,
                                       me,
                                       is_editmode,
                                       is_paint_mode,
                                       is_mode_active,
                                       ob->obmat,
                                       false,
                                       true,
                                       false,
                                       &cache->cd_used,
                                       scene,
                                       ts,
                                       true);
  }

  if (do_cage) {
    mesh_buffer_cache_create_requested(task_graph,
                                       cache,
                                       cache->cage,
                                       me,
                                       is_editmode,
                                       is_paint_mode,
                                       is_mode_active,
                                       ob->obmat,
                                       false,
                                       false,
                                       use_subsurf_fdots,
                                       &cache->cd_used,
                                       scene,
                                       ts,
                                       true);
  }

  mesh_buffer_cache_create_requested(task_graph,
                                     cache,
                                     cache->final,
                                     me,
                                     is_editmode,
                                     is_paint_mode,
                                     is_mode_active,
                                     ob->obmat,
                                     true,
                                     false,
                                     use_subsurf_fdots,
                                     &cache->cd_used,
                                     scene,
                                     ts,
                                     use_hide);

  /* Ensure that all requested batches have finished.
   * Ideally we want to remove this sync, but there are cases where it is still needed.
   * See T79038 for example.
   *
   * One idea is to separate the object-mode and edit-mode draw caches, so that
   * only the cache for the active mode gets updated. Another option is to look
   * into using drw_batch_cache_generate_requested_delayed. */
  BLI_task_graph_work_and_wait(task_graph);
#ifdef DEBUG
  drw_mesh_batch_cache_check_available(task_graph, me);
#endif
}

/** \} */
