1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  */
16 
17 /** \file
18  * \ingroup bmesh
19  *
20  * BM mesh level functions.
21  */
22 
23 #include "MEM_guardedalloc.h"
24 
25 #include "DNA_listBase.h"
26 #include "DNA_scene_types.h"
27 
28 #include "BLI_bitmap.h"
29 #include "BLI_linklist_stack.h"
30 #include "BLI_listbase.h"
31 #include "BLI_math.h"
32 #include "BLI_stack.h"
33 #include "BLI_task.h"
34 #include "BLI_utildefines.h"
35 
36 #include "BKE_editmesh.h"
37 #include "BKE_global.h"
38 #include "BKE_mesh.h"
39 #include "BKE_multires.h"
40 
41 #include "atomic_ops.h"
42 
43 #include "intern/bmesh_private.h"
44 
/* Used as an extern: declared in bmesh.h, defined here. */
46 const BMAllocTemplate bm_mesh_allocsize_default = {512, 1024, 2048, 512};
47 const BMAllocTemplate bm_mesh_chunksize_default = {512, 1024, 2048, 512};
48 
/**
 * Allocate the element mempools (verts/edges/loops/faces) for a BMesh.
 *
 * Any of the \a r_*pool arguments may be NULL to skip creating that pool.
 * When \a use_toolflags is true, the element variants carrying operator
 * tool-flag storage (e.g. #BMVert_OFlag) are used as the element size.
 */
static void bm_mempool_init_ex(const BMAllocTemplate *allocsize,
                               const bool use_toolflags,
                               BLI_mempool **r_vpool,
                               BLI_mempool **r_epool,
                               BLI_mempool **r_lpool,
                               BLI_mempool **r_fpool)
{
  /* Tool-flag variants embed extra 'oflags' storage; loops never carry tool-flags. */
  const size_t vert_size = use_toolflags ? sizeof(BMVert_OFlag) : sizeof(BMVert);
  const size_t edge_size = use_toolflags ? sizeof(BMEdge_OFlag) : sizeof(BMEdge);
  const size_t loop_size = sizeof(BMLoop);
  const size_t face_size = use_toolflags ? sizeof(BMFace_OFlag) : sizeof(BMFace);

  if (r_vpool != NULL) {
    *r_vpool = BLI_mempool_create(
        vert_size, allocsize->totvert, bm_mesh_chunksize_default.totvert, BLI_MEMPOOL_ALLOW_ITER);
  }
  if (r_epool != NULL) {
    *r_epool = BLI_mempool_create(
        edge_size, allocsize->totedge, bm_mesh_chunksize_default.totedge, BLI_MEMPOOL_ALLOW_ITER);
  }
  if (r_lpool != NULL) {
    /* Loops are not iterated directly, no iteration flag needed. */
    *r_lpool = BLI_mempool_create(
        loop_size, allocsize->totloop, bm_mesh_chunksize_default.totloop, BLI_MEMPOOL_NOP);
  }
  if (r_fpool != NULL) {
    *r_fpool = BLI_mempool_create(
        face_size, allocsize->totface, bm_mesh_chunksize_default.totface, BLI_MEMPOOL_ALLOW_ITER);
  }
}
88 
/* Create all element mempools directly on \a bm (plus the loop-list pool
 * when compiled with BMesh 'holes' support). */
static void bm_mempool_init(BMesh *bm, const BMAllocTemplate *allocsize, const bool use_toolflags)
{
  bm_mempool_init_ex(allocsize, use_toolflags, &bm->vpool, &bm->epool, &bm->lpool, &bm->fpool);

#ifdef USE_BMESH_HOLES
  bm->looplistpool = BLI_mempool_create(sizeof(BMLoopList), 512, 512, BLI_MEMPOOL_NOP);
#endif
}
97 
/**
 * Ensure per-element tool-flag ('oflags') storage exists for all
 * vertices, edges and faces, allocating it from dedicated mempools.
 */
void BM_mesh_elem_toolflags_ensure(BMesh *bm)
{
  BLI_assert(bm->use_toolflags);

  /* All three pools already allocated: nothing to do. */
  if (bm->vtoolflagpool && bm->etoolflagpool && bm->ftoolflagpool) {
    return;
  }

  bm->vtoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer), bm->totvert, 512, BLI_MEMPOOL_NOP);
  bm->etoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer), bm->totedge, 512, BLI_MEMPOOL_NOP);
  bm->ftoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer), bm->totface, 512, BLI_MEMPOOL_NOP);

  BMIter iter;

  /* Give every element a zeroed flag layer from its pool. */
  BMVert_OFlag *v_oflag;
  BM_ITER_MESH (v_oflag, &iter, bm, BM_VERTS_OF_MESH) {
    v_oflag->oflags = BLI_mempool_calloc(bm->vtoolflagpool);
  }

  BMEdge_OFlag *e_oflag;
  BM_ITER_MESH (e_oflag, &iter, bm, BM_EDGES_OF_MESH) {
    e_oflag->oflags = BLI_mempool_calloc(bm->etoolflagpool);
  }

  BMFace_OFlag *f_oflag;
  BM_ITER_MESH (f_oflag, &iter, bm, BM_FACES_OF_MESH) {
    f_oflag->oflags = BLI_mempool_calloc(bm->ftoolflagpool);
  }

  bm->totflags = 1;
}
131 
/**
 * Destroy the tool-flag mempools (if allocated) and NULL the pointers.
 */
void BM_mesh_elem_toolflags_clear(BMesh *bm)
{
  BLI_mempool **pools[] = {&bm->vtoolflagpool, &bm->etoolflagpool, &bm->ftoolflagpool};

  for (int i = 0; i < 3; i++) {
    if (*pools[i] != NULL) {
      BLI_mempool_destroy(*pools[i]);
      *pools[i] = NULL;
    }
  }
}
147 
/**
 * \brief BMesh Make Mesh
 *
 * Allocates a new BMesh structure.
 *
 * \return The new bmesh, owned by the caller (free with #BM_mesh_free).
 */
BM_mesh_create(const BMAllocTemplate * allocsize,const struct BMeshCreateParams * params)157 BMesh *BM_mesh_create(const BMAllocTemplate *allocsize, const struct BMeshCreateParams *params)
158 {
159   /* allocate the structure */
160   BMesh *bm = MEM_callocN(sizeof(BMesh), __func__);
161 
162   /* allocate the memory pools for the mesh elements */
163   bm_mempool_init(bm, allocsize, params->use_toolflags);
164 
165   /* allocate one flag pool that we don't get rid of. */
166   bm->use_toolflags = params->use_toolflags;
167   bm->toolflag_index = 0;
168   bm->totflags = 0;
169 
170   CustomData_reset(&bm->vdata);
171   CustomData_reset(&bm->edata);
172   CustomData_reset(&bm->ldata);
173   CustomData_reset(&bm->pdata);
174 
175   return bm;
176 }
177 
178 /**
179  * \brief BMesh Free Mesh Data
180  *
181  * Frees a BMesh structure.
182  *
183  * \note frees mesh, but not actual BMesh struct
184  */
void BM_mesh_data_free(BMesh *bm)
{
  BMVert *v;
  BMEdge *e;
  BMLoop *l;
  BMFace *f;

  BMIter iter;
  BMIter itersub;

  const bool is_ldata_free = CustomData_bmesh_has_free(&bm->ldata);
  const bool is_pdata_free = CustomData_bmesh_has_free(&bm->pdata);

  /* Check if we have to call free, if not we can avoid a lot of looping */
  if (CustomData_bmesh_has_free(&(bm->vdata))) {
    BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) {
      CustomData_bmesh_free_block(&(bm->vdata), &(v->head.data));
    }
  }
  if (CustomData_bmesh_has_free(&(bm->edata))) {
    BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
      CustomData_bmesh_free_block(&(bm->edata), &(e->head.data));
    }
  }

  /* Face & loop custom-data blocks are released in one pass over faces. */
  if (is_ldata_free || is_pdata_free) {
    BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
      if (is_pdata_free) {
        CustomData_bmesh_free_block(&(bm->pdata), &(f->head.data));
      }
      if (is_ldata_free) {
        BM_ITER_ELEM (l, &itersub, f, BM_LOOPS_OF_FACE) {
          CustomData_bmesh_free_block(&(bm->ldata), &(l->head.data));
        }
      }
    }
  }

  /* Free custom data pools, This should probably go in CustomData_free? */
  if (bm->vdata.totlayer) {
    BLI_mempool_destroy(bm->vdata.pool);
  }
  if (bm->edata.totlayer) {
    BLI_mempool_destroy(bm->edata.pool);
  }
  if (bm->ldata.totlayer) {
    BLI_mempool_destroy(bm->ldata.pool);
  }
  if (bm->pdata.totlayer) {
    BLI_mempool_destroy(bm->pdata.pool);
  }

  /* free custom data */
  CustomData_free(&bm->vdata, 0);
  CustomData_free(&bm->edata, 0);
  CustomData_free(&bm->ldata, 0);
  CustomData_free(&bm->pdata, 0);

  /* destroy element pools (after custom-data blocks above, which live in them) */
  BLI_mempool_destroy(bm->vpool);
  BLI_mempool_destroy(bm->epool);
  BLI_mempool_destroy(bm->lpool);
  BLI_mempool_destroy(bm->fpool);

  /* free cached element-pointer lookup tables, when allocated */
  if (bm->vtable) {
    MEM_freeN(bm->vtable);
  }
  if (bm->etable) {
    MEM_freeN(bm->etable);
  }
  if (bm->ftable) {
    MEM_freeN(bm->ftable);
  }

  /* destroy flag pool */
  BM_mesh_elem_toolflags_clear(bm);

#ifdef USE_BMESH_HOLES
  BLI_mempool_destroy(bm->looplistpool);
#endif

  /* selection-history list */
  BLI_freelistN(&bm->selected);

  /* custom (split) normal spaces, if any */
  if (bm->lnor_spacearr) {
    BKE_lnor_spacearr_free(bm->lnor_spacearr);
    MEM_freeN(bm->lnor_spacearr);
  }

  BMO_error_clear(bm);
}
275 
276 /**
277  * \brief BMesh Clear Mesh
278  *
279  * Clear all data in bm
280  */
void BM_mesh_clear(BMesh *bm)
{
  /* Preserved across the wipe below. */
  const bool use_toolflags = bm->use_toolflags;

  /* Release all existing geometry & layers, then zero the whole struct. */
  BM_mesh_data_free(bm);
  memset(bm, 0, sizeof(*bm));

  /* Re-create empty element pools with the default sizes. */
  bm_mempool_init(bm, &bm_mesh_allocsize_default, use_toolflags);

  bm->use_toolflags = use_toolflags;
  bm->toolflag_index = 0;
  bm->totflags = 0;

  /* Fresh (empty) custom-data layouts. */
  CustomData_reset(&bm->vdata);
  CustomData_reset(&bm->edata);
  CustomData_reset(&bm->ldata);
  CustomData_reset(&bm->pdata);
}
301 
302 /**
303  * \brief BMesh Free Mesh
304  *
305  * Frees a BMesh data and its structure.
306  */
void BM_mesh_free(BMesh *bm)
{
  /* Free all mesh data first; 'bm' itself remains valid until the end. */
  BM_mesh_data_free(bm);

  if (bm->py_handle) {
    /* keep this out of 'BM_mesh_data_free' because we want python
     * to be able to clear the mesh and maintain access. */
    bpy_bm_generic_invalidate(bm->py_handle);
    bm->py_handle = NULL;
  }

  MEM_freeN(bm);
}
320 
321 /**
322  * Helpers for #BM_mesh_normals_update and #BM_verts_calc_normal_vcos
323  */
324 
325 /* We use that existing internal API flag,
326  * assuming no other tool using it would run concurrently to clnors editing. */
327 #define BM_LNORSPACE_UPDATE _FLAG_MF
328 
/* User-data for #mesh_edges_calc_vectors_cb. */
typedef struct BMEdgesCalcVectorsData {
  /* Read-only data: optional external vertex coordinates (NULL to use BMVert.co). */
  const float (*vcos)[3];

  /* Read-write data, but no need to protect it, no concurrency to fear here. */
  float (*edgevec)[3];
} BMEdgesCalcVectorsData;
336 
/* Mempool-iterator callback: store the edge's normalized direction
 * (v1 -> v2) into data->edgevec, indexed by the edge's index. */
static void mesh_edges_calc_vectors_cb(void *userdata, MempoolIterData *mp_e)
{
  BMEdgesCalcVectorsData *data = userdata;
  BMEdge *e = (BMEdge *)mp_e;

  /* The edge vector is never read for edges without a radial loop, skip them. */
  if (e->l == NULL) {
    return;
  }

  const int e_index = BM_elem_index_get(e);
  const float *v1_co = data->vcos ? data->vcos[BM_elem_index_get(e->v1)] : e->v1->co;
  const float *v2_co = data->vcos ? data->vcos[BM_elem_index_get(e->v2)] : e->v2->co;

  sub_v3_v3v3(data->edgevec[e_index], v2_co, v1_co);
  normalize_v3(data->edgevec[e_index]);
}
352 
/* Fill \a edgevec with a normalized direction vector per edge
 * (using \a vcos instead of vertex coordinates when non-NULL). */
static void bm_mesh_edges_calc_vectors(BMesh *bm, float (*edgevec)[3], const float (*vcos)[3])
{
  /* Vertex indices are only needed when reading positions from 'vcos'. */
  BM_mesh_elem_index_ensure(bm, BM_EDGE | (vcos ? BM_VERT : 0));

  BMEdgesCalcVectorsData data;
  data.vcos = vcos;
  data.edgevec = edgevec;

  BM_iter_parallel(
      bm, BM_EDGES_OF_MESH, mesh_edges_calc_vectors_cb, &data, bm->totedge >= BM_OMP_LIMIT);
}
365 
/* User-data shared by the vertex-normal accumulate & normalize callbacks. */
typedef struct BMVertsCalcNormalsData {
  /* Read-only data (each may be NULL to use the element's own data). */
  const float (*fnos)[3];
  const float (*edgevec)[3];
  const float (*vcos)[3];

  /* Read-write data, protected by an atomic-based fake spin-lock like system. */
  float (*vnos)[3];
} BMVertsCalcNormalsData;
375 
/**
 * Mempool-iterator callback: add this face's normal, weighted by the corner
 * angle, into the normal of each vertex the face uses.
 *
 * Runs in parallel over faces; concurrent writes to shared vertex normals
 * are serialized by an atomic CAS 'spin-lock' on the normal's first float
 * (see the in-body comments below).
 */
static void mesh_verts_calc_normals_accum_cb(void *userdata, MempoolIterData *mp_f)
{
/* Bit-wise float equality; also treats two identical NaN bit-patterns as equal. */
#define FLT_EQ_NONAN(_fa, _fb) (*((const uint32_t *)&_fa) == *((const uint32_t *)&_fb))

  BMVertsCalcNormalsData *data = userdata;
  BMFace *f = (BMFace *)mp_f;

  const float *f_no = data->fnos ? data->fnos[BM_elem_index_get(f)] : f->no;

  BMLoop *l_first, *l_iter;
  l_iter = l_first = BM_FACE_FIRST_LOOP(f);
  do {
    const float *e1diff, *e2diff;
    float dotprod;
    float fac;

    /* calculate the dot product of the two edges that
     * meet at the loop's vertex */
    e1diff = data->edgevec[BM_elem_index_get(l_iter->prev->e)];
    e2diff = data->edgevec[BM_elem_index_get(l_iter->e)];
    dotprod = dot_v3v3(e1diff, e2diff);

    /* edge vectors are calculated from e->v1 to e->v2, so
     * adjust the dot product if one but not both loops
     * actually runs from e->v2 to e->v1 */
    if ((l_iter->prev->e->v1 == l_iter->prev->v) ^ (l_iter->e->v1 == l_iter->v)) {
      dotprod = -dotprod;
    }

    /* Corner angle, used as the weight of this face's contribution. */
    fac = saacos(-dotprod);

    if (fac != fac) { /* NAN detection. */
      /* Degenerated case, nothing to do here, just ignore that vertex. */
      continue;
    }

    /* accumulate weighted face normal into the vertex's normal */
    float *v_no = data->vnos ? data->vnos[BM_elem_index_get(l_iter->v)] : l_iter->v->no;

    /* This block is a lockless threadsafe madd_v3_v3fl.
     * It uses the first float of the vector as a sort of cheap spin-lock,
     * assuming FLT_MAX is a safe 'illegal' value that cannot be set here otherwise.
     * It also assumes that collisions between threads are highly unlikely,
     * else performances would be quite bad here. */
    float virtual_lock = v_no[0];
    while (true) {
      /* This loops until following conditions are met:
       *   - v_no[0] has same value as virtual_lock (i.e. it did not change since last try).
       *   - v_no[0] was not FLT_MAX, i.e. it was not locked by another thread.
       */
      const float vl = atomic_cas_float(&v_no[0], virtual_lock, FLT_MAX);
      if (FLT_EQ_NONAN(vl, virtual_lock) && vl != FLT_MAX) {
        break;
      }
      virtual_lock = vl;
    }
    BLI_assert(v_no[0] == FLT_MAX);
    /* Now we own that normal value, and can change it.
     * But first scalar of the vector must not be changed yet, it's our lock! */
    virtual_lock += f_no[0] * fac;
    v_no[1] += f_no[1] * fac;
    v_no[2] += f_no[2] * fac;
    /* Second atomic operation to 'release'
     * our lock on that vector and set its first scalar value. */
    /* Note that we do not need to loop here, since we 'locked' v_no[0],
     * nobody should have changed it in the mean time. */
    virtual_lock = atomic_cas_float(&v_no[0], FLT_MAX, virtual_lock);
    BLI_assert(virtual_lock == FLT_MAX);

  } while ((l_iter = l_iter->next) != l_first);

#undef FLT_EQ_NONAN
}
449 
/* Mempool-iterator callback: normalize one accumulated vertex normal,
 * falling back to the normalized vertex location for degenerate cases. */
static void mesh_verts_calc_normals_normalize_cb(void *userdata, MempoolIterData *mp_v)
{
  BMVertsCalcNormalsData *data = userdata;
  BMVert *v = (BMVert *)mp_v;

  const int v_index = BM_elem_index_get(v);
  float *v_no = data->vnos ? data->vnos[v_index] : v->no;

  if (UNLIKELY(normalize_v3(v_no) == 0.0f)) {
    /* Zero-length accumulation: use the (normalized) position instead. */
    const float *v_co = data->vcos ? data->vcos[v_index] : v->co;
    normalize_v3_v3(v_no, v_co);
  }
}
461 
/* Accumulate angle-weighted face normals into vertex normals (first pass),
 * then normalize the result (second pass). External fnos/vcos/vnos arrays
 * may be NULL to use the elements' own data. */
static void bm_mesh_verts_calc_normals(BMesh *bm,
                                       const float (*edgevec)[3],
                                       const float (*fnos)[3],
                                       const float (*vcos)[3],
                                       float (*vnos)[3])
{
  /* Vertex indices are only required when external vertex arrays are used. */
  BM_mesh_elem_index_ensure(bm, (BM_EDGE | BM_FACE) | ((vnos || vcos) ? BM_VERT : 0));

  BMVertsCalcNormalsData data;
  data.fnos = fnos;
  data.edgevec = edgevec;
  data.vcos = vcos;
  data.vnos = vnos;

  /* Pass 1: accumulate weighted face normals onto vertices. */
  BM_iter_parallel(
      bm, BM_FACES_OF_MESH, mesh_verts_calc_normals_accum_cb, &data, bm->totface >= BM_OMP_LIMIT);

  /* Pass 2: normalize the accumulated vertex normals. */
  BM_iter_parallel(bm,
                   BM_VERTS_OF_MESH,
                   mesh_verts_calc_normals_normalize_cb,
                   &data,
                   bm->totvert >= BM_OMP_LIMIT);
}
487 
/* Mempool-iterator callback: recompute a single face's normal. */
static void mesh_faces_calc_normals_cb(void *UNUSED(userdata), MempoolIterData *mp_f)
{
  BM_face_normal_update((BMFace *)mp_f);
}
494 
495 /**
496  * \brief BMesh Compute Normals
497  *
498  * Updates the normals of a mesh.
499  */
void BM_mesh_normals_update(BMesh *bm)
{
  /* Scratch array: one normalized direction vector per edge. */
  float(*edge_dirs)[3] = MEM_mallocN(sizeof(*edge_dirs) * bm->totedge, __func__);

  /* Parallel mempool iteration cannot assign indices inline, ensure them up-front. */
  BM_mesh_elem_index_ensure(bm, (BM_EDGE | BM_FACE));

  /* Step 1: recompute all face normals. */
  BM_iter_parallel(
      bm, BM_FACES_OF_MESH, mesh_faces_calc_normals_cb, NULL, bm->totface >= BM_OMP_LIMIT);

  /* Step 2: zero vertex normals, indexing verts while we're at it. */
  BMVert *v;
  BMIter viter;
  int i;
  BM_ITER_MESH_INDEX (v, &viter, bm, BM_VERTS_OF_MESH, i) {
    BM_elem_index_set(v, i); /* set_inline */
    zero_v3(v->no);
  }
  bm->elem_index_dirty &= ~BM_VERT;

  /* Step 3: normalized edge directions, used to weight the face normals
   * contributed to each vertex normal. */
  bm_mesh_edges_calc_vectors(bm, edge_dirs, NULL);

  /* Step 4: accumulate weighted face normals onto vertices & normalize. */
  bm_mesh_verts_calc_normals(bm, (const float(*)[3])edge_dirs, NULL, NULL, NULL);

  MEM_freeN(edge_dirs);
}
531 
532 /**
533  * \brief BMesh Compute Normals from/to external data.
534  *
535  * Computes the vertex normals of a mesh into vnos,
536  * using given vertex coordinates (vcos) and polygon normals (fnos).
537  */
void BM_verts_calc_normal_vcos(BMesh *bm,
                               const float (*fnos)[3],
                               const float (*vcos)[3],
                               float (*vnos)[3])
{
  /* Scratch array: one normalized direction vector per edge, used to weight
   * the face normals contributed to each vertex normal. */
  float(*edge_dirs)[3] = MEM_mallocN(sizeof(*edge_dirs) * bm->totedge, __func__);

  bm_mesh_edges_calc_vectors(bm, edge_dirs, vcos);

  /* Accumulate weighted face normals into vnos, then normalize. */
  bm_mesh_verts_calc_normals(bm, (const float(*)[3])edge_dirs, fnos, vcos, vnos);

  MEM_freeN(edge_dirs);
}
554 
555 /**
556  * Helpers for #BM_mesh_loop_normals_update and #BM_loops_calc_normal_vcos
557  */
/**
 * Tag (#BM_ELEM_TAG) edges that are 'smooth' for split-normal computation,
 * and pre-fill \a r_lnos (when non-NULL) with vertex normals for the loops
 * around smooth edges, as if everything were smooth.
 *
 * \param vnos, fnos: optional external vertex/face normals (NULL to use the
 * elements' own data).
 * \param split_angle: angle (radians) above which adjacent faces make an
 * edge sharp; values >= M_PI disable the angle test.
 * \param do_sharp_edges_tag: when true, also clear #BM_ELEM_SMOOTH on edges
 * made sharp purely by the angle threshold.
 */
static void bm_mesh_edges_sharp_tag(BMesh *bm,
                                    const float (*vnos)[3],
                                    const float (*fnos)[3],
                                    float (*r_lnos)[3],
                                    const float split_angle,
                                    const bool do_sharp_edges_tag)
{
  BMIter eiter;
  BMEdge *e;
  int i;

  const bool check_angle = (split_angle < (float)M_PI);
  const float split_angle_cos = check_angle ? cosf(split_angle) : -1.0f;

  {
    char htype = BM_VERT | BM_LOOP;
    if (fnos) {
      htype |= BM_FACE;
    }
    BM_mesh_elem_index_ensure(bm, htype);
  }

  /* This first loop checks which edges are actually smooth,
   * and pre-populate lnos with vnos (as if they were all smooth). */
  BM_ITER_MESH_INDEX (e, &eiter, bm, BM_EDGES_OF_MESH, i) {
    BMLoop *l_a, *l_b;

    BM_elem_index_set(e, i);              /* set_inline */
    BM_elem_flag_disable(e, BM_ELEM_TAG); /* Clear tag (means edge is sharp). */

    /* An edge with only two loops, might be smooth... */
    if (BM_edge_loop_pair(e, &l_a, &l_b)) {
      bool is_angle_smooth = true;
      if (check_angle) {
        const float *no_a = fnos ? fnos[BM_elem_index_get(l_a->f)] : l_a->f->no;
        const float *no_b = fnos ? fnos[BM_elem_index_get(l_b->f)] : l_b->f->no;
        is_angle_smooth = (dot_v3v3(no_a, no_b) >= split_angle_cos);
      }

      /* We only tag edges that are *really* smooth:
       * If the angle between both its polys' normals is below split_angle value,
       * and it is tagged as such,
       * and both its faces are smooth,
       * and both its faces have compatible (non-flipped) normals,
       * i.e. both loops on the same edge do not share the same vertex.
       */
      if (BM_elem_flag_test(e, BM_ELEM_SMOOTH) && BM_elem_flag_test(l_a->f, BM_ELEM_SMOOTH) &&
          BM_elem_flag_test(l_b->f, BM_ELEM_SMOOTH) && l_a->v != l_b->v) {
        if (is_angle_smooth) {
          const float *no;
          BM_elem_flag_enable(e, BM_ELEM_TAG);

          /* linked vertices might be fully smooth, copy their normals to loop ones. */
          if (r_lnos) {
            no = vnos ? vnos[BM_elem_index_get(l_a->v)] : l_a->v->no;
            copy_v3_v3(r_lnos[BM_elem_index_get(l_a)], no);
            no = vnos ? vnos[BM_elem_index_get(l_b->v)] : l_b->v->no;
            copy_v3_v3(r_lnos[BM_elem_index_get(l_b)], no);
          }
        }
        else if (do_sharp_edges_tag) {
          /* Note that we do not care about the other sharp-edge cases
           * (sharp poly, non-manifold edge, etc.),
           * only tag edge as sharp when it is due to angle threshold. */
          BM_elem_flag_disable(e, BM_ELEM_SMOOTH);
        }
      }
    }
  }

  bm->elem_index_dirty &= ~BM_EDGE;
}
630 
631 /**
632  * Check whether given loop is part of an unknown-so-far cyclic smooth fan, or not.
633  * Needed because cyclic smooth fans have no obvious 'entry point',
634  * and yet we need to walk them once, and only once.
635  */
BM_loop_check_cyclic_smooth_fan(BMLoop * l_curr)636 bool BM_loop_check_cyclic_smooth_fan(BMLoop *l_curr)
637 {
638   BMLoop *lfan_pivot_next = l_curr;
639   BMEdge *e_next = l_curr->e;
640 
641   BLI_assert(!BM_elem_flag_test(lfan_pivot_next, BM_ELEM_TAG));
642   BM_elem_flag_enable(lfan_pivot_next, BM_ELEM_TAG);
643 
644   while (true) {
645     /* Much simpler than in sibling code with basic Mesh data! */
646     lfan_pivot_next = BM_vert_step_fan_loop(lfan_pivot_next, &e_next);
647 
648     if (!lfan_pivot_next || !BM_elem_flag_test(e_next, BM_ELEM_TAG)) {
649       /* Sharp loop/edge, so not a cyclic smooth fan... */
650       return false;
651     }
652     /* Smooth loop/edge... */
653     if (BM_elem_flag_test(lfan_pivot_next, BM_ELEM_TAG)) {
654       if (lfan_pivot_next == l_curr) {
655         /* We walked around a whole cyclic smooth fan
656          * without finding any already-processed loop,
657          * means we can use initial l_curr/l_prev edge as start for this smooth fan. */
658         return true;
659       }
660       /* ... already checked in some previous looping, we can abort. */
661       return false;
662     }
663     /* ... we can skip it in future, and keep checking the smooth fan. */
664     BM_elem_flag_enable(lfan_pivot_next, BM_ELEM_TAG);
665   }
666 }
667 
668 /**
669  * BMesh version of BKE_mesh_normals_loop_split() in mesh_evaluate.c
670  * Will use first clnors_data array, and fallback to cd_loop_clnors_offset
671  * (use NULL and -1 to not use clnors).
672  */
bm_mesh_loops_calc_normals(BMesh * bm,const float (* vcos)[3],const float (* fnos)[3],float (* r_lnos)[3],MLoopNorSpaceArray * r_lnors_spacearr,const short (* clnors_data)[2],const int cd_loop_clnors_offset,const bool do_rebuild)673 static void bm_mesh_loops_calc_normals(BMesh *bm,
674                                        const float (*vcos)[3],
675                                        const float (*fnos)[3],
676                                        float (*r_lnos)[3],
677                                        MLoopNorSpaceArray *r_lnors_spacearr,
678                                        const short (*clnors_data)[2],
679                                        const int cd_loop_clnors_offset,
680                                        const bool do_rebuild)
681 {
682   BMIter fiter;
683   BMFace *f_curr;
684   const bool has_clnors = clnors_data || (cd_loop_clnors_offset != -1);
685 
686   MLoopNorSpaceArray _lnors_spacearr = {NULL};
687 
688   /* Temp normal stack. */
689   BLI_SMALLSTACK_DECLARE(normal, float *);
690   /* Temp clnors stack. */
691   BLI_SMALLSTACK_DECLARE(clnors, short *);
692   /* Temp edge vectors stack, only used when computing lnor spacearr. */
693   BLI_Stack *edge_vectors = NULL;
694 
695   {
696     char htype = 0;
697     if (vcos) {
698       htype |= BM_VERT;
699     }
700     /* Face/Loop indices are set inline below. */
701     BM_mesh_elem_index_ensure(bm, htype);
702   }
703 
704   if (!r_lnors_spacearr && has_clnors) {
705     /* We need to compute lnor spacearr if some custom lnor data are given to us! */
706     r_lnors_spacearr = &_lnors_spacearr;
707   }
708   if (r_lnors_spacearr) {
709     BKE_lnor_spacearr_init(r_lnors_spacearr, bm->totloop, MLNOR_SPACEARR_BMLOOP_PTR);
710     edge_vectors = BLI_stack_new(sizeof(float[3]), __func__);
711   }
712 
713   /* Clear all loops' tags (means none are to be skipped for now). */
714   int index_face, index_loop = 0;
715   BM_ITER_MESH_INDEX (f_curr, &fiter, bm, BM_FACES_OF_MESH, index_face) {
716     BMLoop *l_curr, *l_first;
717 
718     BM_elem_index_set(f_curr, index_face); /* set_inline */
719 
720     l_curr = l_first = BM_FACE_FIRST_LOOP(f_curr);
721     do {
722       BM_elem_index_set(l_curr, index_loop++); /* set_inline */
723       BM_elem_flag_disable(l_curr, BM_ELEM_TAG);
724     } while ((l_curr = l_curr->next) != l_first);
725   }
726   bm->elem_index_dirty &= ~(BM_FACE | BM_LOOP);
727 
728   /* We now know edges that can be smoothed (they are tagged),
729    * and edges that will be hard (they aren't).
730    * Now, time to generate the normals.
731    */
732   BM_ITER_MESH (f_curr, &fiter, bm, BM_FACES_OF_MESH) {
733     BMLoop *l_curr, *l_first;
734 
735     l_curr = l_first = BM_FACE_FIRST_LOOP(f_curr);
736     do {
737       if (do_rebuild && !BM_ELEM_API_FLAG_TEST(l_curr, BM_LNORSPACE_UPDATE) &&
738           !(bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL)) {
739         continue;
740       }
741       /* A smooth edge, we have to check for cyclic smooth fan case.
742        * If we find a new, never-processed cyclic smooth fan, we can do it now using that loop/edge
743        * as 'entry point', otherwise we can skip it. */
744 
745       /* Note: In theory, we could make bm_mesh_loop_check_cyclic_smooth_fan() store
746        * mlfan_pivot's in a stack, to avoid having to fan again around
747        * the vert during actual computation of clnor & clnorspace. However, this would complicate
748        * the code, add more memory usage, and
749        * BM_vert_step_fan_loop() is quite cheap in term of CPU cycles,
750        * so really think it's not worth it. */
751       if (BM_elem_flag_test(l_curr->e, BM_ELEM_TAG) &&
752           (BM_elem_flag_test(l_curr, BM_ELEM_TAG) || !BM_loop_check_cyclic_smooth_fan(l_curr))) {
753       }
754       else if (!BM_elem_flag_test(l_curr->e, BM_ELEM_TAG) &&
755                !BM_elem_flag_test(l_curr->prev->e, BM_ELEM_TAG)) {
756         /* Simple case (both edges around that vertex are sharp in related polygon),
757          * this vertex just takes its poly normal.
758          */
759         const int l_curr_index = BM_elem_index_get(l_curr);
760         const float *no = fnos ? fnos[BM_elem_index_get(f_curr)] : f_curr->no;
761         copy_v3_v3(r_lnos[l_curr_index], no);
762 
763         /* If needed, generate this (simple!) lnor space. */
764         if (r_lnors_spacearr) {
765           float vec_curr[3], vec_prev[3];
766           MLoopNorSpace *lnor_space = BKE_lnor_space_create(r_lnors_spacearr);
767 
768           {
769             const BMVert *v_pivot = l_curr->v;
770             const float *co_pivot = vcos ? vcos[BM_elem_index_get(v_pivot)] : v_pivot->co;
771             const BMVert *v_1 = BM_edge_other_vert(l_curr->e, v_pivot);
772             const float *co_1 = vcos ? vcos[BM_elem_index_get(v_1)] : v_1->co;
773             const BMVert *v_2 = BM_edge_other_vert(l_curr->prev->e, v_pivot);
774             const float *co_2 = vcos ? vcos[BM_elem_index_get(v_2)] : v_2->co;
775 
776             sub_v3_v3v3(vec_curr, co_1, co_pivot);
777             normalize_v3(vec_curr);
778             sub_v3_v3v3(vec_prev, co_2, co_pivot);
779             normalize_v3(vec_prev);
780           }
781 
782           BKE_lnor_space_define(lnor_space, r_lnos[l_curr_index], vec_curr, vec_prev, NULL);
783           /* We know there is only one loop in this space,
784            * no need to create a linklist in this case... */
785           BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, l_curr_index, l_curr, true);
786 
787           if (has_clnors) {
788             const short(*clnor)[2] = clnors_data ? &clnors_data[l_curr_index] :
789                                                    (const void *)BM_ELEM_CD_GET_VOID_P(
790                                                        l_curr, cd_loop_clnors_offset);
791             BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor, r_lnos[l_curr_index]);
792           }
793         }
794       }
795       /* We *do not need* to check/tag loops as already computed!
796        * Due to the fact a loop only links to one of its two edges,
797        * a same fan *will never be walked more than once!*
798        * Since we consider edges having neighbor faces with inverted (flipped) normals as sharp,
799        * we are sure that no fan will be skipped, even only considering the case
800        * (sharp curr_edge, smooth prev_edge), and not the alternative
801        * (smooth curr_edge, sharp prev_edge).
802        * All this due/thanks to link between normals and loop ordering.
803        */
804       else {
805         /* We have to fan around current vertex, until we find the other non-smooth edge,
806          * and accumulate face normals into the vertex!
807          * Note in case this vertex has only one sharp edge,
808          * this is a waste because the normal is the same as the vertex normal,
809          * but I do not see any easy way to detect that (would need to count number of sharp edges
810          * per vertex, I doubt the additional memory usage would be worth it, especially as it
811          * should not be a common case in real-life meshes anyway).
812          */
813         BMVert *v_pivot = l_curr->v;
814         BMEdge *e_next;
815         const BMEdge *e_org = l_curr->e;
816         BMLoop *lfan_pivot, *lfan_pivot_next;
817         int lfan_pivot_index;
818         float lnor[3] = {0.0f, 0.0f, 0.0f};
819         float vec_curr[3], vec_next[3], vec_org[3];
820 
821         /* We validate clnors data on the fly - cheapest way to do! */
822         int clnors_avg[2] = {0, 0};
823         const short(*clnor_ref)[2] = NULL;
824         int clnors_nbr = 0;
825         bool clnors_invalid = false;
826 
827         const float *co_pivot = vcos ? vcos[BM_elem_index_get(v_pivot)] : v_pivot->co;
828 
829         MLoopNorSpace *lnor_space = r_lnors_spacearr ? BKE_lnor_space_create(r_lnors_spacearr) :
830                                                        NULL;
831 
832         BLI_assert((edge_vectors == NULL) || BLI_stack_is_empty(edge_vectors));
833 
834         lfan_pivot = l_curr;
835         lfan_pivot_index = BM_elem_index_get(lfan_pivot);
836         e_next = lfan_pivot->e; /* Current edge here, actually! */
837 
838         /* Only need to compute previous edge's vector once,
839          * then we can just reuse old current one! */
840         {
841           const BMVert *v_2 = BM_edge_other_vert(e_next, v_pivot);
842           const float *co_2 = vcos ? vcos[BM_elem_index_get(v_2)] : v_2->co;
843 
844           sub_v3_v3v3(vec_org, co_2, co_pivot);
845           normalize_v3(vec_org);
846           copy_v3_v3(vec_curr, vec_org);
847 
848           if (r_lnors_spacearr) {
849             BLI_stack_push(edge_vectors, vec_org);
850           }
851         }
852 
853         while (true) {
854           /* Much simpler than in sibling code with basic Mesh data! */
855           lfan_pivot_next = BM_vert_step_fan_loop(lfan_pivot, &e_next);
856           if (lfan_pivot_next) {
857             BLI_assert(lfan_pivot_next->v == v_pivot);
858           }
859           else {
860             /* next edge is non-manifold, we have to find it ourselves! */
861             e_next = (lfan_pivot->e == e_next) ? lfan_pivot->prev->e : lfan_pivot->e;
862           }
863 
864           /* Compute edge vector.
865            * NOTE: We could pre-compute those into an array, in the first iteration,
866            * instead of computing them twice (or more) here.
867            * However, time gained is not worth memory and time lost,
868            * given the fact that this code should not be called that much in real-life meshes.
869            */
870           {
871             const BMVert *v_2 = BM_edge_other_vert(e_next, v_pivot);
872             const float *co_2 = vcos ? vcos[BM_elem_index_get(v_2)] : v_2->co;
873 
874             sub_v3_v3v3(vec_next, co_2, co_pivot);
875             normalize_v3(vec_next);
876           }
877 
878           {
879             /* Code similar to accumulate_vertex_normals_poly_v3. */
880             /* Calculate angle between the two poly edges incident on this vertex. */
881             const BMFace *f = lfan_pivot->f;
882             const float fac = saacos(dot_v3v3(vec_next, vec_curr));
883             const float *no = fnos ? fnos[BM_elem_index_get(f)] : f->no;
884             /* Accumulate */
885             madd_v3_v3fl(lnor, no, fac);
886 
887             if (has_clnors) {
888               /* Accumulate all clnors, if they are not all equal we have to fix that! */
889               const short(*clnor)[2] = clnors_data ? &clnors_data[lfan_pivot_index] :
890                                                      (const void *)BM_ELEM_CD_GET_VOID_P(
891                                                          lfan_pivot, cd_loop_clnors_offset);
892               if (clnors_nbr) {
893                 clnors_invalid |= ((*clnor_ref)[0] != (*clnor)[0] ||
894                                    (*clnor_ref)[1] != (*clnor)[1]);
895               }
896               else {
897                 clnor_ref = clnor;
898               }
899               clnors_avg[0] += (*clnor)[0];
900               clnors_avg[1] += (*clnor)[1];
901               clnors_nbr++;
902               /* We store here a pointer to all custom lnors processed. */
903               BLI_SMALLSTACK_PUSH(clnors, (short *)*clnor);
904             }
905           }
906 
907           /* We store here a pointer to all loop-normals processed. */
908           BLI_SMALLSTACK_PUSH(normal, (float *)r_lnos[lfan_pivot_index]);
909 
910           if (r_lnors_spacearr) {
911             /* Assign current lnor space to current 'vertex' loop. */
912             BKE_lnor_space_add_loop(
913                 r_lnors_spacearr, lnor_space, lfan_pivot_index, lfan_pivot, false);
914             if (e_next != e_org) {
915               /* We store here all edges-normalized vectors processed. */
916               BLI_stack_push(edge_vectors, vec_next);
917             }
918           }
919 
920           if (!BM_elem_flag_test(e_next, BM_ELEM_TAG) || (e_next == e_org)) {
921             /* Next edge is sharp, we have finished with this fan of faces around this vert! */
922             break;
923           }
924 
925           /* Copy next edge vector to current one. */
926           copy_v3_v3(vec_curr, vec_next);
927           /* Next pivot loop to current one. */
928           lfan_pivot = lfan_pivot_next;
929           lfan_pivot_index = BM_elem_index_get(lfan_pivot);
930         }
931 
932         {
933           float lnor_len = normalize_v3(lnor);
934 
935           /* If we are generating lnor spacearr, we can now define the one for this fan. */
936           if (r_lnors_spacearr) {
937             if (UNLIKELY(lnor_len == 0.0f)) {
938               /* Use vertex normal as fallback! */
939               copy_v3_v3(lnor, r_lnos[lfan_pivot_index]);
940               lnor_len = 1.0f;
941             }
942 
943             BKE_lnor_space_define(lnor_space, lnor, vec_org, vec_next, edge_vectors);
944 
945             if (has_clnors) {
946               if (clnors_invalid) {
947                 short *clnor;
948 
949                 clnors_avg[0] /= clnors_nbr;
950                 clnors_avg[1] /= clnors_nbr;
951                 /* Fix/update all clnors of this fan with computed average value. */
952 
953                 /* Prints continuously when merge custom normals, so commenting. */
954                 /* printf("Invalid clnors in this fan!\n"); */
955 
956                 while ((clnor = BLI_SMALLSTACK_POP(clnors))) {
957                   // print_v2("org clnor", clnor);
958                   clnor[0] = (short)clnors_avg[0];
959                   clnor[1] = (short)clnors_avg[1];
960                 }
961                 // print_v2("new clnors", clnors_avg);
962               }
963               else {
964                 /* We still have to consume the stack! */
965                 while (BLI_SMALLSTACK_POP(clnors)) {
966                   /* pass */
967                 }
968               }
969               BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor_ref, lnor);
970             }
971           }
972 
973           /* In case we get a zero normal here, just use vertex normal already set! */
974           if (LIKELY(lnor_len != 0.0f)) {
975             /* Copy back the final computed normal into all related loop-normals. */
976             float *nor;
977 
978             while ((nor = BLI_SMALLSTACK_POP(normal))) {
979               copy_v3_v3(nor, lnor);
980             }
981           }
982           else {
983             /* We still have to consume the stack! */
984             while (BLI_SMALLSTACK_POP(normal)) {
985               /* pass */
986             }
987           }
988         }
989 
990         /* Tag related vertex as sharp, to avoid fanning around it again
991          * (in case it was a smooth one). */
992         if (r_lnors_spacearr) {
993           BM_elem_flag_enable(l_curr->v, BM_ELEM_TAG);
994         }
995       }
996     } while ((l_curr = l_curr->next) != l_first);
997   }
998 
999   if (r_lnors_spacearr) {
1000     BLI_stack_free(edge_vectors);
1001     if (r_lnors_spacearr == &_lnors_spacearr) {
1002       BKE_lnor_spacearr_free(r_lnors_spacearr);
1003     }
1004   }
1005 }
1006 
1007 /* This threshold is a bit touchy (usual float precision issue), this value seems OK. */
1008 #define LNOR_SPACE_TRIGO_THRESHOLD (1.0f - 1e-4f)
1009 
1010 /**
1011  * Check each current smooth fan (one lnor space per smooth fan!), and if all its
1012  * matching custom lnors are not (enough) equal, add sharp edges as needed.
1013  */
static bool bm_mesh_loops_split_lnor_fans(BMesh *bm,
                                          MLoopNorSpaceArray *lnors_spacearr,
                                          const float (*new_lnors)[3])
{
  /* One bit per loop, set once that loop's smooth fan has been fully processed. */
  BLI_bitmap *done_loops = BLI_BITMAP_NEW((size_t)bm->totloop, __func__);
  /* Return value: whether any edge was newly tagged as sharp. */
  bool changed = false;

  BLI_assert(lnors_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR);

  for (int i = 0; i < bm->totloop; i++) {
    if (!lnors_spacearr->lspacearr[i]) {
      /* This should not happen in theory, but in some rare case (probably ugly geometry)
       * we can get some NULL loopspacearr at this point. :/
       * Maybe we should set those loops' edges as sharp?
       */
      BLI_BITMAP_ENABLE(done_loops, i);
      if (G.debug & G_DEBUG) {
        printf("WARNING! Getting invalid NULL loop space for loop %d!\n", i);
      }
      continue;
    }

    if (!BLI_BITMAP_TEST(done_loops, i)) {
      /* Notes:
       * * In case of mono-loop smooth fan, we have nothing to do.
       * * Loops in this linklist are ordered (in reversed order compared to how they were
       *   discovered by BKE_mesh_normals_loop_split(), but this is not a problem).
       *   Which means if we find a mismatching clnor,
       *   we know all remaining loops will have to be in a new, different smooth fan/lnor space.
       * * In smooth fan case, we compare each clnor against a ref one,
       *   to avoid small differences adding up into a real big one in the end!
       */
      if (lnors_spacearr->lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
        BLI_BITMAP_ENABLE(done_loops, i);
        continue;
      }

      LinkNode *loops = lnors_spacearr->lspacearr[i]->loops;
      BMLoop *prev_ml = NULL;
      const float *org_nor = NULL;

      /* Walk all loops of this smooth fan, comparing each new custom normal against the
       * current reference one; a divergence beyond the threshold splits the fan. */
      while (loops) {
        BMLoop *ml = loops->link;
        const int lidx = BM_elem_index_get(ml);
        const float *nor = new_lnors[lidx];

        if (!org_nor) {
          org_nor = nor;
        }
        else if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
          /* Current normal differs too much from org one, we have to tag the edge between
           * previous loop's face and current's one as sharp.
           * We know those two loops do not point to the same edge,
           * since we do not allow reversed winding in a same smooth fan.
           */
          BMEdge *e = (prev_ml->e == ml->prev->e) ? prev_ml->e : ml->e;

          /* Clearing both TAG and SMOOTH makes this edge sharp for the next normals pass. */
          BM_elem_flag_disable(e, BM_ELEM_TAG | BM_ELEM_SMOOTH);
          changed = true;

          /* Restart comparison from this normal: the remaining loops form a new fan. */
          org_nor = nor;
        }

        prev_ml = ml;
        loops = loops->next;
        BLI_BITMAP_ENABLE(done_loops, lidx);
      }

      /* We also have to check between last and first loops,
       * otherwise we may miss some sharp edges here!
       * This is just a simplified version of above while loop.
       * See T45984. */
      loops = lnors_spacearr->lspacearr[i]->loops;
      if (loops && org_nor) {
        BMLoop *ml = loops->link;
        const int lidx = BM_elem_index_get(ml);
        const float *nor = new_lnors[lidx];

        if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
          BMEdge *e = (prev_ml->e == ml->prev->e) ? prev_ml->e : ml->e;

          BM_elem_flag_disable(e, BM_ELEM_TAG | BM_ELEM_SMOOTH);
          changed = true;
        }
      }
    }
  }

  MEM_freeN(done_loops);
  return changed;
}
1105 
1106 /**
1107  * Assign custom normal data from given normal vectors, averaging normals
1108  * from one smooth fan as necessary.
1109  */
static void bm_mesh_loops_assign_normal_data(BMesh *bm,
                                             MLoopNorSpaceArray *lnors_spacearr,
                                             short (*r_clnors_data)[2],
                                             const int cd_loop_clnors_offset,
                                             const float (*new_lnors)[3])
{
  /* One bit per loop, set once that loop's smooth fan has been processed. */
  BLI_bitmap *done_loops = BLI_BITMAP_NEW((size_t)bm->totloop, __func__);

  BLI_SMALLSTACK_DECLARE(clnors_data, short *);

  BLI_assert(lnors_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR);

  for (int i = 0; i < bm->totloop; i++) {
    if (!lnors_spacearr->lspacearr[i]) {
      BLI_BITMAP_ENABLE(done_loops, i);
      if (G.debug & G_DEBUG) {
        printf("WARNING! Still getting invalid NULL loop space in second loop for loop %d!\n", i);
      }
      continue;
    }

    if (!BLI_BITMAP_TEST(done_loops, i)) {
      /* Note we accumulate and average all custom normals in current smooth fan,
       * to avoid getting different clnors data (tiny differences in plain custom normals can
       * give rather huge differences in computed 2D factors).
       */
      LinkNode *loops = lnors_spacearr->lspacearr[i]->loops;

      if (lnors_spacearr->lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
        /* Single-loop spaces store the BMLoop pointer directly in `loops`
         * (no linked list is allocated for them), hence the cast. */
        BMLoop *ml = (BMLoop *)loops;
        const int lidx = BM_elem_index_get(ml);

        BLI_assert(lidx == i);

        const float *nor = new_lnors[lidx];
        /* Write either into the given array, or into the custom-data layer. */
        short *clnor = r_clnors_data ? &r_clnors_data[lidx] :
                                       BM_ELEM_CD_GET_VOID_P(ml, cd_loop_clnors_offset);

        BKE_lnor_space_custom_normal_to_data(lnors_spacearr->lspacearr[i], nor, clnor);
        BLI_BITMAP_ENABLE(done_loops, i);
      }
      else {
        int nbr_nors = 0;
        float avg_nor[3];
        short clnor_data_tmp[2], *clnor_data;

        zero_v3(avg_nor);

        /* First pass: accumulate the normals and stash clnor pointers of the whole fan. */
        while (loops) {
          BMLoop *ml = loops->link;
          const int lidx = BM_elem_index_get(ml);
          const float *nor = new_lnors[lidx];
          short *clnor = r_clnors_data ? &r_clnors_data[lidx] :
                                         BM_ELEM_CD_GET_VOID_P(ml, cd_loop_clnors_offset);

          nbr_nors++;
          add_v3_v3(avg_nor, nor);
          BLI_SMALLSTACK_PUSH(clnors_data, clnor);

          loops = loops->next;
          BLI_BITMAP_ENABLE(done_loops, lidx);
        }

        /* Average, encode once, then copy the same encoded value to every loop of the fan. */
        mul_v3_fl(avg_nor, 1.0f / (float)nbr_nors);
        BKE_lnor_space_custom_normal_to_data(
            lnors_spacearr->lspacearr[i], avg_nor, clnor_data_tmp);

        while ((clnor_data = BLI_SMALLSTACK_POP(clnors_data))) {
          clnor_data[0] = clnor_data_tmp[0];
          clnor_data[1] = clnor_data_tmp[1];
        }
      }
    }
  }

  MEM_freeN(done_loops);
}
1187 
1188 /**
1189  * Compute internal representation of given custom normals (as an array of float[2] or data layer).
1190  *
1191  * It also makes sure the mesh matches those custom normals, by marking new sharp edges to split
1192  * the smooth fans when loop normals for the same vertex are different, or averaging the normals
1193  * instead, depending on the do_split_fans parameter.
1194  */
static void bm_mesh_loops_custom_normals_set(BMesh *bm,
                                             const float (*vcos)[3],
                                             const float (*vnos)[3],
                                             const float (*fnos)[3],
                                             MLoopNorSpaceArray *r_lnors_spacearr,
                                             short (*r_clnors_data)[2],
                                             const int cd_loop_clnors_offset,
                                             float (*new_lnors)[3],
                                             const int cd_new_lnors_offset,
                                             bool do_split_fans)
{
  BMFace *f;
  BMLoop *l;
  BMIter liter, fiter;
  /* Current loop normals, used below as fallback for zero-length custom normals. */
  float(*cur_lnors)[3] = MEM_mallocN(sizeof(*cur_lnors) * bm->totloop, __func__);

  BKE_lnor_spacearr_clear(r_lnors_spacearr);

  /* Tag smooth edges and set lnos from vnos when they might be completely smooth...
   * When using custom loop normals, disable the angle feature! */
  bm_mesh_edges_sharp_tag(bm, vnos, fnos, cur_lnors, (float)M_PI, false);

  /* Finish computing lnos by accumulating face normals
   * in each fan of faces defined by sharp edges. */
  bm_mesh_loops_calc_normals(
      bm, vcos, fnos, cur_lnors, r_lnors_spacearr, r_clnors_data, cd_loop_clnors_offset, false);

  /* Extract new normals from the data layer if necessary. */
  float(*custom_lnors)[3] = new_lnors;

  if (new_lnors == NULL) {
    custom_lnors = MEM_mallocN(sizeof(*new_lnors) * bm->totloop, __func__);

    BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
      BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
        const float *normal = BM_ELEM_CD_GET_VOID_P(l, cd_new_lnors_offset);
        copy_v3_v3(custom_lnors[BM_elem_index_get(l)], normal);
      }
    }
  }

  /* Validate the new normals: zero vectors fall back to the currently computed loop
   * normal, anything else is normalized. */
  for (int i = 0; i < bm->totloop; i++) {
    if (is_zero_v3(custom_lnors[i])) {
      copy_v3_v3(custom_lnors[i], cur_lnors[i]);
    }
    else {
      normalize_v3(custom_lnors[i]);
    }
  }

  /* Now, check each current smooth fan (one lnor space per smooth fan!),
   * and if all its matching custom lnors are not equal, add sharp edges as needed. */
  if (do_split_fans && bm_mesh_loops_split_lnor_fans(bm, r_lnors_spacearr, custom_lnors)) {
    /* If any sharp edges were added, run bm_mesh_loops_calc_normals() again to get lnor
     * spacearr/smooth fans matching the given custom lnors. */
    BKE_lnor_spacearr_clear(r_lnors_spacearr);

    bm_mesh_loops_calc_normals(
        bm, vcos, fnos, cur_lnors, r_lnors_spacearr, r_clnors_data, cd_loop_clnors_offset, false);
  }

  /* And we just have to convert plain object-space custom normals to our
   * lnor space-encoded ones. */
  bm_mesh_loops_assign_normal_data(
      bm, r_lnors_spacearr, r_clnors_data, cd_loop_clnors_offset, custom_lnors);

  MEM_freeN(cur_lnors);

  /* Only free the normals buffer if we allocated it here (caller passed none). */
  if (custom_lnors != new_lnors) {
    MEM_freeN(custom_lnors);
  }
}
1268 
/**
 * Trivial loop-normal computation when auto-smooth is disabled: every loop gets
 * either its face's normal (flat faces) or its vertex's normal (smooth faces).
 */
static void bm_mesh_loops_calc_normals_no_autosmooth(BMesh *bm,
                                                     const float (*vnos)[3],
                                                     const float (*fnos)[3],
                                                     float (*r_lnos)[3])
{
  BMIter fiter;
  BMFace *f;

  /* Make sure the element indices we look up below are valid. */
  char htype = BM_LOOP;
  if (vnos) {
    htype |= BM_VERT;
  }
  if (fnos) {
    htype |= BM_FACE;
  }
  BM_mesh_elem_index_ensure(bm, htype);

  BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
    const bool use_face_nor = !BM_elem_flag_test(f, BM_ELEM_SMOOTH);
    BMLoop *l_iter, *l_first;

    l_iter = l_first = BM_FACE_FIRST_LOOP(f);
    do {
      const float *no;
      if (use_face_nor) {
        /* Flat face: all its loops share the face normal. */
        no = fnos ? fnos[BM_elem_index_get(f)] : f->no;
      }
      else {
        /* Smooth face: each loop uses its vertex normal. */
        no = vnos ? vnos[BM_elem_index_get(l_iter->v)] : l_iter->v->no;
      }
      copy_v3_v3(r_lnos[BM_elem_index_get(l_iter)], no);
    } while ((l_iter = l_iter->next) != l_first);
  }
}
1301 
#if 0 /* Unused currently */
/**
 * \brief BMesh Compute Loop Normals
 *
 * Updates the loop normals of a mesh.
 * Assumes vertex and face normals are valid (else call BM_mesh_normals_update() first)!
 */
void BM_mesh_loop_normals_update(BMesh *bm,
                                 const bool use_split_normals,
                                 const float split_angle,
                                 float (*r_lnos)[3],
                                 MLoopNorSpaceArray *r_lnors_spacearr,
                                 const short (*clnors_data)[2],
                                 const int cd_loop_clnors_offset)
{
  const bool has_clnors = clnors_data || (cd_loop_clnors_offset != -1);

  if (use_split_normals) {
    /* Tag smooth edges and set lnos from vnos when they might be completely smooth...
     * When using custom loop normals, disable the angle feature!
     * NOTE: call updated to the current bm_mesh_edges_sharp_tag() signature:
     * `r_lnos` comes before the split angle, plus a trailing `do_sharp_edges_tag` bool. */
    bm_mesh_edges_sharp_tag(bm, NULL, NULL, r_lnos, has_clnors ? (float)M_PI : split_angle, false);

    /* Finish computing lnos by accumulating face normals
     * in each fan of faces defined by sharp edges.
     * NOTE: trailing `do_rebuild` argument added to match the current signature. */
    bm_mesh_loops_calc_normals(
        bm, NULL, NULL, r_lnos, r_lnors_spacearr, clnors_data, cd_loop_clnors_offset, false);
  }
  else {
    BLI_assert(!r_lnors_spacearr);
    bm_mesh_loops_calc_normals_no_autosmooth(bm, NULL, NULL, r_lnos);
  }
}
#endif
1335 
1336 /**
1337  * \brief BMesh Compute Loop Normals from/to external data.
1338  *
1339  * Compute split normals, i.e. vertex normals associated with each poly (hence 'loop normals').
1340  * Useful to materialize sharp edges (or non-smooth faces) without actually modifying the geometry
1341  * (splitting edges).
1342  */
void BM_loops_calc_normal_vcos(BMesh *bm,
                               const float (*vcos)[3],
                               const float (*vnos)[3],
                               const float (*fnos)[3],
                               const bool use_split_normals,
                               const float split_angle,
                               float (*r_lnos)[3],
                               MLoopNorSpaceArray *r_lnors_spacearr,
                               short (*clnors_data)[2],
                               const int cd_loop_clnors_offset,
                               const bool do_rebuild)
{
  if (!use_split_normals) {
    /* Without split normals there can be no lnor space array either. */
    BLI_assert(!r_lnors_spacearr);
    bm_mesh_loops_calc_normals_no_autosmooth(bm, vnos, fnos, r_lnos);
    return;
  }

  /* Custom loop normals take over the angle-based auto-smooth:
   * use PI as threshold to disable the angle test in that case. */
  const bool has_clnors = clnors_data || (cd_loop_clnors_offset != -1);
  const float angle = has_clnors ? (float)M_PI : split_angle;

  /* First pass: tag smooth edges, and seed lnos from vnos where possible. */
  bm_mesh_edges_sharp_tag(bm, vnos, fnos, r_lnos, angle, false);

  /* Second pass: accumulate face normals in each fan of faces delimited by sharp edges. */
  bm_mesh_loops_calc_normals(
      bm, vcos, fnos, r_lnos, r_lnors_spacearr, clnors_data, cd_loop_clnors_offset, do_rebuild);
}
1372 
1373 /**
1374  * Define sharp edges as needed to mimic 'autosmooth' from angle threshold.
1375  *
1376  * Used when defining an empty custom loop normals data layer,
1377  * to keep same shading as with autosmooth!
1378  */
void BM_edges_sharp_from_angle_set(BMesh *bm, const float split_angle)
{
  /* An angle of PI (or more) can never mark any edge as sharp: skip the pass entirely. */
  if (split_angle < (float)M_PI) {
    bm_mesh_edges_sharp_tag(bm, NULL, NULL, NULL, split_angle, true);
  }
}
1388 
void BM_lnorspacearr_store(BMesh *bm, float (*r_lnors)[3])
{
  BLI_assert(bm->lnor_spacearr != NULL);

  /* Make sure the custom loop normal layer exists before querying its offset. */
  if (!CustomData_has_layer(&bm->ldata, CD_CUSTOMLOOPNORMAL)) {
    BM_data_layer_add(bm, &bm->ldata, CD_CUSTOMLOOPNORMAL);
  }
  const int cd_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);

  /* Compute split normals with the angle test disabled (PI), filling the space array. */
  BM_loops_calc_normal_vcos(
      bm, NULL, NULL, NULL, true, M_PI, r_lnors, bm->lnor_spacearr, NULL, cd_clnors_offset, false);

  /* Space array is now in sync with the mesh. */
  bm->spacearr_dirty &= ~(BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL);
}
1412 
1413 #define CLEAR_SPACEARRAY_THRESHOLD(x) ((x) / 2)
1414 
/**
 * Invalidate lnor spaces affected by the current vertex selection, tagging all loops of
 * selected vertices (and of their immediate loop-neighbor vertices) with
 * #BM_LNORSPACE_UPDATE. Falls back to invalidating everything when the selection is large,
 * or when no space array exists yet.
 *
 * \param do_invalidate_all: force full invalidation regardless of selection size.
 */
void BM_lnorspace_invalidate(BMesh *bm, const bool do_invalidate_all)
{
  if (bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL) {
    /* Already fully dirty, nothing more to track. */
    return;
  }
  if (do_invalidate_all || bm->totvertsel > CLEAR_SPACEARRAY_THRESHOLD(bm->totvert)) {
    /* Too many selected vertices: cheaper to just rebuild everything. */
    bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
    return;
  }
  if (bm->lnor_spacearr == NULL) {
    bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
    return;
  }

  BMVert *v;
  BMLoop *l;
  BMIter viter, liter;
  /* Note: we could use temp tag of BMItem for that,
   * but probably better not use it in such a low-level func?
   * --mont29 */
  BLI_bitmap *done_verts = BLI_BITMAP_NEW(bm->totvert, __func__);

  BM_mesh_elem_index_ensure(bm, BM_VERT);

  /* When we affect a given vertex, we may affect following smooth fans:
   *     - all smooth fans of said vertex;
   *     - all smooth fans of all immediate loop-neighbors vertices;
   * This can be simplified as 'all loops of selected vertices and their immediate neighbors'
   * need to be tagged for update.
   */
  BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
    if (BM_elem_flag_test(v, BM_ELEM_SELECT)) {
      BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
        BM_ELEM_API_FLAG_ENABLE(l, BM_LNORSPACE_UPDATE);

        /* Note that we only handle unselected neighbor vertices here, main loop will take care of
         * selected ones. */
        if ((!BM_elem_flag_test(l->prev->v, BM_ELEM_SELECT)) &&
            !BLI_BITMAP_TEST(done_verts, BM_elem_index_get(l->prev->v))) {

          BMLoop *l_prev;
          BMIter liter_prev;
          BM_ITER_ELEM (l_prev, &liter_prev, l->prev->v, BM_LOOPS_OF_VERT) {
            BM_ELEM_API_FLAG_ENABLE(l_prev, BM_LNORSPACE_UPDATE);
          }
          /* FIX: was `l_prev->v`, but the iterator above leaves `l_prev` NULL once it
           * finishes, so that was a NULL dereference. The vertex just processed is
           * `l->prev->v`. */
          BLI_BITMAP_ENABLE(done_verts, BM_elem_index_get(l->prev->v));
        }

        if ((!BM_elem_flag_test(l->next->v, BM_ELEM_SELECT)) &&
            !BLI_BITMAP_TEST(done_verts, BM_elem_index_get(l->next->v))) {

          BMLoop *l_next;
          BMIter liter_next;
          BM_ITER_ELEM (l_next, &liter_next, l->next->v, BM_LOOPS_OF_VERT) {
            BM_ELEM_API_FLAG_ENABLE(l_next, BM_LNORSPACE_UPDATE);
          }
          /* FIX: same as above, was `l_next->v` (NULL after the iterator). */
          BLI_BITMAP_ENABLE(done_verts, BM_elem_index_get(l->next->v));
        }
      }

      BLI_BITMAP_ENABLE(done_verts, BM_elem_index_get(v));
    }
  }

  MEM_freeN(done_verts);
  bm->spacearr_dirty |= BM_SPACEARR_DIRTY;
}
1482 
/**
 * Rebuild the loop normal spaces for loops tagged with #BM_LNORSPACE_UPDATE
 * (or for all loops when #BM_SPACEARR_DIRTY_ALL is set).
 *
 * \param preserve_clnor: if true, decode each affected loop's custom normal with the *old*
 * spaces before rebuilding, then re-encode it with the *new* spaces afterwards, so the
 * effective custom normal is preserved across the rebuild.
 * NOTE(review): assumes the #CD_CUSTOMLOOPNORMAL layer exists when \a preserve_clnor is
 * true (offset is used unchecked) -- confirm with callers.
 */
void BM_lnorspace_rebuild(BMesh *bm, bool preserve_clnor)
{
  BLI_assert(bm->lnor_spacearr != NULL);

  /* Nothing to do if the space array is up to date. */
  if (!(bm->spacearr_dirty & (BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL))) {
    return;
  }
  BMFace *f;
  BMLoop *l;
  BMIter fiter, liter;

  float(*r_lnors)[3] = MEM_callocN(sizeof(*r_lnors) * bm->totloop, __func__);
  /* Plain 3D normals decoded from clnor data, only needed when preserving them. */
  float(*oldnors)[3] = preserve_clnor ? MEM_mallocN(sizeof(*oldnors) * bm->totloop, __func__) :
                                        NULL;

  int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);

  BM_mesh_elem_index_ensure(bm, BM_LOOP);

  if (preserve_clnor) {
    BLI_assert(bm->lnor_spacearr->lspacearr != NULL);

    /* Decode affected loops' custom normals using the old (pre-rebuild) spaces. */
    BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
      BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
        if (BM_ELEM_API_FLAG_TEST(l, BM_LNORSPACE_UPDATE) ||
            bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL) {
          short(*clnor)[2] = BM_ELEM_CD_GET_VOID_P(l, cd_loop_clnors_offset);
          int l_index = BM_elem_index_get(l);

          BKE_lnor_space_custom_data_to_normal(
              bm->lnor_spacearr->lspacearr[l_index], *clnor, oldnors[l_index]);
        }
      }
    }
  }

  if (bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL) {
    BKE_lnor_spacearr_clear(bm->lnor_spacearr);
  }
  /* Recompute split normals and rebuild the lnor spaces (do_rebuild=true). */
  BM_loops_calc_normal_vcos(bm,
                            NULL,
                            NULL,
                            NULL,
                            true,
                            M_PI,
                            r_lnors,
                            bm->lnor_spacearr,
                            NULL,
                            cd_loop_clnors_offset,
                            true);
  MEM_freeN(r_lnors);

  /* Re-encode the saved normals with the *new* spaces, and clear the update tags. */
  BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
    BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
      if (BM_ELEM_API_FLAG_TEST(l, BM_LNORSPACE_UPDATE) ||
          bm->spacearr_dirty & BM_SPACEARR_DIRTY_ALL) {
        if (preserve_clnor) {
          short(*clnor)[2] = BM_ELEM_CD_GET_VOID_P(l, cd_loop_clnors_offset);
          int l_index = BM_elem_index_get(l);
          BKE_lnor_space_custom_normal_to_data(
              bm->lnor_spacearr->lspacearr[l_index], oldnors[l_index], *clnor);
        }
        BM_ELEM_API_FLAG_DISABLE(l, BM_LNORSPACE_UPDATE);
      }
    }
  }

  MEM_SAFE_FREE(oldnors);
  bm->spacearr_dirty &= ~(BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL);

#ifndef NDEBUG
  BM_lnorspace_err(bm);
#endif
}
1557 
/**
 * Ensure the BMesh has a valid loop normal space array,
 * creating it from scratch or rebuilding it as needed.
 */
void BM_lnorspace_update(BMesh *bm)
{
  if (bm->lnor_spacearr == NULL) {
    bm->lnor_spacearr = MEM_callocN(sizeof(*bm->lnor_spacearr), __func__);
  }

  if (bm->lnor_spacearr->lspacearr != NULL) {
    /* Spaces already exist: only rebuild when flagged dirty. */
    if (bm->spacearr_dirty & (BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL)) {
      BM_lnorspace_rebuild(bm, false);
    }
  }
  else {
    /* No spaces yet: compute and store them from scratch. */
    float(*loop_normals)[3] = MEM_callocN(sizeof(*loop_normals) * bm->totloop, __func__);

    BM_lnorspacearr_store(bm, loop_normals);

    MEM_freeN(loop_normals);
  }
}
1574 
/**
 * Tag smooth edges and reset loop tags, while ensuring edge/face/loop indices are valid.
 *
 * When \a do_edges is true: every edge gets its index set inline, and #BM_ELEM_TAG is
 * enabled only on edges that have exactly two loops, are smooth, and whose two loops do
 * not start at the same vertex (same start vertex indicates mismatched face winding).
 * Face and loop indices are always set inline, and #BM_ELEM_TAG is cleared on all loops.
 */
void BM_normals_loops_edges_tag(BMesh *bm, const bool do_edges)
{
  BMFace *f;
  BMEdge *e;
  BMIter fiter, eiter;
  BMLoop *l_curr, *l_first;

  if (do_edges) {
    int index_edge;
    BM_ITER_MESH_INDEX (e, &eiter, bm, BM_EDGES_OF_MESH, index_edge) {
      BMLoop *l_a, *l_b;

      BM_elem_index_set(e, index_edge); /* set_inline */
      BM_elem_flag_disable(e, BM_ELEM_TAG);
      /* Only edges shared by exactly two loops can be smooth-tagged. */
      if (BM_edge_loop_pair(e, &l_a, &l_b)) {
        if (BM_elem_flag_test(e, BM_ELEM_SMOOTH) && l_a->v != l_b->v) {
          BM_elem_flag_enable(e, BM_ELEM_TAG);
        }
      }
    }
    bm->elem_index_dirty &= ~BM_EDGE;
  }

  /* Loop indices are assigned in face iteration order, continuous across faces. */
  int index_face, index_loop = 0;
  BM_ITER_MESH_INDEX (f, &fiter, bm, BM_FACES_OF_MESH, index_face) {
    BM_elem_index_set(f, index_face); /* set_inline */
    l_curr = l_first = BM_FACE_FIRST_LOOP(f);
    do {
      BM_elem_index_set(l_curr, index_loop++); /* set_inline */
      BM_elem_flag_disable(l_curr, BM_ELEM_TAG);
    } while ((l_curr = l_curr->next) != l_first);
  }
  bm->elem_index_dirty &= ~(BM_FACE | BM_LOOP);
}
1609 
1610 /**
1611  * Auxiliary function only used by rebuild to detect if any spaces were not marked as invalid.
1612  * Reports error if any of the lnor spaces change after rebuilding, meaning that all the possible
1613  * lnor spaces to be rebuilt were not correctly marked.
1614  */
1615 #ifndef NDEBUG
void BM_lnorspace_err(BMesh *bm)
{
  bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
  bool clear = true;

  /* NOTE: #MEM_callocN zero-fills, so there is no need to explicitly
   * NULL `temp->lspacearr` before initializing it. */
  MLoopNorSpaceArray *temp = MEM_callocN(sizeof(*temp), __func__);

  BKE_lnor_spacearr_init(temp, bm->totloop, MLNOR_SPACEARR_BMLOOP_PTR);

  /* Recompute all loop normal spaces from scratch into `temp`. */
  int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
  float(*lnors)[3] = MEM_callocN(sizeof(*lnors) * bm->totloop, __func__);
  BM_loops_calc_normal_vcos(
      bm, NULL, NULL, NULL, true, M_PI, lnors, temp, NULL, cd_loop_clnors_offset, true);

  for (int i = 0; i < bm->totloop; i++) {
    /* `j` counts how many of the five space members match within epsilon;
     * all five must match for the space to be considered unchanged. */
    int j = 0;
    j += compare_ff(
        temp->lspacearr[i]->ref_alpha, bm->lnor_spacearr->lspacearr[i]->ref_alpha, 1e-4f);
    j += compare_ff(
        temp->lspacearr[i]->ref_beta, bm->lnor_spacearr->lspacearr[i]->ref_beta, 1e-4f);
    j += compare_v3v3(
        temp->lspacearr[i]->vec_lnor, bm->lnor_spacearr->lspacearr[i]->vec_lnor, 1e-4f);
    j += compare_v3v3(
        temp->lspacearr[i]->vec_ortho, bm->lnor_spacearr->lspacearr[i]->vec_ortho, 1e-4f);
    j += compare_v3v3(
        temp->lspacearr[i]->vec_ref, bm->lnor_spacearr->lspacearr[i]->vec_ref, 1e-4f);

    if (j != 5) {
      clear = false;
      break;
    }
  }
  BKE_lnor_spacearr_free(temp);
  MEM_freeN(temp);
  MEM_freeN(lnors);
  BLI_assert(clear);

  bm->spacearr_dirty &= ~BM_SPACEARR_DIRTY_ALL;
}
1656 #endif
1657 
/**
 * Mark loop \a l (and, depending on flags, related loops) for custom normal editing,
 * incrementing \a totloopsel for every loop newly enabled in the \a loops bitmap.
 */
static void bm_loop_normal_mark_indiv_do_loop(BMLoop *l,
                                              BLI_bitmap *loops,
                                              MLoopNorSpaceArray *lnor_spacearr,
                                              int *totloopsel,
                                              const bool do_all_loops_of_vert)
{
  if (l == NULL) {
    return;
  }

  const int l_idx = BM_elem_index_get(l);
  if (BLI_BITMAP_TEST(loops, l_idx)) {
    return;
  }

  /* If vert and face selected share a loop, mark it for editing. */
  BLI_BITMAP_ENABLE(loops, l_idx);
  (*totloopsel)++;

  if (do_all_loops_of_vert) {
    /* If required, also mark all loops shared by that vertex.
     * This is needed when loop spaces may change
     * (i.e. when some faces or edges might change of smooth/sharp status). */
    BMIter liter;
    BMLoop *l_of_vert;
    BM_ITER_ELEM (l_of_vert, &liter, l->v, BM_LOOPS_OF_VERT) {
      const int idx = BM_elem_index_get(l_of_vert);
      if (!BLI_BITMAP_TEST(loops, idx)) {
        BLI_BITMAP_ENABLE(loops, idx);
        (*totloopsel)++;
      }
    }
  }
  else if ((lnor_spacearr->lspacearr[l_idx]->flags & MLNOR_SPACE_IS_SINGLE) == 0) {
    /* Mark all loops in same loop normal space (aka smooth fan). */
    for (LinkNode *node = lnor_spacearr->lspacearr[l_idx]->loops; node; node = node->next) {
      const int idx = BM_elem_index_get((BMLoop *)node->link);
      if (!BLI_BITMAP_TEST(loops, idx)) {
        BLI_BITMAP_ENABLE(loops, idx);
        (*totloopsel)++;
      }
    }
  }
}
1701 
1702 /* Mark the individual clnors to be edited, if multiple selection methods are used. */
/**
 * Mark the individual clnors to be edited, based on the active selection modes.
 *
 * When both face mode and vert/edge mode are enabled, the selection history is used
 * to pick single loops (a vert/edge selected *before* a face selects only the loop(s)
 * they share). Otherwise all loops of selected faces/edges/verts are marked.
 *
 * \param loops: Bitmap over loop indices; bits are enabled for every loop to edit.
 * \param do_all_loops_of_vert: Also mark all loops around each affected vertex
 * (instead of only loops in the same smooth fan).
 * \return The number of loops marked.
 */
static int bm_loop_normal_mark_indiv(BMesh *bm, BLI_bitmap *loops, const bool do_all_loops_of_vert)
{
  BMEditSelection *ese, *ese_prev;
  int totloopsel = 0;

  const bool sel_verts = (bm->selectmode & SCE_SELECT_VERTEX) != 0;
  const bool sel_edges = (bm->selectmode & SCE_SELECT_EDGE) != 0;
  const bool sel_faces = (bm->selectmode & SCE_SELECT_FACE) != 0;
  const bool use_sel_face_history = sel_faces && (sel_edges || sel_verts);

  BM_mesh_elem_index_ensure(bm, BM_LOOP);

  BLI_assert(bm->lnor_spacearr != NULL);
  BLI_assert(bm->lnor_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR);

  if (use_sel_face_history) {
    /* Using face history allows to select a single loop from a single face...
     * Note that this is an O(n^2) piece of code, but it is not designed to be
     * used with huge selection sets - rather with only a few items selected at most. */
    /* Goes from last selected to the first selected element. */
    for (ese = bm->selected.last; ese; ese = ese->prev) {
      if (ese->htype == BM_FACE) {
        /* If current face is selected,
         * then any verts to be edited must have been selected before it. */
        for (ese_prev = ese->prev; ese_prev; ese_prev = ese_prev->prev) {
          if (ese_prev->htype == BM_VERT) {
            bm_loop_normal_mark_indiv_do_loop(
                BM_face_vert_share_loop((BMFace *)ese->ele, (BMVert *)ese_prev->ele),
                loops,
                bm->lnor_spacearr,
                &totloopsel,
                do_all_loops_of_vert);
          }
          else if (ese_prev->htype == BM_EDGE) {
            /* An edge contributes the loops shared with the face at both its vertices. */
            BMEdge *e = (BMEdge *)ese_prev->ele;
            bm_loop_normal_mark_indiv_do_loop(BM_face_vert_share_loop((BMFace *)ese->ele, e->v1),
                                              loops,
                                              bm->lnor_spacearr,
                                              &totloopsel,
                                              do_all_loops_of_vert);

            bm_loop_normal_mark_indiv_do_loop(BM_face_vert_share_loop((BMFace *)ese->ele, e->v2),
                                              loops,
                                              bm->lnor_spacearr,
                                              &totloopsel,
                                              do_all_loops_of_vert);
          }
        }
      }
    }
  }
  else {
    if (sel_faces) {
      /* Only select all loops of selected faces. */
      BMLoop *l;
      BMFace *f;
      BMIter liter, fiter;
      BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
        if (BM_elem_flag_test(f, BM_ELEM_SELECT)) {
          BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
            bm_loop_normal_mark_indiv_do_loop(
                l, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
          }
        }
      }
    }
    if (sel_edges) {
      /* Only select all loops of selected edges. */
      BMLoop *l;
      BMEdge *e;
      BMIter liter, eiter;
      BM_ITER_MESH (e, &eiter, bm, BM_EDGES_OF_MESH) {
        if (BM_elem_flag_test(e, BM_ELEM_SELECT)) {
          BM_ITER_ELEM (l, &liter, e, BM_LOOPS_OF_EDGE) {
            bm_loop_normal_mark_indiv_do_loop(
                l, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
            /* Loops actually 'have' two edges, or said otherwise, a selected edge actually selects
             * *two* loops in each of its faces. We have to find the other one too. */
            if (BM_vert_in_edge(e, l->next->v)) {
              bm_loop_normal_mark_indiv_do_loop(
                  l->next, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
            }
            else {
              BLI_assert(BM_vert_in_edge(e, l->prev->v));
              bm_loop_normal_mark_indiv_do_loop(
                  l->prev, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
            }
          }
        }
      }
    }
    if (sel_verts) {
      /* Select all loops of selected verts. */
      BMLoop *l;
      BMVert *v;
      BMIter liter, viter;
      BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
        if (BM_elem_flag_test(v, BM_ELEM_SELECT)) {
          BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
            bm_loop_normal_mark_indiv_do_loop(
                l, loops, bm->lnor_spacearr, &totloopsel, do_all_loops_of_vert);
          }
        }
      }
    }
  }

  return totloopsel;
}
1813 
/**
 * Fill a single #BMLoopNorEditData entry for loop \a l,
 * decoding its stored custom normal data via the loop's normal space.
 */
static void loop_normal_editdata_init(
    BMesh *bm, BMLoopNorEditData *lnor_ed, BMVert *v, BMLoop *l, const int offset)
{
  BLI_assert(bm->lnor_spacearr != NULL);
  BLI_assert(bm->lnor_spacearr->lspacearr != NULL);

  short *clnors_data = BM_ELEM_CD_GET_VOID_P(l, offset);
  const int l_index = BM_elem_index_get(l);

  /* Decode the packed custom normal into a full 3D vector. */
  float normal[3];
  BKE_lnor_space_custom_data_to_normal(bm->lnor_spacearr->lspacearr[l_index], clnors_data, normal);

  lnor_ed->loop_index = l_index;
  lnor_ed->loop = l;
  lnor_ed->clnors_data = clnors_data;

  /* Both the current and initial normal start out as the decoded value. */
  copy_v3_v3(lnor_ed->nloc, normal);
  copy_v3_v3(lnor_ed->niloc, normal);

  lnor_ed->loc = v->co;
}
1836 
/**
 * Build the edit-data array for custom loop normal editing.
 *
 * Adds a CD_CUSTOMLOOPNORMAL layer when missing, marks the loops to edit based on the
 * current selection (see #bm_loop_normal_mark_indiv), and initializes one
 * #BMLoopNorEditData entry per marked loop.
 *
 * \param do_all_loops_of_vert: Also mark all loops around each affected vertex.
 * \return A newly allocated array; free with #BM_loop_normal_editdata_array_free.
 * Ownership is transferred to the caller.
 */
BMLoopNorEditDataArray *BM_loop_normal_editdata_array_init(BMesh *bm,
                                                           const bool do_all_loops_of_vert)
{
  BMLoop *l;
  BMVert *v;
  BMIter liter, viter;

  int totloopsel = 0;

  /* Loop normal spaces must be valid before editing (see #BM_lnorspace_update). */
  BLI_assert(bm->spacearr_dirty == 0);

  BMLoopNorEditDataArray *lnors_ed_arr = MEM_callocN(sizeof(*lnors_ed_arr), __func__);
  /* Maps loop index -> its edit data entry; stays NULL for unselected loops. */
  lnors_ed_arr->lidx_to_lnor_editdata = MEM_callocN(
      sizeof(*lnors_ed_arr->lidx_to_lnor_editdata) * bm->totloop, __func__);

  if (!CustomData_has_layer(&bm->ldata, CD_CUSTOMLOOPNORMAL)) {
    BM_data_layer_add(bm, &bm->ldata, CD_CUSTOMLOOPNORMAL);
  }
  const int cd_custom_normal_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);

  BM_mesh_elem_index_ensure(bm, BM_LOOP);

  BLI_bitmap *loops = BLI_BITMAP_NEW(bm->totloop, __func__);

  /* This function define loop normals to edit, based on selection modes and history. */
  totloopsel = bm_loop_normal_mark_indiv(bm, loops, do_all_loops_of_vert);

  if (totloopsel) {
    BMLoopNorEditData *lnor_ed = lnors_ed_arr->lnor_editdata = MEM_mallocN(
        sizeof(*lnor_ed) * totloopsel, __func__);

    /* Fill one entry per marked loop, iterating loops through their vertices. */
    BM_ITER_MESH (v, &viter, bm, BM_VERTS_OF_MESH) {
      BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
        if (BLI_BITMAP_TEST(loops, BM_elem_index_get(l))) {
          loop_normal_editdata_init(bm, lnor_ed, v, l, cd_custom_normal_offset);
          lnors_ed_arr->lidx_to_lnor_editdata[BM_elem_index_get(l)] = lnor_ed;
          lnor_ed++;
        }
      }
    }
    lnors_ed_arr->totloop = totloopsel;
  }

  MEM_freeN(loops);
  lnors_ed_arr->cd_custom_normal_offset = cd_custom_normal_offset;
  return lnors_ed_arr;
}
1884 
/**
 * Free an edit-data array created by #BM_loop_normal_editdata_array_init,
 * including its member arrays.
 */
void BM_loop_normal_editdata_array_free(BMLoopNorEditDataArray *lnors_ed_arr)
{
  /* Member arrays may be NULL when nothing was selected, MEM_SAFE_FREE handles that. */
  MEM_SAFE_FREE(lnors_ed_arr->lidx_to_lnor_editdata);
  MEM_SAFE_FREE(lnors_ed_arr->lnor_editdata);
  MEM_freeN(lnors_ed_arr);
}
1891 
/**
 * Bake the packed custom loop normals (CD_CUSTOMLOOPNORMAL) into a plain
 * per-loop vector layer (CD_NORMAL), creating that layer as temporary if needed.
 *
 * \return false when the mesh has no CD_CUSTOMLOOPNORMAL layer (nothing to do),
 * true otherwise.
 */
bool BM_custom_loop_normals_to_vector_layer(BMesh *bm)
{
  BMFace *f;
  BMLoop *l;
  BMIter liter, fiter;

  if (!CustomData_has_layer(&bm->ldata, CD_CUSTOMLOOPNORMAL)) {
    return false;
  }

  /* Spaces are needed to decode the packed clnor data below. */
  BM_lnorspace_update(bm);
  BM_mesh_elem_index_ensure(bm, BM_LOOP);

  /* Create a loop normal layer. */
  if (!CustomData_has_layer(&bm->ldata, CD_NORMAL)) {
    BM_data_layer_add(bm, &bm->ldata, CD_NORMAL);

    /* Flag as temporary so it is not kept permanently. */
    CustomData_set_layer_flag(&bm->ldata, CD_NORMAL, CD_FLAG_TEMPORARY);
  }

  const int cd_custom_normal_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
  const int cd_normal_offset = CustomData_get_offset(&bm->ldata, CD_NORMAL);

  BM_ITER_MESH (f, &fiter, bm, BM_FACES_OF_MESH) {
    BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
      const int l_index = BM_elem_index_get(l);
      const short *clnors_data = BM_ELEM_CD_GET_VOID_P(l, cd_custom_normal_offset);
      float *normal = BM_ELEM_CD_GET_VOID_P(l, cd_normal_offset);

      /* Decode the packed custom normal into the CD_NORMAL layer. */
      BKE_lnor_space_custom_data_to_normal(
          bm->lnor_spacearr->lspacearr[l_index], clnors_data, normal);
    }
  }

  return true;
}
1928 
/**
 * Re-encode the per-loop vector layer (CD_NORMAL) back into packed custom
 * loop normal data (CD_CUSTOMLOOPNORMAL).
 *
 * Does nothing when either layer is missing.
 *
 * \param add_sharp_edges: Forwarded to #bm_mesh_loops_custom_normals_set
 * (presumably allows tagging edges sharp where needed — see that function).
 */
void BM_custom_loop_normals_from_vector_layer(BMesh *bm, bool add_sharp_edges)
{
  if (!CustomData_has_layer(&bm->ldata, CD_CUSTOMLOOPNORMAL) ||
      !CustomData_has_layer(&bm->ldata, CD_NORMAL)) {
    return;
  }

  const int cd_custom_normal_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
  const int cd_normal_offset = CustomData_get_offset(&bm->ldata, CD_NORMAL);

  if (bm->lnor_spacearr == NULL) {
    bm->lnor_spacearr = MEM_callocN(sizeof(*bm->lnor_spacearr), __func__);
  }

  bm_mesh_loops_custom_normals_set(bm,
                                   NULL,
                                   NULL,
                                   NULL,
                                   bm->lnor_spacearr,
                                   NULL,
                                   cd_custom_normal_offset,
                                   NULL,
                                   cd_normal_offset,
                                   add_sharp_edges);

  /* Spaces were rebuilt as part of the set operation above. */
  bm->spacearr_dirty &= ~(BM_SPACEARR_DIRTY | BM_SPACEARR_DIRTY_ALL);
}
1956 
1957 /**
1958  * \brief BMesh Begin Edit
1959  *
1960  * Functions for setting up a mesh for editing and cleaning up after
1961  * the editing operations are done. These are called by the tools/operator
1962  * API for each time a tool is executed.
1963  */
void bmesh_edit_begin(BMesh *UNUSED(bm), BMOpTypeFlag UNUSED(type_flag))
{
  /* Most operators seem to be using BMO_OPTYPE_FLAG_UNTAN_MULTIRES to change the MDisps to
   * absolute space during mesh edits. With this enabled, changes to the topology
   * (loop cuts, edge subdivides, etc) are not reflected in the higher levels of
   * the mesh at all, which doesn't seem right. Turning off completely for now,
   * until this is shown to be better for certain types of mesh edits. */
  /* NOTE: the whole body is compiled out while BMOP_UNTAN_MULTIRES_ENABLED
   * stays undefined, making this function a no-op. */
#ifdef BMOP_UNTAN_MULTIRES_ENABLED
  /* switch multires data out of tangent space */
  if ((type_flag & BMO_OPTYPE_FLAG_UNTAN_MULTIRES) &&
      CustomData_has_layer(&bm->ldata, CD_MDISPS)) {
    bmesh_mdisps_space_set(bm, MULTIRES_SPACE_TANGENT, MULTIRES_SPACE_ABSOLUTE);

    /* ensure correct normals, if possible */
    bmesh_rationalize_normals(bm, 0);
    BM_mesh_normals_update(bm);
  }
#endif
}
1983 
1984 /**
1985  * \brief BMesh End Edit
1986  */
void bmesh_edit_end(BMesh *bm, BMOpTypeFlag type_flag)
{
  ListBase select_history;

  /* BMO_OPTYPE_FLAG_UNTAN_MULTIRES disabled for now, see comment above in bmesh_edit_begin. */
#ifdef BMOP_UNTAN_MULTIRES_ENABLED
  /* switch multires data into tangent space */
  if ((flag & BMO_OPTYPE_FLAG_UNTAN_MULTIRES) && CustomData_has_layer(&bm->ldata, CD_MDISPS)) {
    /* set normals to their previous winding */
    bmesh_rationalize_normals(bm, 1);
    bmesh_mdisps_space_set(bm, MULTIRES_SPACE_ABSOLUTE, MULTIRES_SPACE_TANGENT);
  }
  else if (flag & BMO_OP_FLAG_RATIONALIZE_NORMALS) {
    bmesh_rationalize_normals(bm, 1);
  }
#endif

  /* compute normals, clear temp flags and flush selections */
  if (type_flag & BMO_OPTYPE_FLAG_NORMALS_CALC) {
    bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
    BM_mesh_normals_update(bm);
  }

  /* When the operator does not validate selection itself, stash the selection
   * history away so the flush below cannot modify it, then restore it. */
  if ((type_flag & BMO_OPTYPE_FLAG_SELECT_VALIDATE) == 0) {
    select_history = bm->selected;
    BLI_listbase_clear(&bm->selected);
  }

  if (type_flag & BMO_OPTYPE_FLAG_SELECT_FLUSH) {
    BM_mesh_select_mode_flush(bm);
  }

  if ((type_flag & BMO_OPTYPE_FLAG_SELECT_VALIDATE) == 0) {
    bm->selected = select_history;
  }
  if (type_flag & BMO_OPTYPE_FLAG_INVALIDATE_CLNOR_ALL) {
    bm->spacearr_dirty |= BM_SPACEARR_DIRTY_ALL;
  }
}
2026 
/**
 * Ensure element indices for the types in \a htype are valid,
 * optionally applying running offsets.
 *
 * \param elem_offset: When non-NULL, starting indices per type
 * ([0]=vert, [1]=edge, [2]=loop, [3]=face); on return each entry is advanced by the
 * element count of that type. When an offset ends up non-zero, the corresponding
 * dirty flag is re-set, since indices then no longer start at zero.
 * Pass NULL for the plain 0-based case.
 */
void BM_mesh_elem_index_ensure_ex(BMesh *bm, const char htype, int elem_offset[4])
{

#ifdef DEBUG
  BM_ELEM_INDEX_VALIDATE(bm, "Should Never Fail!", __func__);
#endif

  if (elem_offset == NULL) {
    /* Simple case: early out when none of the requested types are dirty. */
    const char htype_needed = bm->elem_index_dirty & htype;
    if (htype_needed == 0) {
      goto finally;
    }
  }

  if (htype & BM_VERT) {
    if ((bm->elem_index_dirty & BM_VERT) || (elem_offset && elem_offset[0])) {
      BMIter iter;
      BMElem *ele;

      int index = elem_offset ? elem_offset[0] : 0;
      BM_ITER_MESH (ele, &iter, bm, BM_VERTS_OF_MESH) {
        BM_elem_index_set(ele, index++); /* set_ok */
      }
      BLI_assert(elem_offset || index == bm->totvert);
    }
    else {
      // printf("%s: skipping vert index calc!\n", __func__);
    }
  }

  if (htype & BM_EDGE) {
    if ((bm->elem_index_dirty & BM_EDGE) || (elem_offset && elem_offset[1])) {
      BMIter iter;
      BMElem *ele;

      int index = elem_offset ? elem_offset[1] : 0;
      BM_ITER_MESH (ele, &iter, bm, BM_EDGES_OF_MESH) {
        BM_elem_index_set(ele, index++); /* set_ok */
      }
      BLI_assert(elem_offset || index == bm->totedge);
    }
    else {
      // printf("%s: skipping edge index calc!\n", __func__);
    }
  }

  if (htype & (BM_FACE | BM_LOOP)) {
    if ((bm->elem_index_dirty & (BM_FACE | BM_LOOP)) ||
        (elem_offset && (elem_offset[2] || elem_offset[3]))) {
      BMIter iter;
      BMElem *ele;

      const bool update_face = (htype & BM_FACE) && (bm->elem_index_dirty & BM_FACE);
      const bool update_loop = (htype & BM_LOOP) && (bm->elem_index_dirty & BM_LOOP);

      int index_loop = elem_offset ? elem_offset[2] : 0;
      int index = elem_offset ? elem_offset[3] : 0;

      /* Faces and loops share one pass; loop indices run continuously across faces. */
      BM_ITER_MESH (ele, &iter, bm, BM_FACES_OF_MESH) {
        if (update_face) {
          BM_elem_index_set(ele, index++); /* set_ok */
        }

        if (update_loop) {
          BMLoop *l_iter, *l_first;

          l_iter = l_first = BM_FACE_FIRST_LOOP((BMFace *)ele);
          do {
            BM_elem_index_set(l_iter, index_loop++); /* set_ok */
          } while ((l_iter = l_iter->next) != l_first);
        }
      }

      /* Both asserts are trivially true when their `update_*` flag is false,
       * so no extra `if (update_loop)` wrapper is needed around the second one. */
      BLI_assert(elem_offset || !update_face || index == bm->totface);
      BLI_assert(elem_offset || !update_loop || index_loop == bm->totloop);
    }
    else {
      // printf("%s: skipping face/loop index calc!\n", __func__);
    }
  }

finally:
  bm->elem_index_dirty &= ~htype;
  if (elem_offset) {
    /* Advance offsets; keep dirty flags set for types whose indices are offset. */
    if (htype & BM_VERT) {
      elem_offset[0] += bm->totvert;
      if (elem_offset[0] != bm->totvert) {
        bm->elem_index_dirty |= BM_VERT;
      }
    }
    if (htype & BM_EDGE) {
      elem_offset[1] += bm->totedge;
      if (elem_offset[1] != bm->totedge) {
        bm->elem_index_dirty |= BM_EDGE;
      }
    }
    if (htype & BM_LOOP) {
      elem_offset[2] += bm->totloop;
      if (elem_offset[2] != bm->totloop) {
        bm->elem_index_dirty |= BM_LOOP;
      }
    }
    if (htype & BM_FACE) {
      elem_offset[3] += bm->totface;
      if (elem_offset[3] != bm->totface) {
        bm->elem_index_dirty |= BM_FACE;
      }
    }
  }
}
2140 
/** Ensure 0-based element indices for the types in \a htype (no offsets). */
void BM_mesh_elem_index_ensure(BMesh *bm, const char htype)
{
  BM_mesh_elem_index_ensure_ex(bm, htype, NULL);
}
2145 
2146 /**
2147  * Array checking/setting macros
2148  *
2149  * Currently vert/edge/loop/face index data is being abused, in a few areas of the code.
2150  *
2151  * To avoid correcting them afterwards, set 'bm->elem_index_dirty' however its possible
2152  * this flag is set incorrectly which could crash blender.
2153  *
 * Code that calls this function may depend on dirty indices being set.
2155  * Keep this function read-only.
2156  */
2157 
/**
 * Validate vert/edge/face indices against iteration order, reporting mismatches
 * to stderr. Read-only: never repairs indices or dirty flags (see comment above).
 *
 * \param location, func, msg_a, msg_b: Caller context strings included in the report.
 */
void BM_mesh_elem_index_validate(
    BMesh *bm, const char *location, const char *func, const char *msg_a, const char *msg_b)
{
  const char iter_types[3] = {BM_VERTS_OF_MESH, BM_EDGES_OF_MESH, BM_FACES_OF_MESH};

  const char flag_types[3] = {BM_VERT, BM_EDGE, BM_FACE};
  const char *type_names[3] = {"vert", "edge", "face"};

  BMIter iter;
  BMElem *ele;
  int i;
  bool is_any_error = false; /* use `false`, not `0`, to initialize a bool */

  for (i = 0; i < 3; i++) {
    const bool is_dirty = (flag_types[i] & bm->elem_index_dirty) != 0;
    int index = 0;
    bool is_error = false;
    int err_val = 0;
    int err_idx = 0;

    /* Indices are only expected to be valid when the type is not flagged dirty. */
    BM_ITER_MESH (ele, &iter, bm, iter_types[i]) {
      if (!is_dirty) {
        if (BM_elem_index_get(ele) != index) {
          err_val = BM_elem_index_get(ele);
          err_idx = index;
          is_error = true;
          break;
        }
      }
      index++;
    }

    if ((is_error == true) && (is_dirty == false)) {
      is_any_error = true;
      fprintf(stderr,
              "Invalid Index: at %s, %s, %s[%d] invalid index %d, '%s', '%s'\n",
              location,
              func,
              type_names[i],
              err_idx,
              err_val,
              msg_a,
              msg_b);
    }
    else if ((is_error == false) && (is_dirty == true)) {

#if 0 /* mostly annoying */

      /* dirty may have been incorrectly set */
      fprintf(stderr,
              "Invalid Dirty: at %s, %s (%s), dirty flag was set but all index values are "
              "correct, '%s', '%s'\n",
              location,
              func,
              type_names[i],
              msg_a,
              msg_b);
#endif
    }
  }

#if 0 /* mostly annoying, even in debug mode */
#  ifdef DEBUG
  if (is_any_error == 0) {
    fprintf(stderr, "Valid Index Success: at %s, %s, '%s', '%s'\n", location, func, msg_a, msg_b);
  }
#  endif
#endif
  (void)is_any_error; /* shut up the compiler */
}
2228 
2229 /* debug check only - no need to optimize */
2230 #ifndef NDEBUG
BM_mesh_elem_table_check(BMesh * bm)2231 bool BM_mesh_elem_table_check(BMesh *bm)
2232 {
2233   BMIter iter;
2234   BMElem *ele;
2235   int i;
2236 
2237   if (bm->vtable && ((bm->elem_table_dirty & BM_VERT) == 0)) {
2238     BM_ITER_MESH_INDEX (ele, &iter, bm, BM_VERTS_OF_MESH, i) {
2239       if (ele != (BMElem *)bm->vtable[i]) {
2240         return false;
2241       }
2242     }
2243   }
2244 
2245   if (bm->etable && ((bm->elem_table_dirty & BM_EDGE) == 0)) {
2246     BM_ITER_MESH_INDEX (ele, &iter, bm, BM_EDGES_OF_MESH, i) {
2247       if (ele != (BMElem *)bm->etable[i]) {
2248         return false;
2249       }
2250     }
2251   }
2252 
2253   if (bm->ftable && ((bm->elem_table_dirty & BM_FACE) == 0)) {
2254     BM_ITER_MESH_INDEX (ele, &iter, bm, BM_FACES_OF_MESH, i) {
2255       if (ele != (BMElem *)bm->ftable[i]) {
2256         return false;
2257       }
2258     }
2259   }
2260 
2261   return true;
2262 }
2263 #endif
2264 
/**
 * Ensure the vert/edge/face lookup tables requested in \a htype are allocated and
 * filled in iteration order. Loops have no table (\a htype must be within
 * #BM_ALL_NOLOOP).
 */
void BM_mesh_elem_table_ensure(BMesh *bm, const char htype)
{
  /* assume if the array is non-null then its valid and no need to recalc */
  const char htype_needed =
      (((bm->vtable && ((bm->elem_table_dirty & BM_VERT) == 0)) ? 0 : BM_VERT) |
       ((bm->etable && ((bm->elem_table_dirty & BM_EDGE) == 0)) ? 0 : BM_EDGE) |
       ((bm->ftable && ((bm->elem_table_dirty & BM_FACE) == 0)) ? 0 : BM_FACE)) &
      htype;

  BLI_assert((htype & ~BM_ALL_NOLOOP) == 0);

  /* in debug mode double check we didn't need to recalculate */
  BLI_assert(BM_mesh_elem_table_check(bm) == true);

  if (htype_needed == 0) {
    goto finally;
  }

  /* Reallocation policy below: reuse the existing array only while the element
   * count is between half and the full current capacity, else realloc to fit. */
  if (htype_needed & BM_VERT) {
    if (bm->vtable && bm->totvert <= bm->vtable_tot && bm->totvert * 2 >= bm->vtable_tot) {
      /* pass (re-use the array) */
    }
    else {
      if (bm->vtable) {
        MEM_freeN(bm->vtable);
      }
      bm->vtable = MEM_mallocN(sizeof(void **) * bm->totvert, "bm->vtable");
      bm->vtable_tot = bm->totvert;
    }
  }
  if (htype_needed & BM_EDGE) {
    if (bm->etable && bm->totedge <= bm->etable_tot && bm->totedge * 2 >= bm->etable_tot) {
      /* pass (re-use the array) */
    }
    else {
      if (bm->etable) {
        MEM_freeN(bm->etable);
      }
      bm->etable = MEM_mallocN(sizeof(void **) * bm->totedge, "bm->etable");
      bm->etable_tot = bm->totedge;
    }
  }
  if (htype_needed & BM_FACE) {
    if (bm->ftable && bm->totface <= bm->ftable_tot && bm->totface * 2 >= bm->ftable_tot) {
      /* pass (re-use the array) */
    }
    else {
      if (bm->ftable) {
        MEM_freeN(bm->ftable);
      }
      bm->ftable = MEM_mallocN(sizeof(void **) * bm->totface, "bm->ftable");
      bm->ftable_tot = bm->totface;
    }
  }

  /* Fill the tables in mesh iteration order. */
  if (htype_needed & BM_VERT) {
    BM_iter_as_array(bm, BM_VERTS_OF_MESH, NULL, (void **)bm->vtable, bm->totvert);
  }

  if (htype_needed & BM_EDGE) {
    BM_iter_as_array(bm, BM_EDGES_OF_MESH, NULL, (void **)bm->etable, bm->totedge);
  }

  if (htype_needed & BM_FACE) {
    BM_iter_as_array(bm, BM_FACES_OF_MESH, NULL, (void **)bm->ftable, bm->totface);
  }

finally:
  /* Only clear dirty flags when all the pointers and data are actually valid.
   * This prevents possible threading issues when dirty flag check failed but
   * data wasn't ready still.
   */
  bm->elem_table_dirty &= ~htype_needed;
}
2339 
2340 /* use BM_mesh_elem_table_ensure where possible to avoid full rebuild */
/* use BM_mesh_elem_table_ensure where possible to avoid full rebuild */
void BM_mesh_elem_table_init(BMesh *bm, const char htype)
{
  BLI_assert((htype & ~BM_ALL_NOLOOP) == 0);

  /* force recalc */
  BM_mesh_elem_table_free(bm, BM_ALL_NOLOOP);
  BM_mesh_elem_table_ensure(bm, htype);
}
2349 
/** Free the lookup tables requested in \a htype (each may already be NULL). */
void BM_mesh_elem_table_free(BMesh *bm, const char htype)
{
  if (htype & BM_FACE) {
    MEM_SAFE_FREE(bm->ftable);
  }
  if (htype & BM_EDGE) {
    MEM_SAFE_FREE(bm->etable);
  }
  if (htype & BM_VERT) {
    MEM_SAFE_FREE(bm->vtable);
  }
}
2364 
/** Find a vertex by index by searching the mempool directly (no lookup table). */
BMVert *BM_vert_at_index_find(BMesh *bm, const int index)
{
  return BLI_mempool_findelem(bm->vpool, index);
}
2369 
/** Find an edge by index by searching the mempool directly (no lookup table). */
BMEdge *BM_edge_at_index_find(BMesh *bm, const int index)
{
  return BLI_mempool_findelem(bm->epool, index);
}
2374 
/** Find a face by index by searching the mempool directly (no lookup table). */
BMFace *BM_face_at_index_find(BMesh *bm, const int index)
{
  return BLI_mempool_findelem(bm->fpool, index);
}
2379 
BM_loop_at_index_find(BMesh * bm,const int index)2380 BMLoop *BM_loop_at_index_find(BMesh *bm, const int index)
2381 {
2382   BMIter iter;
2383   BMFace *f;
2384   int i = index;
2385   BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
2386     if (i < f->len) {
2387       BMLoop *l_first, *l_iter;
2388       l_iter = l_first = BM_FACE_FIRST_LOOP(f);
2389       do {
2390         if (i == 0) {
2391           return l_iter;
2392         }
2393         i -= 1;
2394       } while ((l_iter = l_iter->next) != l_first);
2395     }
2396     i -= f->len;
2397   }
2398   return NULL;
2399 }
2400 
2401 /**
2402  * Use lookup table when available, else use slower find functions.
2403  *
2404  * \note Try to use #BM_mesh_elem_table_ensure instead.
2405  */
BM_vert_at_index_find_or_table(BMesh * bm,const int index)2406 BMVert *BM_vert_at_index_find_or_table(BMesh *bm, const int index)
2407 {
2408   if ((bm->elem_table_dirty & BM_VERT) == 0) {
2409     return (index < bm->totvert) ? bm->vtable[index] : NULL;
2410   }
2411   return BM_vert_at_index_find(bm, index);
2412 }
2413 
BM_edge_at_index_find_or_table(BMesh * bm,const int index)2414 BMEdge *BM_edge_at_index_find_or_table(BMesh *bm, const int index)
2415 {
2416   if ((bm->elem_table_dirty & BM_EDGE) == 0) {
2417     return (index < bm->totedge) ? bm->etable[index] : NULL;
2418   }
2419   return BM_edge_at_index_find(bm, index);
2420 }
2421 
BM_face_at_index_find_or_table(BMesh * bm,const int index)2422 BMFace *BM_face_at_index_find_or_table(BMesh *bm, const int index)
2423 {
2424   if ((bm->elem_table_dirty & BM_FACE) == 0) {
2425     return (index < bm->totface) ? bm->ftable[index] : NULL;
2426   }
2427   return BM_face_at_index_find(bm, index);
2428 }
2429 
2430 /**
2431  * Return the amount of element of type 'type' in a given bmesh.
2432  */
int BM_mesh_elem_count(BMesh *bm, const char htype)
{
  /* Exactly one of BM_VERT / BM_EDGE / BM_FACE is expected. */
  BLI_assert((htype & ~BM_ALL_NOLOOP) == 0);

  if (htype == BM_VERT) {
    return bm->totvert;
  }
  if (htype == BM_EDGE) {
    return bm->totedge;
  }
  if (htype == BM_FACE) {
    return bm->totface;
  }

  BLI_assert(0);
  return 0;
}
2450 
2451 /**
2452  * Remaps the vertices, edges and/or faces of the bmesh as indicated by vert/edge/face_idx arrays
2453  * (xxx_idx[org_index] = new_index).
2454  *
2455  * A NULL array means no changes.
2456  *
2457  * \note
2458  * - Does not mess with indices, just sets elem_index_dirty flag.
2459  * - For verts/edges/faces only (as loops must remain "ordered" and "aligned"
2460  *   on a per-face basis...).
2461  *
2462  * \warning Be careful if you keep pointers to affected BM elements,
2463  * or arrays, when using this func!
2464  */
void BM_mesh_remap(BMesh *bm, const uint *vert_idx, const uint *edge_idx, const uint *face_idx)
{
  /* Old-to-new pointer mappings, built below for each element type being remapped. */
  GHash *vptr_map = NULL, *eptr_map = NULL, *fptr_map = NULL;
  BMIter iter, iterl;
  BMVert *ve;
  BMEdge *ed;
  BMFace *fa;
  BMLoop *lo;

  /* Nothing to remap. */
  if (!(vert_idx || edge_idx || face_idx)) {
    return;
  }

  /* The remap loops below index into the element tables, so they must be valid. */
  BM_mesh_elem_table_ensure(
      bm, (vert_idx ? BM_VERT : 0) | (edge_idx ? BM_EDGE : 0) | (face_idx ? BM_FACE : 0));

  /* Remap Verts */
  if (vert_idx) {
    BMVert **verts_pool, *verts_copy, **vep;
    int i, totvert = bm->totvert;
    const uint *new_idx;
    /* Special case: Python uses custom-data layers to hold PyObject references.
     * These have to be kept in-place, else the PyObject's we point to won't point back to us. */
    const int cd_vert_pyptr = CustomData_get_offset(&bm->vdata, CD_BM_ELEM_PYPTR);

    /* Init the old-to-new vert pointers mapping */
    vptr_map = BLI_ghash_ptr_new_ex("BM_mesh_remap vert pointers mapping", bm->totvert);

    /* Make a copy of all vertices. */
    verts_pool = bm->vtable;
    verts_copy = MEM_mallocN(sizeof(BMVert) * totvert, "BM_mesh_remap verts copy");
    void **pyptrs = (cd_vert_pyptr != -1) ? MEM_mallocN(sizeof(void *) * totvert, __func__) : NULL;
    for (i = totvert, ve = verts_copy + totvert - 1, vep = verts_pool + totvert - 1; i--;
         ve--, vep--) {
      *ve = **vep;
      /*          printf("*vep: %p, verts_pool[%d]: %p\n", *vep, i, verts_pool[i]);*/
      if (cd_vert_pyptr != -1) {
        void **pyptr = BM_ELEM_CD_GET_VOID_P(((BMElem *)ve), cd_vert_pyptr);
        pyptrs[i] = *pyptr;
      }
    }

    /* Copy back verts to their new place, and update old2new pointers mapping. */
    new_idx = vert_idx + totvert - 1;
    ve = verts_copy + totvert - 1;
    vep = verts_pool + totvert - 1; /* old, org pointer */
    for (i = totvert; i--; new_idx--, ve--, vep--) {
      BMVert *new_vep = verts_pool[*new_idx];
      *new_vep = *ve;
#if 0
      printf(
          "mapping vert from %d to %d (%p/%p to %p)\n", i, *new_idx, *vep, verts_pool[i], new_vep);
#endif
      BLI_ghash_insert(vptr_map, *vep, new_vep);
      if (cd_vert_pyptr != -1) {
        void **pyptr = BM_ELEM_CD_GET_VOID_P(((BMElem *)new_vep), cd_vert_pyptr);
        *pyptr = pyptrs[*new_idx];
      }
    }
    bm->elem_index_dirty |= BM_VERT;
    bm->elem_table_dirty |= BM_VERT;

    MEM_freeN(verts_copy);
    if (pyptrs) {
      MEM_freeN(pyptrs);
    }
  }

  /* Remap Edges */
  if (edge_idx) {
    BMEdge **edges_pool, *edges_copy, **edp;
    int i, totedge = bm->totedge;
    const uint *new_idx;
    /* Special case: Python uses custom-data layers to hold PyObject references.
     * These have to be kept in-place, else the PyObject's we point to won't point back to us. */
    const int cd_edge_pyptr = CustomData_get_offset(&bm->edata, CD_BM_ELEM_PYPTR);

    /* Init the old-to-new edge pointers mapping */
    eptr_map = BLI_ghash_ptr_new_ex("BM_mesh_remap edge pointers mapping", bm->totedge);

    /* Make a copy of all edges. */
    edges_pool = bm->etable;
    edges_copy = MEM_mallocN(sizeof(BMEdge) * totedge, "BM_mesh_remap edges copy");
    void **pyptrs = (cd_edge_pyptr != -1) ? MEM_mallocN(sizeof(void *) * totedge, __func__) : NULL;
    for (i = totedge, ed = edges_copy + totedge - 1, edp = edges_pool + totedge - 1; i--;
         ed--, edp--) {
      *ed = **edp;
      if (cd_edge_pyptr != -1) {
        void **pyptr = BM_ELEM_CD_GET_VOID_P(((BMElem *)ed), cd_edge_pyptr);
        pyptrs[i] = *pyptr;
      }
    }

    /* Copy back edges to their new place, and update old2new pointers mapping. */
    new_idx = edge_idx + totedge - 1;
    ed = edges_copy + totedge - 1;
    edp = edges_pool + totedge - 1; /* old, org pointer */
    for (i = totedge; i--; new_idx--, ed--, edp--) {
      BMEdge *new_edp = edges_pool[*new_idx];
      *new_edp = *ed;
      BLI_ghash_insert(eptr_map, *edp, new_edp);
#if 0
      printf(
          "mapping edge from %d to %d (%p/%p to %p)\n", i, *new_idx, *edp, edges_pool[i], new_edp);
#endif
      if (cd_edge_pyptr != -1) {
        void **pyptr = BM_ELEM_CD_GET_VOID_P(((BMElem *)new_edp), cd_edge_pyptr);
        *pyptr = pyptrs[*new_idx];
      }
    }
    bm->elem_index_dirty |= BM_EDGE;
    bm->elem_table_dirty |= BM_EDGE;

    MEM_freeN(edges_copy);
    if (pyptrs) {
      MEM_freeN(pyptrs);
    }
  }

  /* Remap Faces */
  if (face_idx) {
    BMFace **faces_pool, *faces_copy, **fap;
    int i, totface = bm->totface;
    const uint *new_idx;
    /* Special case: Python uses custom-data layers to hold PyObject references.
     * These have to be kept in-place, else the PyObject's we point to won't point back to us. */
    const int cd_poly_pyptr = CustomData_get_offset(&bm->pdata, CD_BM_ELEM_PYPTR);

    /* Init the old-to-new face pointers mapping */
    fptr_map = BLI_ghash_ptr_new_ex("BM_mesh_remap face pointers mapping", bm->totface);

    /* Make a copy of all faces. */
    faces_pool = bm->ftable;
    faces_copy = MEM_mallocN(sizeof(BMFace) * totface, "BM_mesh_remap faces copy");
    void **pyptrs = (cd_poly_pyptr != -1) ? MEM_mallocN(sizeof(void *) * totface, __func__) : NULL;
    for (i = totface, fa = faces_copy + totface - 1, fap = faces_pool + totface - 1; i--;
         fa--, fap--) {
      *fa = **fap;
      if (cd_poly_pyptr != -1) {
        void **pyptr = BM_ELEM_CD_GET_VOID_P(((BMElem *)fa), cd_poly_pyptr);
        pyptrs[i] = *pyptr;
      }
    }

    /* Copy back faces to their new place, and update old2new pointers mapping. */
    new_idx = face_idx + totface - 1;
    fa = faces_copy + totface - 1;
    fap = faces_pool + totface - 1; /* old, org pointer */
    for (i = totface; i--; new_idx--, fa--, fap--) {
      BMFace *new_fap = faces_pool[*new_idx];
      *new_fap = *fa;
      BLI_ghash_insert(fptr_map, *fap, new_fap);
      if (cd_poly_pyptr != -1) {
        void **pyptr = BM_ELEM_CD_GET_VOID_P(((BMElem *)new_fap), cd_poly_pyptr);
        *pyptr = pyptrs[*new_idx];
      }
    }

    /* Loops are stored per-face, so face remapping invalidates loop indices too. */
    bm->elem_index_dirty |= BM_FACE | BM_LOOP;
    bm->elem_table_dirty |= BM_FACE;

    MEM_freeN(faces_copy);
    if (pyptrs) {
      MEM_freeN(pyptrs);
    }
  }

  /* And now, fix all vertices/edges/faces/loops pointers! */
  /* Verts' pointers, only edge pointers... */
  if (eptr_map) {
    BM_ITER_MESH (ve, &iter, bm, BM_VERTS_OF_MESH) {
      /*          printf("Vert e: %p -> %p\n", ve->e, BLI_ghash_lookup(eptr_map, ve->e));*/
      if (ve->e) {
        ve->e = BLI_ghash_lookup(eptr_map, ve->e);
        BLI_assert(ve->e);
      }
    }
  }

  /* Edges' pointers, only vert pointers (as we don't mess with loops!),
   * and - ack! - edge pointers,
   * as we have to handle disklinks... */
  if (vptr_map || eptr_map) {
    BM_ITER_MESH (ed, &iter, bm, BM_EDGES_OF_MESH) {
      if (vptr_map) {
        /* printf("Edge v1: %p -> %p\n", ed->v1, BLI_ghash_lookup(vptr_map, ed->v1));*/
        /* printf("Edge v2: %p -> %p\n", ed->v2, BLI_ghash_lookup(vptr_map, ed->v2));*/
        ed->v1 = BLI_ghash_lookup(vptr_map, ed->v1);
        ed->v2 = BLI_ghash_lookup(vptr_map, ed->v2);
        BLI_assert(ed->v1);
        BLI_assert(ed->v2);
      }
      if (eptr_map) {
        /* printf("Edge v1_disk_link prev: %p -> %p\n", ed->v1_disk_link.prev,*/
        /*        BLI_ghash_lookup(eptr_map, ed->v1_disk_link.prev));*/
        /* printf("Edge v1_disk_link next: %p -> %p\n", ed->v1_disk_link.next,*/
        /*        BLI_ghash_lookup(eptr_map, ed->v1_disk_link.next));*/
        /* printf("Edge v2_disk_link prev: %p -> %p\n", ed->v2_disk_link.prev,*/
        /*        BLI_ghash_lookup(eptr_map, ed->v2_disk_link.prev));*/
        /* printf("Edge v2_disk_link next: %p -> %p\n", ed->v2_disk_link.next,*/
        /*        BLI_ghash_lookup(eptr_map, ed->v2_disk_link.next));*/
        ed->v1_disk_link.prev = BLI_ghash_lookup(eptr_map, ed->v1_disk_link.prev);
        ed->v1_disk_link.next = BLI_ghash_lookup(eptr_map, ed->v1_disk_link.next);
        ed->v2_disk_link.prev = BLI_ghash_lookup(eptr_map, ed->v2_disk_link.prev);
        ed->v2_disk_link.next = BLI_ghash_lookup(eptr_map, ed->v2_disk_link.next);
        BLI_assert(ed->v1_disk_link.prev);
        BLI_assert(ed->v1_disk_link.next);
        BLI_assert(ed->v2_disk_link.prev);
        BLI_assert(ed->v2_disk_link.next);
      }
    }
  }

  /* Faces' pointers (loops, in fact), always needed... */
  BM_ITER_MESH (fa, &iter, bm, BM_FACES_OF_MESH) {
    BM_ITER_ELEM (lo, &iterl, fa, BM_LOOPS_OF_FACE) {
      if (vptr_map) {
        /*              printf("Loop v: %p -> %p\n", lo->v, BLI_ghash_lookup(vptr_map, lo->v));*/
        lo->v = BLI_ghash_lookup(vptr_map, lo->v);
        BLI_assert(lo->v);
      }
      if (eptr_map) {
        /*              printf("Loop e: %p -> %p\n", lo->e, BLI_ghash_lookup(eptr_map, lo->e));*/
        lo->e = BLI_ghash_lookup(eptr_map, lo->e);
        BLI_assert(lo->e);
      }
      if (fptr_map) {
        /*              printf("Loop f: %p -> %p\n", lo->f, BLI_ghash_lookup(fptr_map, lo->f));*/
        lo->f = BLI_ghash_lookup(fptr_map, lo->f);
        BLI_assert(lo->f);
      }
    }
  }

  /* Selection history */
  {
    BMEditSelection *ese;
    for (ese = bm->selected.first; ese; ese = ese->next) {
      switch (ese->htype) {
        case BM_VERT:
          if (vptr_map) {
            ese->ele = BLI_ghash_lookup(vptr_map, ese->ele);
            BLI_assert(ese->ele);
          }
          break;
        case BM_EDGE:
          if (eptr_map) {
            ese->ele = BLI_ghash_lookup(eptr_map, ese->ele);
            BLI_assert(ese->ele);
          }
          break;
        case BM_FACE:
          if (fptr_map) {
            ese->ele = BLI_ghash_lookup(fptr_map, ese->ele);
            BLI_assert(ese->ele);
          }
          break;
      }
    }
  }

  /* The active face may also have been moved. */
  if (fptr_map) {
    if (bm->act_face) {
      bm->act_face = BLI_ghash_lookup(fptr_map, bm->act_face);
      BLI_assert(bm->act_face);
    }
  }

  if (vptr_map) {
    BLI_ghash_free(vptr_map, NULL, NULL);
  }
  if (eptr_map) {
    BLI_ghash_free(eptr_map, NULL, NULL);
  }
  if (fptr_map) {
    BLI_ghash_free(fptr_map, NULL, NULL);
  }
}
2744 
2745 /**
2746  * Use new memory pools for this mesh.
2747  *
2748  * \note needed for re-sizing elements (adding/removing tool flags)
2749  * but could also be used for packing fragmented bmeshes.
2750  */
void BM_mesh_rebuild(BMesh *bm,
                     const struct BMeshCreateParams *params,
                     BLI_mempool *vpool_dst,
                     BLI_mempool *epool_dst,
                     BLI_mempool *lpool_dst,
                     BLI_mempool *fpool_dst)
{
  /* Which element types are being moved into new pools (NULL pool == keep as-is). */
  const char remap = (vpool_dst ? BM_VERT : 0) | (epool_dst ? BM_EDGE : 0) |
                     (lpool_dst ? BM_LOOP : 0) | (fpool_dst ? BM_FACE : 0);

  /* Old-index -> new-element tables, used to fix up pointers after copying. */
  BMVert **vtable_dst = (remap & BM_VERT) ? MEM_mallocN(bm->totvert * sizeof(BMVert *), __func__) :
                                            NULL;
  BMEdge **etable_dst = (remap & BM_EDGE) ? MEM_mallocN(bm->totedge * sizeof(BMEdge *), __func__) :
                                            NULL;
  BMLoop **ltable_dst = (remap & BM_LOOP) ? MEM_mallocN(bm->totloop * sizeof(BMLoop *), __func__) :
                                            NULL;
  BMFace **ftable_dst = (remap & BM_FACE) ? MEM_mallocN(bm->totface * sizeof(BMFace *), __func__) :
                                            NULL;

  const bool use_toolflags = params->use_toolflags;

  /* Pass 1: copy each element into its destination pool and record the
   * mapping (the old element's index is set to its position, so MAP_* below
   * can translate old pointers to new ones). */
  if (remap & BM_VERT) {
    BMIter iter;
    int index;
    BMVert *v_src;
    BM_ITER_MESH_INDEX (v_src, &iter, bm, BM_VERTS_OF_MESH, index) {
      BMVert *v_dst = BLI_mempool_alloc(vpool_dst);
      memcpy(v_dst, v_src, sizeof(BMVert));
      if (use_toolflags) {
        ((BMVert_OFlag *)v_dst)->oflags = bm->vtoolflagpool ?
                                              BLI_mempool_calloc(bm->vtoolflagpool) :
                                              NULL;
      }

      vtable_dst[index] = v_dst;
      BM_elem_index_set(v_src, index); /* set_ok */
    }
  }

  if (remap & BM_EDGE) {
    BMIter iter;
    int index;
    BMEdge *e_src;
    BM_ITER_MESH_INDEX (e_src, &iter, bm, BM_EDGES_OF_MESH, index) {
      BMEdge *e_dst = BLI_mempool_alloc(epool_dst);
      memcpy(e_dst, e_src, sizeof(BMEdge));
      if (use_toolflags) {
        ((BMEdge_OFlag *)e_dst)->oflags = bm->etoolflagpool ?
                                              BLI_mempool_calloc(bm->etoolflagpool) :
                                              NULL;
      }

      etable_dst[index] = e_dst;
      BM_elem_index_set(e_src, index); /* set_ok */
    }
  }

  if (remap & (BM_LOOP | BM_FACE)) {
    BMIter iter;
    int index, index_loop = 0;
    BMFace *f_src;
    BM_ITER_MESH_INDEX (f_src, &iter, bm, BM_FACES_OF_MESH, index) {

      if (remap & BM_FACE) {
        BMFace *f_dst = BLI_mempool_alloc(fpool_dst);
        memcpy(f_dst, f_src, sizeof(BMFace));
        if (use_toolflags) {
          ((BMFace_OFlag *)f_dst)->oflags = bm->ftoolflagpool ?
                                                BLI_mempool_calloc(bm->ftoolflagpool) :
                                                NULL;
        }

        ftable_dst[index] = f_dst;
        BM_elem_index_set(f_src, index); /* set_ok */
      }

      /* Loops have no mesh-level iterator of their own, handle them per-face. */
      if (remap & BM_LOOP) {
        BMLoop *l_iter_src, *l_first_src;
        l_iter_src = l_first_src = BM_FACE_FIRST_LOOP((BMFace *)f_src);
        do {
          BMLoop *l_dst = BLI_mempool_alloc(lpool_dst);
          memcpy(l_dst, l_iter_src, sizeof(BMLoop));
          ltable_dst[index_loop] = l_dst;
          BM_elem_index_set(l_iter_src, index_loop++); /* set_ok */
        } while ((l_iter_src = l_iter_src->next) != l_first_src);
      }
    }
  }

#define MAP_VERT(ele) vtable_dst[BM_elem_index_get(ele)]
#define MAP_EDGE(ele) etable_dst[BM_elem_index_get(ele)]
#define MAP_LOOP(ele) ltable_dst[BM_elem_index_get(ele)]
#define MAP_FACE(ele) ftable_dst[BM_elem_index_get(ele)]

#define REMAP_VERT(ele) \
  { \
    if (remap & BM_VERT) { \
      ele = MAP_VERT(ele); \
    } \
  } \
  ((void)0)
#define REMAP_EDGE(ele) \
  { \
    if (remap & BM_EDGE) { \
      ele = MAP_EDGE(ele); \
    } \
  } \
  ((void)0)
#define REMAP_LOOP(ele) \
  { \
    if (remap & BM_LOOP) { \
      ele = MAP_LOOP(ele); \
    } \
  } \
  ((void)0)
#define REMAP_FACE(ele) \
  { \
    if (remap & BM_FACE) { \
      ele = MAP_FACE(ele); \
    } \
  } \
  ((void)0)

  /* Pass 2: rewrite every inter-element pointer in the new copies. */

  /* verts */
  {
    for (int i = 0; i < bm->totvert; i++) {
      BMVert *v = vtable_dst[i];
      if (v->e) {
        REMAP_EDGE(v->e);
      }
    }
  }

  /* edges */
  {
    for (int i = 0; i < bm->totedge; i++) {
      BMEdge *e = etable_dst[i];
      REMAP_VERT(e->v1);
      REMAP_VERT(e->v2);
      REMAP_EDGE(e->v1_disk_link.next);
      REMAP_EDGE(e->v1_disk_link.prev);
      REMAP_EDGE(e->v2_disk_link.next);
      REMAP_EDGE(e->v2_disk_link.prev);
      if (e->l) {
        REMAP_LOOP(e->l);
      }
    }
  }

  /* faces */
  {
    for (int i = 0; i < bm->totface; i++) {
      BMFace *f = ftable_dst[i];
      REMAP_LOOP(f->l_first);

      {
        BMLoop *l_iter, *l_first;
        l_iter = l_first = BM_FACE_FIRST_LOOP((BMFace *)f);
        do {
          REMAP_VERT(l_iter->v);
          REMAP_EDGE(l_iter->e);
          REMAP_FACE(l_iter->f);

          REMAP_LOOP(l_iter->radial_next);
          REMAP_LOOP(l_iter->radial_prev);
          REMAP_LOOP(l_iter->next);
          REMAP_LOOP(l_iter->prev);
        } while ((l_iter = l_iter->next) != l_first);
      }
    }
  }

  /* Selection history also stores element pointers. */
  LISTBASE_FOREACH (BMEditSelection *, ese, &bm->selected) {
    switch (ese->htype) {
      case BM_VERT:
        if (remap & BM_VERT) {
          ese->ele = (BMElem *)MAP_VERT(ese->ele);
        }
        break;
      case BM_EDGE:
        if (remap & BM_EDGE) {
          ese->ele = (BMElem *)MAP_EDGE(ese->ele);
        }
        break;
      case BM_FACE:
        if (remap & BM_FACE) {
          ese->ele = (BMElem *)MAP_FACE(ese->ele);
        }
        break;
    }
  }

  if (bm->act_face) {
    REMAP_FACE(bm->act_face);
  }

#undef MAP_VERT
#undef MAP_EDGE
#undef MAP_LOOP
#undef MAP_FACE /* Fix: was a duplicated `#undef MAP_EDGE`, leaking MAP_FACE. */

#undef REMAP_VERT
#undef REMAP_EDGE
#undef REMAP_LOOP
#undef REMAP_FACE /* Fix: was a duplicated `#undef REMAP_EDGE`, leaking REMAP_FACE. */

  /* Cleanup, re-use local tables if the current mesh had tables allocated.
   * could use irrespective but it may use more memory than the caller wants
   * (and not be needed). */
  if (remap & BM_VERT) {
    if (bm->vtable) {
      SWAP(BMVert **, vtable_dst, bm->vtable);
      bm->vtable_tot = bm->totvert;
      bm->elem_table_dirty &= ~BM_VERT;
    }
    MEM_freeN(vtable_dst);
    BLI_mempool_destroy(bm->vpool);
    bm->vpool = vpool_dst;
  }

  if (remap & BM_EDGE) {
    if (bm->etable) {
      SWAP(BMEdge **, etable_dst, bm->etable);
      bm->etable_tot = bm->totedge;
      bm->elem_table_dirty &= ~BM_EDGE;
    }
    MEM_freeN(etable_dst);
    BLI_mempool_destroy(bm->epool);
    bm->epool = epool_dst;
  }

  if (remap & BM_LOOP) {
    /* no loop table */
    MEM_freeN(ltable_dst);
    BLI_mempool_destroy(bm->lpool);
    bm->lpool = lpool_dst;
  }

  if (remap & BM_FACE) {
    if (bm->ftable) {
      SWAP(BMFace **, ftable_dst, bm->ftable);
      bm->ftable_tot = bm->totface;
      bm->elem_table_dirty &= ~BM_FACE;
    }
    MEM_freeN(ftable_dst);
    BLI_mempool_destroy(bm->fpool);
    bm->fpool = fpool_dst;
  }
}
3001 
3002 /**
3003  * Re-allocates mesh data with/without toolflags.
3004  */
void BM_mesh_toolflags_set(BMesh *bm, bool use_toolflags)
{
  /* Already in the requested state, nothing to do. */
  if (bm->use_toolflags == use_toolflags) {
    return;
  }

  const BMAllocTemplate allocsize = BMALLOC_TEMPLATE_FROM_BM(bm);

  /* New element pools sized for the requested flag layout (loops are unaffected). */
  BLI_mempool *vpool_dst = NULL;
  BLI_mempool *epool_dst = NULL;
  BLI_mempool *fpool_dst = NULL;
  bm_mempool_init_ex(&allocsize, use_toolflags, &vpool_dst, &epool_dst, NULL, &fpool_dst);

  /* When disabling, drop the tool-flag pools before rebuilding. */
  if (!use_toolflags) {
    BLI_mempool_destroy(bm->vtoolflagpool);
    BLI_mempool_destroy(bm->etoolflagpool);
    BLI_mempool_destroy(bm->ftoolflagpool);

    bm->vtoolflagpool = NULL;
    bm->etoolflagpool = NULL;
    bm->ftoolflagpool = NULL;
  }

  const struct BMeshCreateParams rebuild_params = {
      .use_toolflags = use_toolflags,
  };
  BM_mesh_rebuild(bm, &rebuild_params, vpool_dst, epool_dst, NULL, fpool_dst);

  bm->use_toolflags = use_toolflags;
}
3040 
3041 /* -------------------------------------------------------------------- */
3042 /** \name BMesh Coordinate Access
3043  * \{ */
3044 
void BM_mesh_vert_coords_get(BMesh *bm, float (*vert_coords)[3])
{
  /* Copy every vertex coordinate into the caller-provided array,
   * which must hold at least bm->totvert entries. */
  BMIter iter;
  BMVert *vert;
  int index;
  BM_ITER_MESH_INDEX (vert, &iter, bm, BM_VERTS_OF_MESH, index) {
    copy_v3_v3(vert_coords[index], vert->co);
  }
}
3054 
float (*BM_mesh_vert_coords_alloc(BMesh *bm, int *r_vert_len))[3]
{
  /* Allocate and fill a coordinate array; the caller owns (and must free)
   * the result. The length is returned through r_vert_len. */
  const int verts_num = bm->totvert;
  float(*coords)[3] = MEM_mallocN(sizeof(*coords) * verts_num, __func__);
  BM_mesh_vert_coords_get(bm, coords);
  *r_vert_len = verts_num;
  return coords;
}
3062 
void BM_mesh_vert_coords_apply(BMesh *bm, const float (*vert_coords)[3])
{
  /* Overwrite every vertex coordinate from the given array
   * (indexed in mesh vertex order, bm->totvert entries). */
  BMIter iter;
  BMVert *vert;
  int index;
  BM_ITER_MESH_INDEX (vert, &iter, bm, BM_VERTS_OF_MESH, index) {
    copy_v3_v3(vert->co, vert_coords[index]);
  }
}
3072 
void BM_mesh_vert_coords_apply_with_mat4(BMesh *bm,
                                         const float (*vert_coords)[3],
                                         const float mat[4][4])
{
  /* Like BM_mesh_vert_coords_apply, but transforms each coordinate
   * by `mat` while writing it back. */
  BMIter iter;
  BMVert *vert;
  int index;
  BM_ITER_MESH_INDEX (vert, &iter, bm, BM_VERTS_OF_MESH, index) {
    mul_v3_m4v3(vert->co, mat, vert_coords[index]);
  }
}
3084 
3085 /** \} */
3086