1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software Foundation,
14 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * The Original Code is Copyright (C) 2001-2002 by NaN Holding BV.
17 * All rights reserved.
18 */
19
20 /** \file
21 * \ingroup bke
22 *
23 * Functions to evaluate mesh data.
24 */
25
26 #include <limits.h>
27
28 #include "CLG_log.h"
29
30 #include "MEM_guardedalloc.h"
31
32 #include "DNA_mesh_types.h"
33 #include "DNA_meshdata_types.h"
34 #include "DNA_object_types.h"
35
36 #include "BLI_alloca.h"
37 #include "BLI_bitmap.h"
38 #include "BLI_edgehash.h"
39 #include "BLI_linklist.h"
40 #include "BLI_linklist_stack.h"
41 #include "BLI_math.h"
42 #include "BLI_memarena.h"
43 #include "BLI_polyfill_2d.h"
44 #include "BLI_stack.h"
45 #include "BLI_task.h"
46 #include "BLI_utildefines.h"
47
48 #include "BKE_customdata.h"
49 #include "BKE_editmesh_cache.h"
50 #include "BKE_global.h"
51 #include "BKE_mesh.h"
52 #include "BKE_multires.h"
53 #include "BKE_report.h"
54
55 #include "BLI_strict_flags.h"
56
57 #include "atomic_ops.h"
58 #include "mikktspace.h"
59
60 // #define DEBUG_TIME
61
62 #include "PIL_time.h"
63 #ifdef DEBUG_TIME
64 # include "PIL_time_utildefines.h"
65 #endif
66
67 static CLG_LogRef LOG = {"bke.mesh_evaluate"};
68
69 /* -------------------------------------------------------------------- */
70 /** \name Mesh Normal Calculation
71 * \{ */
72
73 /**
74 * Call when there are no polygons.
75 */
mesh_calc_normals_vert_fallback(MVert * mverts,int numVerts)76 static void mesh_calc_normals_vert_fallback(MVert *mverts, int numVerts)
77 {
78 for (int i = 0; i < numVerts; i++) {
79 MVert *mv = &mverts[i];
80 float no[3];
81
82 normalize_v3_v3(no, mv->co);
83 normal_float_to_short_v3(mv->no, no);
84 }
85 }
86
/* TODO(Sybren): we can probably rename this to BKE_mesh_calc_normals_mapping(),
 * and remove the function of the same name below, as that one doesn't seem to be
 * called anywhere. */

/**
 * Compute face (and normally vertex) normals for \a mesh, reading from and
 * writing into the mesh's own arrays.
 *
 * When the #CD_MVERT layer is referenced (shared with another data-block),
 * vertex normals must not be overwritten, so only face normals are computed.
 */
void BKE_mesh_calc_normals_mapping_simple(struct Mesh *mesh)
{
  /* A referenced vert layer is owned elsewhere: do not write vertex normals into it. */
  const bool only_face_normals = CustomData_is_referenced_layer(&mesh->vdata, CD_MVERT);

  BKE_mesh_calc_normals_mapping_ex(mesh->mvert,
                                   mesh->totvert,
                                   mesh->mloop,
                                   mesh->mpoly,
                                   mesh->totloop,
                                   mesh->totpoly,
                                   NULL,
                                   mesh->mface,
                                   mesh->totface,
                                   NULL,
                                   NULL,
                                   only_face_normals);
}
107
/* Calculate vertex and face normals, face normals are returned in *r_faceNors if non-NULL
 * and vertex normals are stored in actual mverts.
 *
 * Thin wrapper over #BKE_mesh_calc_normals_mapping_ex with `only_face_normals = false`,
 * i.e. vertex normals are always computed. */
void BKE_mesh_calc_normals_mapping(MVert *mverts,
                                   int numVerts,
                                   const MLoop *mloop,
                                   const MPoly *mpolys,
                                   int numLoops,
                                   int numPolys,
                                   float (*r_polyNors)[3],
                                   const MFace *mfaces,
                                   int numFaces,
                                   const int *origIndexFace,
                                   float (*r_faceNors)[3])
{
  BKE_mesh_calc_normals_mapping_ex(mverts,
                                   numVerts,
                                   mloop,
                                   mpolys,
                                   numLoops,
                                   numPolys,
                                   r_polyNors,
                                   mfaces,
                                   numFaces,
                                   origIndexFace,
                                   r_faceNors,
                                   false);
}
/* extended version of 'BKE_mesh_calc_normals_poly' with option not to calc vertex normals */

/**
 * Compute poly normals (into \a r_polyNors when non-NULL) and, unless
 * \a only_face_normals is set, vertex normals (written directly into \a mverts).
 * When \a origIndexFace and \a r_faceNors are given, poly normals are also
 * copied onto the tessellated faces through the original-poly index mapping.
 */
void BKE_mesh_calc_normals_mapping_ex(MVert *mverts,
                                      int numVerts,
                                      const MLoop *mloop,
                                      const MPoly *mpolys,
                                      int numLoops,
                                      int numPolys,
                                      float (*r_polyNors)[3],
                                      const MFace *mfaces,
                                      int numFaces,
                                      const int *origIndexFace,
                                      float (*r_faceNors)[3],
                                      const bool only_face_normals)
{
  float(*pnors)[3] = r_polyNors, (*fnors)[3] = r_faceNors;

  if (numPolys == 0) {
    /* Without polys, vertex normals can only fall back to normalized coordinates. */
    if (only_face_normals == false) {
      mesh_calc_normals_vert_fallback(mverts, numVerts);
    }
    return;
  }

  /* if we are not calculating verts and no poly/face outputs were passed,
   * then we have nothing to do */
  if ((only_face_normals == true) && (r_polyNors == NULL) && (r_faceNors == NULL)) {
    CLOG_WARN(&LOG, "called with nothing to do");
    return;
  }

  if (!pnors) {
    /* Temporary buffer, freed at the end (see `pnors != r_polyNors` below). */
    pnors = MEM_calloc_arrayN((size_t)numPolys, sizeof(float[3]), __func__);
  }
  /* NO NEED TO ALLOC YET */
  /* if (!fnors) fnors = MEM_calloc_arrayN(numFaces, sizeof(float[3]), "face nors mesh.c"); */

  if (only_face_normals == false) {
    /* vertex normals are optional, they require some extra calculations,
     * so make them optional */
    BKE_mesh_calc_normals_poly(
        mverts, NULL, numVerts, mloop, mpolys, numLoops, numPolys, pnors, false);
  }
  else {
    /* only calc poly normals */
    const MPoly *mp = mpolys;
    for (int i = 0; i < numPolys; i++, mp++) {
      BKE_mesh_calc_poly_normal(mp, mloop + mp->loopstart, mverts, pnors[i]);
    }
  }

  if (origIndexFace &&
      /* fnors == r_faceNors */ /* NO NEED TO ALLOC YET */
      fnors != NULL &&
      numFaces) {
    /* Copy poly normals onto tessellated faces, mapping through original poly indices. */
    const MFace *mf = mfaces;
    for (int i = 0; i < numFaces; i++, mf++, origIndexFace++) {
      if (*origIndexFace < numPolys) {
        copy_v3_v3(fnors[i], pnors[*origIndexFace]);
      }
      else {
        /* eek, we're not corresponding to polys */
        CLOG_ERROR(&LOG, "tessellation face indices are incorrect. normals may look bad.");
      }
    }
  }

  if (pnors != r_polyNors) {
    MEM_freeN(pnors);
  }
  /* if (fnors != r_faceNors) MEM_freeN(fnors); */ /* NO NEED TO ALLOC YET */

  fnors = pnors = NULL;
}
208
/** Shared state for the threaded normal-calculation callbacks below. */
typedef struct MeshCalcNormalsData {
  /* Input geometry (read-only in the callbacks). */
  const MPoly *mpolys;
  const MLoop *mloop;
  /* Vertices; normals are written back into `mverts[i].no` by the finalize pass. */
  MVert *mverts;
  /* Per-poly normals output (may be NULL when not requested). */
  float (*pnors)[3];
  /* Per-loop angle-weighted poly normals, filled by the prepare pass. */
  float (*lnors_weighted)[3];
  /* Per-vertex accumulated normals, normalized by the finalize pass. */
  float (*vnors)[3];
} MeshCalcNormalsData;
217
mesh_calc_normals_poly_cb(void * __restrict userdata,const int pidx,const TaskParallelTLS * __restrict UNUSED (tls))218 static void mesh_calc_normals_poly_cb(void *__restrict userdata,
219 const int pidx,
220 const TaskParallelTLS *__restrict UNUSED(tls))
221 {
222 MeshCalcNormalsData *data = userdata;
223 const MPoly *mp = &data->mpolys[pidx];
224
225 BKE_mesh_calc_poly_normal(mp, data->mloop + mp->loopstart, data->mverts, data->pnors[pidx]);
226 }
227
/**
 * Task callback: compute one polygon's normal (Newell's method) and store, for
 * each of its loops, that normal weighted by the corner angle at the loop's vertex.
 * The weighted loop normals are summed per vertex later, serially, in
 * #BKE_mesh_calc_normals_poly (several loops may share a vertex, so that part
 * cannot run in parallel without synchronization).
 */
static void mesh_calc_normals_poly_prepare_cb(void *__restrict userdata,
                                              const int pidx,
                                              const TaskParallelTLS *__restrict UNUSED(tls))
{
  MeshCalcNormalsData *data = userdata;
  const MPoly *mp = &data->mpolys[pidx];
  const MLoop *ml = &data->mloop[mp->loopstart];
  const MVert *mverts = data->mverts;

  float pnor_temp[3];
  /* Even when poly normals are not requested, a local one is needed for weighting. */
  float *pnor = data->pnors ? data->pnors[pidx] : pnor_temp;
  float(*lnors_weighted)[3] = data->lnors_weighted;

  const int nverts = mp->totloop;
  /* Normalized per-corner edge vectors, stack-allocated (poly-sized). */
  float(*edgevecbuf)[3] = BLI_array_alloca(edgevecbuf, (size_t)nverts);

  /* Polygon Normal and edge-vector */
  /* inline version of #BKE_mesh_calc_poly_normal, also does edge-vectors */
  {
    int i_prev = nverts - 1;
    const float *v_prev = mverts[ml[i_prev].v].co;
    const float *v_curr;

    zero_v3(pnor);
    /* Newell's Method */
    for (int i = 0; i < nverts; i++) {
      v_curr = mverts[ml[i].v].co;
      add_newell_cross_v3_v3v3(pnor, v_prev, v_curr);

      /* Unrelated to normalize, calculate edge-vector */
      sub_v3_v3v3(edgevecbuf[i_prev], v_prev, v_curr);
      normalize_v3(edgevecbuf[i_prev]);
      i_prev = i;

      v_prev = v_curr;
    }
    /* Degenerate (zero-area) poly: fall back to a fixed Z-up normal. */
    if (UNLIKELY(normalize_v3(pnor) == 0.0f)) {
      pnor[2] = 1.0f; /* other axes set to 0.0 */
    }
  }

  /* accumulate angle weighted face normal */
  /* inline version of #accumulate_vertex_normals_poly_v3,
   * split between this threaded callback and #mesh_calc_normals_poly_accum_cb. */
  {
    const float *prev_edge = edgevecbuf[nverts - 1];

    for (int i = 0; i < nverts; i++) {
      const int lidx = mp->loopstart + i;
      const float *cur_edge = edgevecbuf[i];

      /* calculate angle between the two poly edges incident on
       * this vertex */
      const float fac = saacos(-dot_v3v3(cur_edge, prev_edge));

      /* Store for later accumulation */
      mul_v3_v3fl(lnors_weighted[lidx], pnor, fac);

      prev_edge = cur_edge;
    }
  }
}
290
mesh_calc_normals_poly_finalize_cb(void * __restrict userdata,const int vidx,const TaskParallelTLS * __restrict UNUSED (tls))291 static void mesh_calc_normals_poly_finalize_cb(void *__restrict userdata,
292 const int vidx,
293 const TaskParallelTLS *__restrict UNUSED(tls))
294 {
295 MeshCalcNormalsData *data = userdata;
296
297 MVert *mv = &data->mverts[vidx];
298 float *no = data->vnors[vidx];
299
300 if (UNLIKELY(normalize_v3(no) == 0.0f)) {
301 /* following Mesh convention; we use vertex coordinate itself for normal in this case */
302 normalize_v3_v3(no, mv->co);
303 }
304
305 normal_float_to_short_v3(mv->no, no);
306 }
307
/**
 * Compute poly normals (into \a r_polynors) and, unless \a only_face_normals is
 * set, vertex normals as well: these are accumulated into \a r_vertnors (or an
 * internal buffer when NULL) and always written back into \a mverts as shorts.
 *
 * Vertex normals are the normalized sum of adjacent poly normals, each weighted
 * by the poly's corner angle at the vertex (see #mesh_calc_normals_poly_prepare_cb).
 */
void BKE_mesh_calc_normals_poly(MVert *mverts,
                                float (*r_vertnors)[3],
                                int numVerts,
                                const MLoop *mloop,
                                const MPoly *mpolys,
                                int numLoops,
                                int numPolys,
                                float (*r_polynors)[3],
                                const bool only_face_normals)
{
  float(*pnors)[3] = r_polynors;

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.min_iter_per_thread = 1024;

  if (only_face_normals) {
    BLI_assert((pnors != NULL) || (numPolys == 0));
    BLI_assert(r_vertnors == NULL);

    MeshCalcNormalsData data = {
        .mpolys = mpolys,
        .mloop = mloop,
        .mverts = mverts,
        .pnors = pnors,
    };

    /* Poly normals only: each poly is independent, trivially parallel. */
    BLI_task_parallel_range(0, numPolys, &data, mesh_calc_normals_poly_cb, &settings);
    return;
  }

  float(*vnors)[3] = r_vertnors;
  /* Scratch per-loop weighted normals, filled by the prepare pass below. */
  float(*lnors_weighted)[3] = MEM_malloc_arrayN(
      (size_t)numLoops, sizeof(*lnors_weighted), __func__);
  bool free_vnors = false;

  /* first go through and calculate normals for all the polys */
  if (vnors == NULL) {
    vnors = MEM_calloc_arrayN((size_t)numVerts, sizeof(*vnors), __func__);
    free_vnors = true;
  }
  else {
    /* Caller-provided buffer must start zeroed for the accumulation below. */
    memset(vnors, 0, sizeof(*vnors) * (size_t)numVerts);
  }

  MeshCalcNormalsData data = {
      .mpolys = mpolys,
      .mloop = mloop,
      .mverts = mverts,
      .pnors = pnors,
      .lnors_weighted = lnors_weighted,
      .vnors = vnors,
  };

  /* Compute poly normals, and prepare weighted loop normals. */
  BLI_task_parallel_range(0, numPolys, &data, mesh_calc_normals_poly_prepare_cb, &settings);

  /* Actually accumulate weighted loop normals into vertex ones. */
  /* Unfortunately, not possible to thread that
   * (not in a reasonable, totally lock- and barrier-free fashion),
   * since several loops will point to the same vertex... */
  for (int lidx = 0; lidx < numLoops; lidx++) {
    add_v3_v3(vnors[mloop[lidx].v], data.lnors_weighted[lidx]);
  }

  /* Normalize and validate computed vertex normals. */
  BLI_task_parallel_range(0, numVerts, &data, mesh_calc_normals_poly_finalize_cb, &settings);

  if (free_vnors) {
    MEM_freeN(vnors);
  }
  MEM_freeN(lnors_weighted);
}
381
/**
 * Recompute vertex normals when the mesh's #CD_MASK_NORMAL dirty flag is set,
 * guaranteeing the flag is cleared on return (asserted).
 */
void BKE_mesh_ensure_normals(Mesh *mesh)
{
  if (mesh->runtime.cd_dirty_vert & CD_MASK_NORMAL) {
    BKE_mesh_calc_normals(mesh);
  }
  BLI_assert((mesh->runtime.cd_dirty_vert & CD_MASK_NORMAL) == 0);
}
389
/**
 * Called after calculating all modifiers.
 *
 * Ensures the normals needed for drawing exist:
 * - BMesh wrapper: vert/poly normals cached on the runtime edit data
 *   (only when deformed coordinates are present).
 * - Mesh-data wrapper: vertex normals in #MVert plus a #CD_NORMAL poly layer,
 *   recomputed when the corresponding dirty flags are set or the layer is missing.
 */
void BKE_mesh_ensure_normals_for_display(Mesh *mesh)
{
  switch ((eMeshWrapperType)mesh->runtime.wrapper_type) {
    case ME_WRAPPER_TYPE_MDATA:
      /* Run code below. */
      break;
    case ME_WRAPPER_TYPE_BMESH: {
      struct BMEditMesh *em = mesh->edit_mesh;
      /* NOTE(review): `emd` is assumed non-NULL for a BMesh wrapper — confirm callers
       * always allocate `runtime.edit_data` before reaching here. */
      EditMeshData *emd = mesh->runtime.edit_data;
      if (emd->vertexCos) {
        BKE_editmesh_cache_ensure_vert_normals(em, emd);
        BKE_editmesh_cache_ensure_poly_normals(em, emd);
      }
      return;
    }
  }

  float(*poly_nors)[3] = CustomData_get_layer(&mesh->pdata, CD_NORMAL);
  const bool do_vert_normals = (mesh->runtime.cd_dirty_vert & CD_MASK_NORMAL) != 0;
  const bool do_poly_normals = (mesh->runtime.cd_dirty_poly & CD_MASK_NORMAL || poly_nors == NULL);

  if (do_vert_normals || do_poly_normals) {
    /* Allocate a poly-normal array if the mesh does not have a CD_NORMAL layer yet. */
    const bool do_add_poly_nors_cddata = (poly_nors == NULL);
    if (do_add_poly_nors_cddata) {
      poly_nors = MEM_malloc_arrayN((size_t)mesh->totpoly, sizeof(*poly_nors), __func__);
    }

    /* calculate poly/vert normals */
    BKE_mesh_calc_normals_poly(mesh->mvert,
                               NULL,
                               mesh->totvert,
                               mesh->mloop,
                               mesh->mpoly,
                               mesh->totloop,
                               mesh->totpoly,
                               poly_nors,
                               !do_vert_normals);

    if (do_add_poly_nors_cddata) {
      /* CD_ASSIGN transfers ownership of the freshly allocated array to the layer. */
      CustomData_add_layer(&mesh->pdata, CD_NORMAL, CD_ASSIGN, poly_nors, mesh->totpoly);
    }

    mesh->runtime.cd_dirty_vert &= ~CD_MASK_NORMAL;
    mesh->runtime.cd_dirty_poly &= ~CD_MASK_NORMAL;
  }
}
439
/* Note that this does not update the CD_NORMAL layer,
 * but does update the normals in the CD_MVERT layer. */
void BKE_mesh_calc_normals(Mesh *mesh)
{
#ifdef DEBUG_TIME
  TIMEIT_START_AVERAGED(BKE_mesh_calc_normals);
#endif
  /* Recompute poly and vertex normals; vertex normals are stored in `mesh->mvert`. */
  BKE_mesh_calc_normals_poly(mesh->mvert,
                             NULL,
                             mesh->totvert,
                             mesh->mloop,
                             mesh->mpoly,
                             mesh->totloop,
                             mesh->totpoly,
                             NULL,
                             false);
#ifdef DEBUG_TIME
  TIMEIT_END_AVERAGED(BKE_mesh_calc_normals);
#endif
  /* Vertex normals are now valid. */
  mesh->runtime.cd_dirty_vert &= ~CD_MASK_NORMAL;
}
461
/**
 * Compute triangle normals (into \a r_tri_nors when non-NULL) from loop-triangle
 * data, and accumulate them into vertex normals written directly into \a mverts
 * (as shorts). A degenerate (zero) vertex normal falls back to the normalized
 * vertex coordinate, following the Mesh convention.
 */
void BKE_mesh_calc_normals_looptri(MVert *mverts,
                                   int numVerts,
                                   const MLoop *mloop,
                                   const MLoopTri *looptri,
                                   int looptri_num,
                                   float (*r_tri_nors)[3])
{
  float(*tnorms)[3] = MEM_calloc_arrayN((size_t)numVerts, sizeof(*tnorms), "tnorms");
  float(*fnors)[3] = (r_tri_nors) ?
                         r_tri_nors :
                         MEM_calloc_arrayN((size_t)looptri_num, sizeof(*fnors), "meshnormals");

  if (!tnorms || !fnors) {
    goto cleanup;
  }

  for (int i = 0; i < looptri_num; i++) {
    const MLoopTri *lt = &looptri[i];
    float *f_no = fnors[i];
    const unsigned int vtri[3] = {
        mloop[lt->tri[0]].v,
        mloop[lt->tri[1]].v,
        mloop[lt->tri[2]].v,
    };

    normal_tri_v3(f_no, mverts[vtri[0]].co, mverts[vtri[1]].co, mverts[vtri[2]].co);

    accumulate_vertex_normals_tri_v3(tnorms[vtri[0]],
                                     tnorms[vtri[1]],
                                     tnorms[vtri[2]],
                                     f_no,
                                     mverts[vtri[0]].co,
                                     mverts[vtri[1]].co,
                                     mverts[vtri[2]].co);
  }

  /* following Mesh convention; we use vertex coordinate itself for normal in this case */
  for (int i = 0; i < numVerts; i++) {
    MVert *mv = &mverts[i];
    float *no = tnorms[i];

    if (UNLIKELY(normalize_v3(no) == 0.0f)) {
      normalize_v3_v3(no, mv->co);
    }

    normal_float_to_short_v3(mv->no, no);
  }

cleanup:
  /* Guard the frees: either array may be NULL when an allocation failed above,
   * and guardedalloc's MEM_freeN reports an error on a NULL pointer (unlike free()). */
  if (tnorms) {
    MEM_freeN(tnorms);
  }
  if (fnors && fnors != r_tri_nors) {
    MEM_freeN(fnors);
  }
}
517
/**
 * Initialize the loop-normal space array for \a numLoops loops, lazily creating
 * (or reusing) its memarena, which owns the per-loop space pointers and the loop
 * link pool. \a data_type selects how loops are referenced in spaces:
 * #MLNOR_SPACEARR_BMLOOP_PTR (BMLoop pointers) or #MLNOR_SPACEARR_LOOP_INDEX.
 */
void BKE_lnor_spacearr_init(MLoopNorSpaceArray *lnors_spacearr,
                            const int numLoops,
                            const char data_type)
{
  /* Only (re)allocate when the arrays are not both already set up. */
  if (!(lnors_spacearr->lspacearr && lnors_spacearr->loops_pool)) {
    MemArena *mem;

    if (!lnors_spacearr->mem) {
      lnors_spacearr->mem = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
    }
    mem = lnors_spacearr->mem;
    lnors_spacearr->lspacearr = BLI_memarena_calloc(mem,
                                                    sizeof(MLoopNorSpace *) * (size_t)numLoops);
    lnors_spacearr->loops_pool = BLI_memarena_alloc(mem, sizeof(LinkNode) * (size_t)numLoops);

    lnors_spacearr->num_spaces = 0;
  }
  BLI_assert(ELEM(data_type, MLNOR_SPACEARR_BMLOOP_PTR, MLNOR_SPACEARR_LOOP_INDEX));
  lnors_spacearr->data_type = data_type;
}
538
/**
 * Reset the space array for reuse: drop all spaces and loop links, recycling the
 * memarena (kept allocated) for a subsequent #BKE_lnor_spacearr_init call.
 */
void BKE_lnor_spacearr_clear(MLoopNorSpaceArray *lnors_spacearr)
{
  MemArena *arena = lnors_spacearr->mem;

  lnors_spacearr->num_spaces = 0;
  lnors_spacearr->lspacearr = NULL;
  lnors_spacearr->loops_pool = NULL;

  /* The arena (when any) is only cleared, not freed, so its buffers can be reused. */
  if (arena != NULL) {
    BLI_memarena_clear(arena);
  }
}
548
/** Fully free the space array, including its memarena. */
void BKE_lnor_spacearr_free(MLoopNorSpaceArray *lnors_spacearr)
{
  /* The arrays all live inside the arena, so freeing it releases everything;
   * the pointers are nulled so the struct can be safely re-initialized. */
  BLI_memarena_free(lnors_spacearr->mem);
  lnors_spacearr->mem = NULL;
  lnors_spacearr->lspacearr = NULL;
  lnors_spacearr->loops_pool = NULL;
  lnors_spacearr->num_spaces = 0;
}
557
BKE_lnor_space_create(MLoopNorSpaceArray * lnors_spacearr)558 MLoopNorSpace *BKE_lnor_space_create(MLoopNorSpaceArray *lnors_spacearr)
559 {
560 lnors_spacearr->num_spaces++;
561 return BLI_memarena_calloc(lnors_spacearr->mem, sizeof(MLoopNorSpace));
562 }
563
/* This threshold is a bit touchy (usual float precision issue), this value seems OK. */
#define LNOR_SPACE_TRIGO_THRESHOLD (1.0f - 1e-4f)

/* Should only be called once.
 * Beware, this modifies ref_vec and other_vec in place!
 * In case no valid space can be generated, ref_alpha and ref_beta are set to zero
 * (which means 'use auto lnors').
 */
void BKE_lnor_space_define(MLoopNorSpace *lnor_space,
                           const float lnor[3],
                           float vec_ref[3],
                           float vec_other[3],
                           BLI_Stack *edge_vectors)
{
  const float pi2 = (float)M_PI * 2.0f;
  float tvec[3], dtp;
  const float dtp_ref = dot_v3v3(vec_ref, lnor);
  const float dtp_other = dot_v3v3(vec_other, lnor);

  if (UNLIKELY(fabsf(dtp_ref) >= LNOR_SPACE_TRIGO_THRESHOLD ||
               fabsf(dtp_other) >= LNOR_SPACE_TRIGO_THRESHOLD)) {
    /* If vec_ref or vec_other are too much aligned with lnor, we can't build lnor space,
     * tag it as invalid and abort. */
    lnor_space->ref_alpha = lnor_space->ref_beta = 0.0f;

    /* Caller's stack is still consumed, to keep the contract consistent. */
    if (edge_vectors) {
      BLI_stack_clear(edge_vectors);
    }
    return;
  }

  copy_v3_v3(lnor_space->vec_lnor, lnor);

  /* Compute ref alpha, average angle of all available edge vectors to lnor. */
  if (edge_vectors) {
    float alpha = 0.0f;
    int nbr = 0;
    while (!BLI_stack_is_empty(edge_vectors)) {
      const float *vec = BLI_stack_peek(edge_vectors);
      alpha += saacosf(dot_v3v3(vec, lnor));
      BLI_stack_discard(edge_vectors);
      nbr++;
    }
    /* Note: In theory, this could be 'nbr > 2',
     * but there is one case where we only have two edges for two loops:
     * a smooth vertex with only two edges and two faces (our Monkey's nose has that, e.g.).
     */
    BLI_assert(nbr >= 2); /* This piece of code shall only be called for more than one loop... */
    lnor_space->ref_alpha = alpha / (float)nbr;
  }
  else {
    /* No edge-vector stack given: average the angles of the two provided vectors to lnor. */
    lnor_space->ref_alpha = (saacosf(dot_v3v3(vec_ref, lnor)) +
                             saacosf(dot_v3v3(vec_other, lnor))) /
                            2.0f;
  }

  /* Project vec_ref on lnor's ortho plane. */
  mul_v3_v3fl(tvec, lnor, dtp_ref);
  sub_v3_v3(vec_ref, tvec);
  normalize_v3_v3(lnor_space->vec_ref, vec_ref);

  /* vec_ortho completes the orthonormal (vec_lnor, vec_ref, vec_ortho) frame. */
  cross_v3_v3v3(tvec, lnor, lnor_space->vec_ref);
  normalize_v3_v3(lnor_space->vec_ortho, tvec);

  /* Project vec_other on lnor's ortho plane. */
  mul_v3_v3fl(tvec, lnor, dtp_other);
  sub_v3_v3(vec_other, tvec);
  normalize_v3(vec_other);

  /* Beta is angle between ref_vec and other_vec, around lnor. */
  dtp = dot_v3v3(lnor_space->vec_ref, vec_other);
  if (LIKELY(dtp < LNOR_SPACE_TRIGO_THRESHOLD)) {
    const float beta = saacos(dtp);
    /* The vec_ortho dot sign disambiguates which side of vec_ref the angle lies on. */
    lnor_space->ref_beta = (dot_v3v3(lnor_space->vec_ortho, vec_other) < 0.0f) ? pi2 - beta : beta;
  }
  else {
    /* Vectors (nearly) parallel: use the full-circle angle. */
    lnor_space->ref_beta = pi2;
  }
}
643
/**
 * Add a new given loop to given lnor_space.
 * Depending on \a lnor_space->data_type, we expect \a bm_loop to be a pointer to BMLoop struct
 * (in case of BMLOOP_PTR), or NULL (in case of LOOP_INDEX), loop index is then stored in pointer.
 * If \a is_single is set, the BMLoop or loop index is directly stored in \a lnor_space->loops
 * pointer (since there is only one loop in this fan),
 * else it is added to the linked list of loops in the fan.
 */
void BKE_lnor_space_add_loop(MLoopNorSpaceArray *lnors_spacearr,
                             MLoopNorSpace *lnor_space,
                             const int ml_index,
                             void *bm_loop,
                             const bool is_single)
{
  BLI_assert((lnors_spacearr->data_type == MLNOR_SPACEARR_LOOP_INDEX && bm_loop == NULL) ||
             (lnors_spacearr->data_type == MLNOR_SPACEARR_BMLOOP_PTR && bm_loop != NULL));

  lnors_spacearr->lspacearr[ml_index] = lnor_space;
  if (bm_loop == NULL) {
    /* LOOP_INDEX mode: pack the loop index itself into the pointer. */
    bm_loop = POINTER_FROM_INT(ml_index);
  }
  if (is_single) {
    BLI_assert(lnor_space->loops == NULL);
    lnor_space->flags |= MLNOR_SPACE_IS_SINGLE;
    lnor_space->loops = bm_loop;
  }
  else {
    BLI_assert((lnor_space->flags & MLNOR_SPACE_IS_SINGLE) == 0);
    /* Link node storage comes from the pre-allocated per-loop pool, no allocation here. */
    BLI_linklist_prepend_nlink(&lnor_space->loops, bm_loop, &lnors_spacearr->loops_pool[ml_index]);
  }
}
675
/** Map a short in [-SHRT_MAX, SHRT_MAX] to a float in [-1.0, 1.0]. */
MINLINE float unit_short_to_float(const short val)
{
  return (float)val / (float)SHRT_MAX;
}
680
/** Map a float in [-1.0, 1.0] to a short in [-SHRT_MAX, SHRT_MAX], rounding to nearest. */
MINLINE short unit_float_to_short(const float val)
{
  /* Rounding... */
  return (short)floorf(val * (float)SHRT_MAX + 0.5f);
}
686
/**
 * Decode 2-short custom normal data into a normal vector, using the given loop
 * normal space: the shorts are signed factors of ref_alpha/ref_beta, used as
 * spherical angles in the (vec_lnor, vec_ref, vec_ortho) frame of the space.
 */
void BKE_lnor_space_custom_data_to_normal(MLoopNorSpace *lnor_space,
                                          const short clnor_data[2],
                                          float r_custom_lnor[3])
{
  /* NOP custom normal data or invalid lnor space, return. */
  if (clnor_data[0] == 0 || lnor_space->ref_alpha == 0.0f || lnor_space->ref_beta == 0.0f) {
    copy_v3_v3(r_custom_lnor, lnor_space->vec_lnor);
    return;
  }

  {
    /* TODO Check whether using sincosf() gives any noticeable benefit
     * (could not even get it working under linux though)! */
    const float pi2 = (float)(M_PI * 2.0);
    const float alphafac = unit_short_to_float(clnor_data[0]);
    /* A negative factor encodes an angle beyond the reference one (wrapped around 2*pi). */
    const float alpha = (alphafac > 0.0f ? lnor_space->ref_alpha : pi2 - lnor_space->ref_alpha) *
                        alphafac;
    const float betafac = unit_short_to_float(clnor_data[1]);

    mul_v3_v3fl(r_custom_lnor, lnor_space->vec_lnor, cosf(alpha));

    if (betafac == 0.0f) {
      /* No rotation around lnor: result stays in the (vec_lnor, vec_ref) plane. */
      madd_v3_v3fl(r_custom_lnor, lnor_space->vec_ref, sinf(alpha));
    }
    else {
      const float sinalpha = sinf(alpha);
      const float beta = (betafac > 0.0f ? lnor_space->ref_beta : pi2 - lnor_space->ref_beta) *
                         betafac;
      madd_v3_v3fl(r_custom_lnor, lnor_space->vec_ref, sinalpha * cosf(beta));
      madd_v3_v3fl(r_custom_lnor, lnor_space->vec_ortho, sinalpha * sinf(beta));
    }
  }
}
720
/**
 * Encode a custom normal into 2 shorts relative to the given loop normal space
 * (inverse of #BKE_lnor_space_custom_data_to_normal). A zero vector, or one that
 * matches the space's own lnor, encodes as (0, 0), the NOP value.
 */
void BKE_lnor_space_custom_normal_to_data(MLoopNorSpace *lnor_space,
                                          const float custom_lnor[3],
                                          short r_clnor_data[2])
{
  /* We use null vector as NOP custom normal (can be simpler than giving autocomputed lnor...). */
  if (is_zero_v3(custom_lnor) || compare_v3v3(lnor_space->vec_lnor, custom_lnor, 1e-4f)) {
    r_clnor_data[0] = r_clnor_data[1] = 0;
    return;
  }

  {
    const float pi2 = (float)(M_PI * 2.0);
    const float cos_alpha = dot_v3v3(lnor_space->vec_lnor, custom_lnor);
    float vec[3], cos_beta;
    float alpha;

    /* Alpha: angle from the space's lnor, encoded as a signed factor of ref_alpha. */
    alpha = saacosf(cos_alpha);
    if (alpha > lnor_space->ref_alpha) {
      /* Note we could stick to [0, pi] range here,
       * but makes decoding more complex, not worth it. */
      r_clnor_data[0] = unit_float_to_short(-(pi2 - alpha) / (pi2 - lnor_space->ref_alpha));
    }
    else {
      r_clnor_data[0] = unit_float_to_short(alpha / lnor_space->ref_alpha);
    }

    /* Project custom lnor on (vec_ref, vec_ortho) plane. */
    mul_v3_v3fl(vec, lnor_space->vec_lnor, -cos_alpha);
    add_v3_v3(vec, custom_lnor);
    normalize_v3(vec);

    cos_beta = dot_v3v3(lnor_space->vec_ref, vec);

    /* Beta: angle around lnor from vec_ref, encoded as a signed factor of ref_beta. */
    if (cos_beta < LNOR_SPACE_TRIGO_THRESHOLD) {
      float beta = saacosf(cos_beta);
      if (dot_v3v3(lnor_space->vec_ortho, vec) < 0.0f) {
        beta = pi2 - beta;
      }

      if (beta > lnor_space->ref_beta) {
        r_clnor_data[1] = unit_float_to_short(-(pi2 - beta) / (pi2 - lnor_space->ref_beta));
      }
      else {
        r_clnor_data[1] = unit_float_to_short(beta / lnor_space->ref_beta);
      }
    }
    else {
      /* Projected vector (nearly) matches vec_ref: zero beta. */
      r_clnor_data[1] = 0;
    }
  }
}
772
/* Number of loops per task chunk in the split-normals computation. */
#define LOOP_SPLIT_TASK_BLOCK_SIZE 1024

/** Per-task state for the loop-split (smooth-fan) normal computation. */
typedef struct LoopSplitTaskData {
  /* Specific to each instance (each task). */

  /** We have to create those outside of tasks, since afaik memarena is not threadsafe. */
  MLoopNorSpace *lnor_space;
  /* Destination loop normal(s) — presumably set up by the task scheduler; see users. */
  float (*lnor)[3];
  /* Current and previous loop of the edge being processed. */
  const MLoop *ml_curr;
  const MLoop *ml_prev;
  int ml_curr_index;
  int ml_prev_index;
  /** Also used a flag to switch between single or fan process! */
  const int *e2l_prev;
  /* Index of the polygon being processed. */
  int mp_index;

  /** This one is special, it's owned and managed by worker tasks,
   * avoid to have to create it for each fan! */
  BLI_Stack *edge_vectors;

  char pad_c;
} LoopSplitTaskData;
795
/** State shared by all loop-split tasks. */
typedef struct LoopSplitTaskDataCommon {
  /* Read/write.
   * Note we do not need to protect it, though, since two different tasks will *always* affect
   * different elements in the arrays. */
  MLoopNorSpaceArray *lnors_spacearr;
  float (*loopnors)[3];
  short (*clnors_data)[2];

  /* Read-only. */
  const MVert *mverts;
  const MEdge *medges;
  const MLoop *mloops;
  const MPoly *mpolys;
  /* Edge -> two loop indices (or sharp markers), filled by #mesh_edges_sharp_tag. */
  int (*edge_to_loops)[2];
  /* Loop -> owning poly index. */
  int *loop_to_poly;
  const float (*polynors)[3];

  int numEdges;
  int numLoops;
  int numPolys;
} LoopSplitTaskDataCommon;
817
#define INDEX_UNSET INT_MIN
#define INDEX_INVALID -1
/* See comment about edge_to_loops below. */
#define IS_EDGE_SHARP(_e2l) (ELEM((_e2l)[1], INDEX_UNSET, INDEX_INVALID))

/**
 * Fill `data->edge_to_loops` (edge -> two loop indices, or sharp markers) and
 * `data->loop_to_poly` (loop -> owning poly index), classifying every edge as
 * smooth or sharp based on the poly ME_SMOOTH flag, the edge ME_SHARP flag,
 * winding (both loops sharing the same vertex), edge valence (>2 loops), and —
 * when \a check_angle is set — the angle between the two polys' normals against
 * \a split_angle. When \a do_sharp_edges_tag is set, edges made sharp purely by
 * the angle threshold additionally get ME_SHARP written back to `data->medges`.
 * When `data->loopnors` is non-NULL, it is pre-filled with the (all-smooth)
 * vertex normals.
 */
static void mesh_edges_sharp_tag(LoopSplitTaskDataCommon *data,
                                 const bool check_angle,
                                 const float split_angle,
                                 const bool do_sharp_edges_tag)
{
  const MVert *mverts = data->mverts;
  const MEdge *medges = data->medges;
  const MLoop *mloops = data->mloops;

  const MPoly *mpolys = data->mpolys;

  const int numEdges = data->numEdges;
  const int numPolys = data->numPolys;

  float(*loopnors)[3] = data->loopnors; /* Note: loopnors may be NULL here. */
  const float(*polynors)[3] = data->polynors;

  int(*edge_to_loops)[2] = data->edge_to_loops;
  int *loop_to_poly = data->loop_to_poly;

  /* Bitmap of edges that became sharp purely due to the angle threshold. */
  BLI_bitmap *sharp_edges = do_sharp_edges_tag ? BLI_BITMAP_NEW(numEdges, __func__) : NULL;

  const MPoly *mp;
  int mp_index;

  /* Compare poly-normal dot products against the cosine, avoiding acos per edge. */
  const float split_angle_cos = check_angle ? cosf(split_angle) : -1.0f;

  for (mp = mpolys, mp_index = 0; mp_index < numPolys; mp++, mp_index++) {
    const MLoop *ml_curr;
    int *e2l;
    int ml_curr_index = mp->loopstart;
    const int ml_last_index = (ml_curr_index + mp->totloop) - 1;

    ml_curr = &mloops[ml_curr_index];

    for (; ml_curr_index <= ml_last_index; ml_curr++, ml_curr_index++) {
      e2l = edge_to_loops[ml_curr->e];

      loop_to_poly[ml_curr_index] = mp_index;

      /* Pre-populate all loop normals as if their verts were all-smooth,
       * this way we don't have to compute those later!
       */
      if (loopnors) {
        normal_short_to_float_v3(loopnors[ml_curr_index], mverts[ml_curr->v].no);
      }

      /* Check whether current edge might be smooth or sharp */
      if ((e2l[0] | e2l[1]) == 0) {
        /* 'Empty' edge until now, set e2l[0] (and e2l[1] to INDEX_UNSET to tag it as unset). */
        e2l[0] = ml_curr_index;
        /* We have to check this here too, else we might miss some flat faces!!! */
        e2l[1] = (mp->flag & ME_SMOOTH) ? INDEX_UNSET : INDEX_INVALID;
      }
      else if (e2l[1] == INDEX_UNSET) {
        const bool is_angle_sharp = (check_angle &&
                                     dot_v3v3(polynors[loop_to_poly[e2l[0]]], polynors[mp_index]) <
                                         split_angle_cos);

        /* Second loop using this edge, time to test its sharpness.
         * An edge is sharp if it is tagged as such, or its face is not smooth,
         * or both poly have opposed (flipped) normals, i.e. both loops on the same edge share the
         * same vertex, or angle between both its polys' normals is above split_angle value.
         */
        if (!(mp->flag & ME_SMOOTH) || (medges[ml_curr->e].flag & ME_SHARP) ||
            ml_curr->v == mloops[e2l[0]].v || is_angle_sharp) {
          /* Note: we are sure that loop != 0 here ;) */
          e2l[1] = INDEX_INVALID;

          /* We want to avoid tagging edges as sharp when it is already defined as such by
           * other causes than angle threshold... */
          if (do_sharp_edges_tag && is_angle_sharp) {
            BLI_BITMAP_SET(sharp_edges, ml_curr->e, true);
          }
        }
        else {
          e2l[1] = ml_curr_index;
        }
      }
      else if (!IS_EDGE_SHARP(e2l)) {
        /* More than two loops using this edge, tag as sharp if not yet done. */
        e2l[1] = INDEX_INVALID;

        /* We want to avoid tagging edges as sharp when it is already defined as such by
         * other causes than angle threshold... */
        if (do_sharp_edges_tag) {
          BLI_BITMAP_SET(sharp_edges, ml_curr->e, false);
        }
      }
      /* Else, edge is already 'disqualified' (i.e. sharp)! */
    }
  }

  /* If requested, do actual tagging of edges as sharp in another loop. */
  if (do_sharp_edges_tag) {
    MEdge *me;
    int me_index;
    for (me = (MEdge *)medges, me_index = 0; me_index < numEdges; me++, me_index++) {
      if (BLI_BITMAP_TEST(sharp_edges, me_index)) {
        me->flag |= ME_SHARP;
      }
    }

    MEM_freeN(sharp_edges);
  }
}
929
930 /**
931 * Define sharp edges as needed to mimic 'autosmooth' from angle threshold.
932 *
933 * Used when defining an empty custom loop normals data layer,
934 * to keep same shading as with autosmooth!
935 */
BKE_edges_sharp_from_angle_set(const struct MVert * mverts,const int UNUSED (numVerts),struct MEdge * medges,const int numEdges,struct MLoop * mloops,const int numLoops,struct MPoly * mpolys,const float (* polynors)[3],const int numPolys,const float split_angle)936 void BKE_edges_sharp_from_angle_set(const struct MVert *mverts,
937 const int UNUSED(numVerts),
938 struct MEdge *medges,
939 const int numEdges,
940 struct MLoop *mloops,
941 const int numLoops,
942 struct MPoly *mpolys,
943 const float (*polynors)[3],
944 const int numPolys,
945 const float split_angle)
946 {
947 if (split_angle >= (float)M_PI) {
948 /* Nothing to do! */
949 return;
950 }
951
952 /* Mapping edge -> loops. See BKE_mesh_normals_loop_split() for details. */
953 int(*edge_to_loops)[2] = MEM_calloc_arrayN((size_t)numEdges, sizeof(*edge_to_loops), __func__);
954
955 /* Simple mapping from a loop to its polygon index. */
956 int *loop_to_poly = MEM_malloc_arrayN((size_t)numLoops, sizeof(*loop_to_poly), __func__);
957
958 LoopSplitTaskDataCommon common_data = {
959 .mverts = mverts,
960 .medges = medges,
961 .mloops = mloops,
962 .mpolys = mpolys,
963 .edge_to_loops = edge_to_loops,
964 .loop_to_poly = loop_to_poly,
965 .polynors = polynors,
966 .numEdges = numEdges,
967 .numPolys = numPolys,
968 };
969
970 mesh_edges_sharp_tag(&common_data, true, split_angle, true);
971
972 MEM_freeN(edge_to_loops);
973 MEM_freeN(loop_to_poly);
974 }
975
/**
 * Advance one step of a walk around the "fan" of faces sharing vertex \a mv_pivot_index:
 * jump across the current edge to its other loop, then pick the previous/next loop inside
 * that loop's polygon so the walk keeps circling the pivot vertex.
 *
 * \param loop_to_poly: Map from loop index to the index of its polygon.
 * \param e2lfan_curr: Pair of loop indices using the current fan edge.
 * \param mv_pivot_index: The vertex we are fanning around.
 * \param r_mlfan_curr, r_mlfan_curr_index: In/out, the current fan loop (advanced on return).
 * \param r_mlfan_vert_index: Output, index of the loop attached to the pivot vertex.
 * \param r_mpfan_curr_index: In/out, the current fan polygon index (advanced on return).
 */
void BKE_mesh_loop_manifold_fan_around_vert_next(const MLoop *mloops,
                                                 const MPoly *mpolys,
                                                 const int *loop_to_poly,
                                                 const int *e2lfan_curr,
                                                 const uint mv_pivot_index,
                                                 const MLoop **r_mlfan_curr,
                                                 int *r_mlfan_curr_index,
                                                 int *r_mlfan_vert_index,
                                                 int *r_mpfan_curr_index)
{
  const MLoop *mlfan_next;
  const MPoly *mpfan_next;

  /* Warning! This is rather complex!
   * We have to find our next edge around the vertex (fan mode).
   * First we find the next loop, which is either previous or next to mlfan_curr_index, depending
   * whether both loops using current edge are in the same direction or not, and whether
   * mlfan_curr_index actually uses the vertex we are fanning around!
   * mlfan_curr_index is the index of mlfan_next here, and mlfan_next is not the real next one
   * (i.e. not the future mlfan_curr)...
   */
  /* Jump to the other loop that shares the current fan edge. */
  *r_mlfan_curr_index = (e2lfan_curr[0] == *r_mlfan_curr_index) ? e2lfan_curr[1] : e2lfan_curr[0];
  *r_mpfan_curr_index = loop_to_poly[*r_mlfan_curr_index];

  BLI_assert(*r_mlfan_curr_index >= 0);
  BLI_assert(*r_mpfan_curr_index >= 0);

  mlfan_next = &mloops[*r_mlfan_curr_index];
  mpfan_next = &mpolys[*r_mpfan_curr_index];
  if (((*r_mlfan_curr)->v == mlfan_next->v && (*r_mlfan_curr)->v == mv_pivot_index) ||
      ((*r_mlfan_curr)->v != mlfan_next->v && (*r_mlfan_curr)->v != mv_pivot_index)) {
    /* We need the previous loop, but current one is our vertex's loop. */
    *r_mlfan_vert_index = *r_mlfan_curr_index;
    if (--(*r_mlfan_curr_index) < mpfan_next->loopstart) {
      /* Wrap around to the last loop of the polygon. */
      *r_mlfan_curr_index = mpfan_next->loopstart + mpfan_next->totloop - 1;
    }
  }
  else {
    /* We need the next loop, which is also our vertex's loop. */
    if (++(*r_mlfan_curr_index) >= mpfan_next->loopstart + mpfan_next->totloop) {
      /* Wrap around to the first loop of the polygon. */
      *r_mlfan_curr_index = mpfan_next->loopstart;
    }
    *r_mlfan_vert_index = *r_mlfan_curr_index;
  }
  *r_mlfan_curr = &mloops[*r_mlfan_curr_index];
  /* And now we are back in sync, mlfan_curr_index is the index of mlfan_curr! Pff! */
}
1023
/* Handle a 'single' loop task: both edges around the vertex are sharp in the current
 * polygon, so the loop normal is simply the polygon normal (and its lnor space, when
 * requested, is built from the two edge vectors at the vertex). */
static void split_loop_nor_single_do(LoopSplitTaskDataCommon *common_data, LoopSplitTaskData *data)
{
  MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
  const short(*clnors_data)[2] = common_data->clnors_data;

  const MVert *verts = common_data->mverts;
  const MEdge *edges = common_data->medges;
  const float(*poly_normals)[3] = common_data->polynors;

  const MLoop *ml_curr = data->ml_curr;
  const MLoop *ml_prev = data->ml_prev;

  /* Simple case: the loop just takes its poly's normal. */
  copy_v3_v3(*data->lnor, poly_normals[data->mp_index]);

  if (lnors_spacearr == NULL) {
    /* No lnor space requested, nothing more to do. */
    return;
  }

  /* Build the (trivial) lnor space for this single-loop fan, from the two
   * normalized edge vectors leaving the pivot vertex. */
  const unsigned int pivot_v = ml_curr->v; /* The vertex we are "fanning" around! */
  const MVert *pivot = &verts[pivot_v];

  const MEdge *e_curr = &edges[ml_curr->e];
  const MVert *other_curr = (e_curr->v1 == pivot_v) ? &verts[e_curr->v2] : &verts[e_curr->v1];
  const MEdge *e_prev = &edges[ml_prev->e];
  const MVert *other_prev = (e_prev->v1 == pivot_v) ? &verts[e_prev->v2] : &verts[e_prev->v1];

  float vec_curr[3], vec_prev[3];
  sub_v3_v3v3(vec_curr, other_curr->co, pivot->co);
  normalize_v3(vec_curr);
  sub_v3_v3v3(vec_prev, other_prev->co, pivot->co);
  normalize_v3(vec_prev);

  BKE_lnor_space_define(data->lnor_space, *data->lnor, vec_curr, vec_prev, NULL);
  /* We know there is only one loop in this space,
   * no need to create a linklist in this case... */
  BKE_lnor_space_add_loop(lnors_spacearr, data->lnor_space, data->ml_curr_index, NULL, true);

  if (clnors_data) {
    /* Apply the custom normal stored for this loop, if any. */
    BKE_lnor_space_custom_data_to_normal(
        data->lnor_space, clnors_data[data->ml_curr_index], *data->lnor);
  }
}
1085
/* Handle a 'fan' loop task: walk the whole smooth fan of faces around the loop's vertex,
 * accumulate angle-weighted poly normals into one loop normal, assign it back to every loop
 * of the fan, and (optionally) build the fan's lnor space and validate/apply custom normals. */
static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSplitTaskData *data)
{
  MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
  float(*loopnors)[3] = common_data->loopnors;
  short(*clnors_data)[2] = common_data->clnors_data;

  const MVert *mverts = common_data->mverts;
  const MEdge *medges = common_data->medges;
  const MLoop *mloops = common_data->mloops;
  const MPoly *mpolys = common_data->mpolys;
  const int(*edge_to_loops)[2] = common_data->edge_to_loops;
  const int *loop_to_poly = common_data->loop_to_poly;
  const float(*polynors)[3] = common_data->polynors;

  MLoopNorSpace *lnor_space = data->lnor_space;
#if 0 /* Not needed for 'fan' loops. */
  float(*lnor)[3] = data->lnor;
#endif
  const MLoop *ml_curr = data->ml_curr;
  const MLoop *ml_prev = data->ml_prev;
  const int ml_curr_index = data->ml_curr_index;
  const int ml_prev_index = data->ml_prev_index;
  const int mp_index = data->mp_index;
  const int *e2l_prev = data->e2l_prev;

  BLI_Stack *edge_vectors = data->edge_vectors;

  /* Gah... We have to fan around current vertex, until we find the other non-smooth edge,
   * and accumulate face normals into the vertex!
   * Note in case this vertex has only one sharp edge, this is a waste because the normal is the
   * same as the vertex normal, but I do not see any easy way to detect that (would need to count
   * number of sharp edges per vertex, I doubt the additional memory usage would be worth it,
   * especially as it should not be a common case in real-life meshes anyway).
   */
  const unsigned int mv_pivot_index = ml_curr->v; /* The vertex we are "fanning" around! */
  const MVert *mv_pivot = &mverts[mv_pivot_index];

  /* ml_curr would be mlfan_prev if we needed that one. */
  const MEdge *me_org = &medges[ml_curr->e];

  const int *e2lfan_curr;
  float vec_curr[3], vec_prev[3], vec_org[3];
  const MLoop *mlfan_curr;
  /* Accumulator for the (angle-weighted) fan normal. */
  float lnor[3] = {0.0f, 0.0f, 0.0f};
  /* mlfan_vert_index: the loop of our current edge might not be the loop of our current vertex! */
  int mlfan_curr_index, mlfan_vert_index, mpfan_curr_index;

  /* We validate clnors data on the fly - cheapest way to do! */
  int clnors_avg[2] = {0, 0};
  short(*clnor_ref)[2] = NULL;
  int clnors_nbr = 0;
  bool clnors_invalid = false;

  /* Temp loop normal stack. */
  BLI_SMALLSTACK_DECLARE(normal, float *);
  /* Temp clnors stack. */
  BLI_SMALLSTACK_DECLARE(clnors, short *);

  /* The walk starts from the previous loop/edge of the entry loop. */
  e2lfan_curr = e2l_prev;
  mlfan_curr = ml_prev;
  mlfan_curr_index = ml_prev_index;
  mlfan_vert_index = ml_curr_index;
  mpfan_curr_index = mp_index;

  BLI_assert(mlfan_curr_index >= 0);
  BLI_assert(mlfan_vert_index >= 0);
  BLI_assert(mpfan_curr_index >= 0);

  /* Only need to compute previous edge's vector once, then we can just reuse old current one! */
  {
    const MVert *mv_2 = (me_org->v1 == mv_pivot_index) ? &mverts[me_org->v2] : &mverts[me_org->v1];

    sub_v3_v3v3(vec_org, mv_2->co, mv_pivot->co);
    normalize_v3(vec_org);
    copy_v3_v3(vec_prev, vec_org);

    if (lnors_spacearr) {
      BLI_stack_push(edge_vectors, vec_org);
    }
  }

  // printf("FAN: vert %d, start edge %d\n", mv_pivot_index, ml_curr->e);

  while (true) {
    const MEdge *me_curr = &medges[mlfan_curr->e];
    /* Compute edge vectors.
     * NOTE: We could pre-compute those into an array, in the first iteration, instead of computing
     * them twice (or more) here. However, time gained is not worth memory and time lost,
     * given the fact that this code should not be called that much in real-life meshes...
     */
    {
      const MVert *mv_2 = (me_curr->v1 == mv_pivot_index) ? &mverts[me_curr->v2] :
                                                            &mverts[me_curr->v1];

      sub_v3_v3v3(vec_curr, mv_2->co, mv_pivot->co);
      normalize_v3(vec_curr);
    }

    // printf("\thandling edge %d / loop %d\n", mlfan_curr->e, mlfan_curr_index);

    {
      /* Code similar to accumulate_vertex_normals_poly_v3. */
      /* Calculate angle between the two poly edges incident on this vertex. */
      const float fac = saacos(dot_v3v3(vec_curr, vec_prev));
      /* Accumulate the poly normal, weighted by that corner angle. */
      madd_v3_v3fl(lnor, polynors[mpfan_curr_index], fac);

      if (clnors_data) {
        /* Accumulate all clnors, if they are not all equal we have to fix that! */
        short(*clnor)[2] = &clnors_data[mlfan_vert_index];
        if (clnors_nbr) {
          clnors_invalid |= ((*clnor_ref)[0] != (*clnor)[0] || (*clnor_ref)[1] != (*clnor)[1]);
        }
        else {
          clnor_ref = clnor;
        }
        clnors_avg[0] += (*clnor)[0];
        clnors_avg[1] += (*clnor)[1];
        clnors_nbr++;
        /* We store here a pointer to all custom lnors processed. */
        BLI_SMALLSTACK_PUSH(clnors, (short *)*clnor);
      }
    }

    /* We store here a pointer to all loop-normals processed. */
    BLI_SMALLSTACK_PUSH(normal, (float *)(loopnors[mlfan_vert_index]));

    if (lnors_spacearr) {
      /* Assign current lnor space to current 'vertex' loop. */
      BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, mlfan_vert_index, NULL, false);
      if (me_curr != me_org) {
        /* We store here all edges-normalized vectors processed. */
        BLI_stack_push(edge_vectors, vec_curr);
      }
    }

    if (IS_EDGE_SHARP(e2lfan_curr) || (me_curr == me_org)) {
      /* Current edge is sharp and we have finished with this fan of faces around this vert,
       * or this vert is smooth, and we have completed a full turn around it.
       */
      // printf("FAN: Finished!\n");
      break;
    }

    copy_v3_v3(vec_prev, vec_curr);

    /* Find next loop of the smooth fan. */
    BKE_mesh_loop_manifold_fan_around_vert_next(mloops,
                                                mpolys,
                                                loop_to_poly,
                                                e2lfan_curr,
                                                mv_pivot_index,
                                                &mlfan_curr,
                                                &mlfan_curr_index,
                                                &mlfan_vert_index,
                                                &mpfan_curr_index);

    e2lfan_curr = edge_to_loops[mlfan_curr->e];
  }

  {
    float lnor_len = normalize_v3(lnor);

    /* If we are generating lnor spacearr, we can now define the one for this fan,
     * and optionally compute final lnor from custom data too!
     */
    if (lnors_spacearr) {
      if (UNLIKELY(lnor_len == 0.0f)) {
        /* Use vertex normal as fallback! */
        copy_v3_v3(lnor, loopnors[mlfan_vert_index]);
        lnor_len = 1.0f;
      }

      BKE_lnor_space_define(lnor_space, lnor, vec_org, vec_curr, edge_vectors);

      if (clnors_data) {
        if (clnors_invalid) {
          short *clnor;

          /* All clnors of the fan must match; replace mismatching ones by their average. */
          clnors_avg[0] /= clnors_nbr;
          clnors_avg[1] /= clnors_nbr;
          /* Fix/update all clnors of this fan with computed average value. */
          if (G.debug & G_DEBUG) {
            printf("Invalid clnors in this fan!\n");
          }
          while ((clnor = BLI_SMALLSTACK_POP(clnors))) {
            // print_v2("org clnor", clnor);
            clnor[0] = (short)clnors_avg[0];
            clnor[1] = (short)clnors_avg[1];
          }
          // print_v2("new clnors", clnors_avg);
        }
        /* Extra bonus: since small-stack is local to this function,
         * no more need to empty it at all cost! */

        BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor_ref, lnor);
      }
    }

    /* In case we get a zero normal here, just use vertex normal already set! */
    if (LIKELY(lnor_len != 0.0f)) {
      /* Copy back the final computed normal into all related loop-normals. */
      float *nor;

      while ((nor = BLI_SMALLSTACK_POP(normal))) {
        copy_v3_v3(nor, lnor);
      }
    }
    /* Extra bonus: since small-stack is local to this function,
     * no more need to empty it at all cost! */
  }
}
1298
/* Dispatch a single loop-split task to the 'single' or 'fan' handler.
 * A NULL e2l_prev tags the task as 'single' (see loop_split_generator()). */
static void loop_split_worker_do(LoopSplitTaskDataCommon *common_data,
                                 LoopSplitTaskData *data,
                                 BLI_Stack *edge_vectors)
{
  BLI_assert(data->ml_curr);

  if (data->e2l_prev == NULL) {
    /* 'Single' task: no need for edge_vectors. */
    split_loop_nor_single_do(common_data, data);
    return;
  }

  /* 'Fan' task: hand over the (empty) scratch stack of edge vectors. */
  BLI_assert((edge_vectors == NULL) || BLI_stack_is_empty(edge_vectors));
  data->edge_vectors = edge_vectors;
  split_loop_nor_fan_do(common_data, data);
}
1314
/* Task-pool callback: process one block of LOOP_SPLIT_TASK_BLOCK_SIZE loop-split tasks. */
static void loop_split_worker(TaskPool *__restrict pool, void *taskdata)
{
  LoopSplitTaskDataCommon *common_data = BLI_task_pool_user_data(pool);
  LoopSplitTaskData *data = taskdata;

  /* Scratch stack of edge vectors, only needed when computing the lnor space array. */
  BLI_Stack *edge_vectors = NULL;
  if (common_data->lnors_spacearr) {
    edge_vectors = BLI_stack_new(sizeof(float[3]), __func__);
  }

#ifdef DEBUG_TIME
  TIMEIT_START_AVERAGED(loop_split_worker);
#endif

  for (int i = 0; i < LOOP_SPLIT_TASK_BLOCK_SIZE; i++, data++) {
    /* The first NULL ml_curr marks the end of a (calloc'ed) task block. */
    if (data->ml_curr == NULL) {
      break;
    }
    loop_split_worker_do(common_data, data, edge_vectors);
  }

  if (edge_vectors) {
    BLI_stack_free(edge_vectors);
  }

#ifdef DEBUG_TIME
  TIMEIT_END_AVERAGED(loop_split_worker);
#endif
}
1346
1347 /**
1348 * Check whether given loop is part of an unknown-so-far cyclic smooth fan, or not.
1349 * Needed because cyclic smooth fans have no obvious 'entry point',
1350 * and yet we need to walk them once, and only once.
1351 */
static bool loop_split_generator_check_cyclic_smooth_fan(const MLoop *mloops,
                                                         const MPoly *mpolys,
                                                         const int (*edge_to_loops)[2],
                                                         const int *loop_to_poly,
                                                         const int *e2l_prev,
                                                         BLI_bitmap *skip_loops,
                                                         const MLoop *ml_curr,
                                                         const MLoop *ml_prev,
                                                         const int ml_curr_index,
                                                         const int ml_prev_index,
                                                         const int mp_curr_index)
{
  const unsigned int mv_pivot_index = ml_curr->v; /* The vertex we are "fanning" around! */
  const int *e2lfan_curr;
  const MLoop *mlfan_curr;
  /* mlfan_vert_index: the loop of our current edge might not be the loop of our current vertex! */
  int mlfan_curr_index, mlfan_vert_index, mpfan_curr_index;

  e2lfan_curr = e2l_prev;
  if (IS_EDGE_SHARP(e2lfan_curr)) {
    /* Sharp loop, so not a cyclic smooth fan... */
    return false;
  }

  /* Walk starts from the previous loop/edge, same as in split_loop_nor_fan_do(). */
  mlfan_curr = ml_prev;
  mlfan_curr_index = ml_prev_index;
  mlfan_vert_index = ml_curr_index;
  mpfan_curr_index = mp_curr_index;

  BLI_assert(mlfan_curr_index >= 0);
  BLI_assert(mlfan_vert_index >= 0);
  BLI_assert(mpfan_curr_index >= 0);

  /* Mark the entry loop as visited; the caller guarantees it was not visited before. */
  BLI_assert(!BLI_BITMAP_TEST(skip_loops, mlfan_vert_index));
  BLI_BITMAP_ENABLE(skip_loops, mlfan_vert_index);

  while (true) {
    /* Find next loop of the smooth fan. */
    BKE_mesh_loop_manifold_fan_around_vert_next(mloops,
                                                mpolys,
                                                loop_to_poly,
                                                e2lfan_curr,
                                                mv_pivot_index,
                                                &mlfan_curr,
                                                &mlfan_curr_index,
                                                &mlfan_vert_index,
                                                &mpfan_curr_index);

    e2lfan_curr = edge_to_loops[mlfan_curr->e];

    if (IS_EDGE_SHARP(e2lfan_curr)) {
      /* Sharp loop/edge, so not a cyclic smooth fan... */
      return false;
    }
    /* Smooth loop/edge... */
    if (BLI_BITMAP_TEST(skip_loops, mlfan_vert_index)) {
      if (mlfan_vert_index == ml_curr_index) {
        /* We walked around a whole cyclic smooth fan without finding any already-processed loop,
         * means we can use initial ml_curr/ml_prev edge as start for this smooth fan. */
        return true;
      }
      /* ... already checked in some previous looping, we can abort. */
      return false;
    }

    /* ... we can skip it in future, and keep checking the smooth fan. */
    BLI_BITMAP_ENABLE(skip_loops, mlfan_vert_index);
  }
}
1421
/* Walk all polygons/loops and generate one loop-split task per fan entry point:
 * a 'single' task when both edges around a loop's vertex are sharp, a 'fan' task otherwise.
 * With a task pool, tasks are batched in blocks of LOOP_SPLIT_TASK_BLOCK_SIZE and pushed to
 * loop_split_worker(); without a pool they are executed immediately (single-threaded). */
static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common_data)
{
  MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
  float(*loopnors)[3] = common_data->loopnors;

  const MLoop *mloops = common_data->mloops;
  const MPoly *mpolys = common_data->mpolys;
  const int *loop_to_poly = common_data->loop_to_poly;
  const int(*edge_to_loops)[2] = common_data->edge_to_loops;
  const int numLoops = common_data->numLoops;
  const int numPolys = common_data->numPolys;

  const MPoly *mp;
  int mp_index;

  const MLoop *ml_curr;
  const MLoop *ml_prev;
  int ml_curr_index;
  int ml_prev_index;

  /* Loops already handled (or rejected) by the cyclic-smooth-fan check. */
  BLI_bitmap *skip_loops = BLI_BITMAP_NEW(numLoops, __func__);

  LoopSplitTaskData *data_buff = NULL;
  int data_idx = 0;

  /* Temp edge vectors stack, only used when computing lnor spacearr
   * (and we are not multi-threading). */
  BLI_Stack *edge_vectors = NULL;

#ifdef DEBUG_TIME
  TIMEIT_START_AVERAGED(loop_split_generator);
#endif

  if (!pool) {
    if (lnors_spacearr) {
      edge_vectors = BLI_stack_new(sizeof(float[3]), __func__);
    }
  }

  /* We now know edges that can be smoothed (with their vector, and their two loops),
   * and edges that will be hard! Now, time to generate the normals.
   */
  for (mp = mpolys, mp_index = 0; mp_index < numPolys; mp++, mp_index++) {
    float(*lnors)[3];
    const int ml_last_index = (mp->loopstart + mp->totloop) - 1;
    /* ml_prev starts at the poly's last loop, so each iteration sees the edge pair
     * (prev loop's edge, current loop's edge) meeting at ml_curr's vertex. */
    ml_curr_index = mp->loopstart;
    ml_prev_index = ml_last_index;

    ml_curr = &mloops[ml_curr_index];
    ml_prev = &mloops[ml_prev_index];
    lnors = &loopnors[ml_curr_index];

    for (; ml_curr_index <= ml_last_index; ml_curr++, ml_curr_index++, lnors++) {
      const int *e2l_curr = edge_to_loops[ml_curr->e];
      const int *e2l_prev = edge_to_loops[ml_prev->e];

#if 0
      printf("Checking loop %d / edge %u / vert %u (sharp edge: %d, skiploop: %d)...",
             ml_curr_index,
             ml_curr->e,
             ml_curr->v,
             IS_EDGE_SHARP(e2l_curr),
             BLI_BITMAP_TEST_BOOL(skip_loops, ml_curr_index));
#endif

      /* A smooth edge, we have to check for cyclic smooth fan case.
       * If we find a new, never-processed cyclic smooth fan, we can do it now using that loop/edge
       * as 'entry point', otherwise we can skip it. */

      /* Note: In theory, we could make loop_split_generator_check_cyclic_smooth_fan() store
       * mlfan_vert_index'es and edge indexes in two stacks, to avoid having to fan again around
       * the vert during actual computation of clnor & clnorspace. However, this would complicate
       * the code, add more memory usage, and despite its logical complexity,
       * loop_manifold_fan_around_vert_next() is quite cheap in term of CPU cycles,
       * so really think it's not worth it. */
      if (!IS_EDGE_SHARP(e2l_curr) && (BLI_BITMAP_TEST(skip_loops, ml_curr_index) ||
                                       !loop_split_generator_check_cyclic_smooth_fan(mloops,
                                                                                     mpolys,
                                                                                     edge_to_loops,
                                                                                     loop_to_poly,
                                                                                     e2l_prev,
                                                                                     skip_loops,
                                                                                     ml_curr,
                                                                                     ml_prev,
                                                                                     ml_curr_index,
                                                                                     ml_prev_index,
                                                                                     mp_index))) {
        // printf("SKIPPING!\n");
      }
      else {
        LoopSplitTaskData *data, data_local;

        // printf("PROCESSING!\n");

        if (pool) {
          if (data_idx == 0) {
            /* Start a new (zeroed) block of tasks; the first NULL ml_curr acts as terminator. */
            data_buff = MEM_calloc_arrayN(
                LOOP_SPLIT_TASK_BLOCK_SIZE, sizeof(*data_buff), __func__);
          }
          data = &data_buff[data_idx];
        }
        else {
          data = &data_local;
          memset(data, 0, sizeof(*data));
        }

        if (IS_EDGE_SHARP(e2l_curr) && IS_EDGE_SHARP(e2l_prev)) {
          /* Both edges sharp: 'single' task (e2l_prev stays NULL from the zeroing above). */
          data->lnor = lnors;
          data->ml_curr = ml_curr;
          data->ml_prev = ml_prev;
          data->ml_curr_index = ml_curr_index;
#if 0 /* Not needed for 'single' loop. */
          data->ml_prev_index = ml_prev_index;
          data->e2l_prev = NULL; /* Tag as 'single' task. */
#endif
          data->mp_index = mp_index;
          if (lnors_spacearr) {
            data->lnor_space = BKE_lnor_space_create(lnors_spacearr);
          }
        }
        /* We *do not need* to check/tag loops as already computed!
         * Due to the fact a loop only links to one of its two edges,
         * a same fan *will never be walked more than once!*
         * Since we consider edges having neighbor polys with inverted
         * (flipped) normals as sharp, we are sure that no fan will be skipped,
         * even only considering the case (sharp curr_edge, smooth prev_edge),
         * and not the alternative (smooth curr_edge, sharp prev_edge).
         * All this due/thanks to link between normals and loop ordering (i.e. winding).
         */
        else {
#if 0 /* Not needed for 'fan' loops. */
          data->lnor = lnors;
#endif
          data->ml_curr = ml_curr;
          data->ml_prev = ml_prev;
          data->ml_curr_index = ml_curr_index;
          data->ml_prev_index = ml_prev_index;
          data->e2l_prev = e2l_prev; /* Also tag as 'fan' task. */
          data->mp_index = mp_index;
          if (lnors_spacearr) {
            data->lnor_space = BKE_lnor_space_create(lnors_spacearr);
          }
        }

        if (pool) {
          data_idx++;
          if (data_idx == LOOP_SPLIT_TASK_BLOCK_SIZE) {
            /* Block full: hand it to the pool (which takes ownership and frees it). */
            BLI_task_pool_push(pool, loop_split_worker, data_buff, true, NULL);
            data_idx = 0;
          }
        }
        else {
          loop_split_worker_do(common_data, data, edge_vectors);
        }
      }

      ml_prev = ml_curr;
      ml_prev_index = ml_curr_index;
    }
  }

  /* Last block of data... Since it is calloc'ed and we use first NULL item as stopper,
   * everything is fine. */
  if (pool && data_idx) {
    BLI_task_pool_push(pool, loop_split_worker, data_buff, true, NULL);
  }

  if (edge_vectors) {
    BLI_stack_free(edge_vectors);
  }
  MEM_freeN(skip_loops);

#ifdef DEBUG_TIME
  TIMEIT_END_AVERAGED(loop_split_generator);
#endif
}
1598
1599 /**
1600 * Compute split normals, i.e. vertex normals associated with each poly (hence 'loop normals').
1601 * Useful to materialize sharp edges (or non-smooth faces) without actually modifying the geometry
1602 * (splitting edges).
1603 */
void BKE_mesh_normals_loop_split(const MVert *mverts,
                                 const int UNUSED(numVerts),
                                 MEdge *medges,
                                 const int numEdges,
                                 MLoop *mloops,
                                 float (*r_loopnors)[3],
                                 const int numLoops,
                                 MPoly *mpolys,
                                 const float (*polynors)[3],
                                 const int numPolys,
                                 const bool use_split_normals,
                                 const float split_angle,
                                 MLoopNorSpaceArray *r_lnors_spacearr,
                                 short (*clnors_data)[2],
                                 int *r_loop_to_poly)
{
  /* For now this is not supported.
   * If we do not use split normals, we do not generate anything fancy! */
  BLI_assert(use_split_normals || !(r_lnors_spacearr));

  if (!use_split_normals) {
    /* In this case, we simply fill lnors with vnors (or fnors for flat faces), quite simple!
     * Note this is done here to keep some logic and consistency in this quite complex code,
     * since we may want to use lnors even when mesh's 'autosmooth' is disabled
     * (see e.g. mesh mapping code).
     * As usual, we could handle that on case-by-case basis,
     * but simpler to keep it well confined here.
     */
    int mp_index;

    for (mp_index = 0; mp_index < numPolys; mp_index++) {
      MPoly *mp = &mpolys[mp_index];
      int ml_index = mp->loopstart;
      const int ml_index_end = ml_index + mp->totloop;
      const bool is_poly_flat = ((mp->flag & ME_SMOOTH) == 0);

      for (; ml_index < ml_index_end; ml_index++) {
        if (r_loop_to_poly) {
          r_loop_to_poly[ml_index] = mp_index;
        }
        /* Flat poly: use the poly normal; smooth poly: use the vertex normal. */
        if (is_poly_flat) {
          copy_v3_v3(r_loopnors[ml_index], polynors[mp_index]);
        }
        else {
          normal_short_to_float_v3(r_loopnors[ml_index], mverts[mloops[ml_index].v].no);
        }
      }
    }
    return;
  }

  /**
   * Mapping edge -> loops.
   * If that edge is used by more than two loops (polys),
   * it is always sharp (and tagged as such, see below).
   * We also use the second loop index as a kind of flag:
   *
   * - smooth edge: > 0.
   * - sharp edge: < 0 (INDEX_INVALID || INDEX_UNSET).
   * - unset: INDEX_UNSET.
   *
   * Note that currently we only have two values for second loop of sharp edges.
   * However, if needed, we can store the negated value of loop index instead of INDEX_INVALID
   * to retrieve the real value later in code).
   * Note also that loose edges always have both values set to 0! */
  int(*edge_to_loops)[2] = MEM_calloc_arrayN((size_t)numEdges, sizeof(*edge_to_loops), __func__);

  /* Simple mapping from a loop to its polygon index. */
  int *loop_to_poly = r_loop_to_poly ?
                          r_loop_to_poly :
                          MEM_malloc_arrayN((size_t)numLoops, sizeof(*loop_to_poly), __func__);

  /* When using custom loop normals, disable the angle feature! */
  const bool check_angle = (split_angle < (float)M_PI) && (clnors_data == NULL);

  MLoopNorSpaceArray _lnors_spacearr = {NULL};

#ifdef DEBUG_TIME
  TIMEIT_START_AVERAGED(BKE_mesh_normals_loop_split);
#endif

  if (!r_lnors_spacearr && clnors_data) {
    /* We need to compute lnor spacearr if some custom lnor data are given to us! */
    r_lnors_spacearr = &_lnors_spacearr;
  }
  if (r_lnors_spacearr) {
    BKE_lnor_spacearr_init(r_lnors_spacearr, numLoops, MLNOR_SPACEARR_LOOP_INDEX);
  }

  /* Init data common to all tasks. */
  LoopSplitTaskDataCommon common_data = {
      .lnors_spacearr = r_lnors_spacearr,
      .loopnors = r_loopnors,
      .clnors_data = clnors_data,
      .mverts = mverts,
      .medges = medges,
      .mloops = mloops,
      .mpolys = mpolys,
      .edge_to_loops = edge_to_loops,
      .loop_to_poly = loop_to_poly,
      .polynors = polynors,
      .numEdges = numEdges,
      .numLoops = numLoops,
      .numPolys = numPolys,
  };

  /* This first loop check which edges are actually smooth, and compute edge vectors. */
  mesh_edges_sharp_tag(&common_data, check_angle, split_angle, false);

  if (numLoops < LOOP_SPLIT_TASK_BLOCK_SIZE * 8) {
    /* Not enough loops to be worth the whole threading overhead... */
    loop_split_generator(NULL, &common_data);
  }
  else {
    TaskPool *task_pool = BLI_task_pool_create(&common_data, TASK_PRIORITY_HIGH);

    loop_split_generator(task_pool, &common_data);

    BLI_task_pool_work_and_wait(task_pool);

    BLI_task_pool_free(task_pool);
  }

  MEM_freeN(edge_to_loops);
  if (!r_loop_to_poly) {
    MEM_freeN(loop_to_poly);
  }

  if (r_lnors_spacearr) {
    /* Only free the space array when it was our local fallback (caller did not ask for it). */
    if (r_lnors_spacearr == &_lnors_spacearr) {
      BKE_lnor_spacearr_free(r_lnors_spacearr);
    }
  }

#ifdef DEBUG_TIME
  TIMEIT_END_AVERAGED(BKE_mesh_normals_loop_split);
#endif
}
1742
1743 #undef INDEX_UNSET
1744 #undef INDEX_INVALID
1745 #undef IS_EDGE_SHARP
1746
1747 /**
1748 * Compute internal representation of given custom normals (as an array of float[2]).
1749 * It also makes sure the mesh matches those custom normals, by setting sharp edges flag as needed
1750 * to get a same custom lnor for all loops sharing a same smooth fan.
 * If use_vertices is true, r_custom_loopnors is assumed to be per-vertex, not per-loop
1752 * (this allows to set whole vert's normals at once, useful in some cases).
1753 * r_custom_loopnors is expected to have normalized normals, or zero ones,
1754 * in which case they will be replaced by default loop/vertex normal.
1755 */
/**
 * Set custom (loop or vertex) normals on the given mesh data
 * (shared implementation behind #BKE_mesh_normals_loop_custom_set and
 * #BKE_mesh_normals_loop_custom_from_vertices_set).
 *
 * \param r_custom_loopnors: Custom normals to apply. Zero vectors in it are replaced in-place by
 * the automatically computed defaults (loop normals, or vertex normals when \a use_vertices).
 * Indexed by loop, or by vertex when \a use_vertices is true.
 * \param r_clnors_data: Destination custom loop normal data layer (two shorts per loop),
 * filled by encoding the custom normals in their loop normal spaces.
 * \param use_vertices: When true, \a r_custom_loopnors holds per-vertex normals (numVerts items).
 *
 * \note May tag edges with #ME_SHARP so that the recomputed smooth fans match the given normals.
 */
static void mesh_normals_loop_custom_set(const MVert *mverts,
                                         const int numVerts,
                                         MEdge *medges,
                                         const int numEdges,
                                         MLoop *mloops,
                                         float (*r_custom_loopnors)[3],
                                         const int numLoops,
                                         MPoly *mpolys,
                                         const float (*polynors)[3],
                                         const int numPolys,
                                         short (*r_clnors_data)[2],
                                         const bool use_vertices)
{
  /* We *may* make that poor BKE_mesh_normals_loop_split() even more complex by making it handling
   * that feature too, would probably be more efficient in absolute.
   * However, this function *is not* performance-critical, since it is mostly expected to be called
   * by io addons when importing custom normals, and modifier
   * (and perhaps from some editing tools later?).
   * So better to keep some simplicity here, and just call BKE_mesh_normals_loop_split() twice!
   */
  MLoopNorSpaceArray lnors_spacearr = {NULL};
  BLI_bitmap *done_loops = BLI_BITMAP_NEW((size_t)numLoops, __func__);
  float(*lnors)[3] = MEM_calloc_arrayN((size_t)numLoops, sizeof(*lnors), __func__);
  int *loop_to_poly = MEM_malloc_arrayN((size_t)numLoops, sizeof(int), __func__);
  /* In this case we always consider split nors as ON,
   * and do not want to use angle to define smooth fans! */
  const bool use_split_normals = true;
  const float split_angle = (float)M_PI;

  BLI_SMALLSTACK_DECLARE(clnors_data, short *);

  /* Compute current lnor spacearr. */
  BKE_mesh_normals_loop_split(mverts,
                              numVerts,
                              medges,
                              numEdges,
                              mloops,
                              lnors,
                              numLoops,
                              mpolys,
                              polynors,
                              numPolys,
                              use_split_normals,
                              split_angle,
                              &lnors_spacearr,
                              NULL,
                              loop_to_poly);

  /* Set all given zero vectors to their default value. */
  if (use_vertices) {
    for (int i = 0; i < numVerts; i++) {
      if (is_zero_v3(r_custom_loopnors[i])) {
        normal_short_to_float_v3(r_custom_loopnors[i], mverts[i].no);
      }
    }
  }
  else {
    for (int i = 0; i < numLoops; i++) {
      if (is_zero_v3(r_custom_loopnors[i])) {
        copy_v3_v3(r_custom_loopnors[i], lnors[i]);
      }
    }
  }

  BLI_assert(lnors_spacearr.data_type == MLNOR_SPACEARR_LOOP_INDEX);

  /* Now, check each current smooth fan (one lnor space per smooth fan!),
   * and if all its matching custom lnors are not (enough) equal, add sharp edges as needed.
   * This way, next time we run BKE_mesh_normals_loop_split(), we'll get lnor spacearr/smooth fans
   * matching given custom lnors.
   * Note this code *will never* unsharp edges! And quite obviously,
   * when we set custom normals per vertices, running this is absolutely useless.
   */
  if (!use_vertices) {
    for (int i = 0; i < numLoops; i++) {
      if (!lnors_spacearr.lspacearr[i]) {
        /* This should not happen in theory, but in some rare case (probably ugly geometry)
         * we can get some NULL loopspacearr at this point. :/
         * Maybe we should set those loops' edges as sharp?
         */
        BLI_BITMAP_ENABLE(done_loops, i);
        if (G.debug & G_DEBUG) {
          printf("WARNING! Getting invalid NULL loop space for loop %d!\n", i);
        }
        continue;
      }

      if (!BLI_BITMAP_TEST(done_loops, i)) {
        /* Notes:
         * * In case of mono-loop smooth fan, we have nothing to do.
         * * Loops in this linklist are ordered (in reversed order compared to how they were
         *   discovered by BKE_mesh_normals_loop_split(), but this is not a problem).
         *   Which means if we find a mismatching clnor,
         *   we know all remaining loops will have to be in a new, different smooth fan/lnor space.
         * * In smooth fan case, we compare each clnor against a ref one,
         *   to avoid small differences adding up into a real big one in the end!
         */
        if (lnors_spacearr.lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
          BLI_BITMAP_ENABLE(done_loops, i);
          continue;
        }

        LinkNode *loops = lnors_spacearr.lspacearr[i]->loops;
        MLoop *prev_ml = NULL;
        const float *org_nor = NULL;

        while (loops) {
          const int lidx = POINTER_AS_INT(loops->link);
          MLoop *ml = &mloops[lidx];
          const int nidx = lidx;
          float *nor = r_custom_loopnors[nidx];

          if (!org_nor) {
            org_nor = nor;
          }
          else if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
            /* Current normal differs too much from org one, we have to tag the edge between
             * previous loop's face and current's one as sharp.
             * We know those two loops do not point to the same edge,
             * since we do not allow reversed winding in a same smooth fan.
             */
            const MPoly *mp = &mpolys[loop_to_poly[lidx]];
            const MLoop *mlp =
                &mloops[(lidx == mp->loopstart) ? mp->loopstart + mp->totloop - 1 : lidx - 1];
            medges[(prev_ml->e == mlp->e) ? prev_ml->e : ml->e].flag |= ME_SHARP;

            org_nor = nor;
          }

          prev_ml = ml;
          loops = loops->next;
          BLI_BITMAP_ENABLE(done_loops, lidx);
        }

        /* We also have to check between last and first loops,
         * otherwise we may miss some sharp edges here!
         * This is just a simplified version of above while loop.
         * See T45984. */
        loops = lnors_spacearr.lspacearr[i]->loops;
        if (loops && org_nor) {
          const int lidx = POINTER_AS_INT(loops->link);
          MLoop *ml = &mloops[lidx];
          const int nidx = lidx;
          float *nor = r_custom_loopnors[nidx];

          if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) {
            const MPoly *mp = &mpolys[loop_to_poly[lidx]];
            const MLoop *mlp =
                &mloops[(lidx == mp->loopstart) ? mp->loopstart + mp->totloop - 1 : lidx - 1];
            medges[(prev_ml->e == mlp->e) ? prev_ml->e : ml->e].flag |= ME_SHARP;
          }
        }
      }
    }

    /* And now, recompute our new auto lnors and lnor spacearr! */
    BKE_lnor_spacearr_clear(&lnors_spacearr);
    BKE_mesh_normals_loop_split(mverts,
                                numVerts,
                                medges,
                                numEdges,
                                mloops,
                                lnors,
                                numLoops,
                                mpolys,
                                polynors,
                                numPolys,
                                use_split_normals,
                                split_angle,
                                &lnors_spacearr,
                                NULL,
                                loop_to_poly);
  }
  else {
    BLI_bitmap_set_all(done_loops, true, (size_t)numLoops);
  }

  /* And we just have to convert plain object-space custom normals to our
   * lnor space-encoded ones. */
  for (int i = 0; i < numLoops; i++) {
    if (!lnors_spacearr.lspacearr[i]) {
      BLI_BITMAP_DISABLE(done_loops, i);
      if (G.debug & G_DEBUG) {
        printf("WARNING! Still getting invalid NULL loop space in second loop for loop %d!\n", i);
      }
      continue;
    }

    if (BLI_BITMAP_TEST_BOOL(done_loops, i)) {
      /* Note we accumulate and average all custom normals in current smooth fan,
       * to avoid getting different clnors data (tiny differences in plain custom normals can
       * give rather huge differences in computed 2D factors).
       */
      LinkNode *loops = lnors_spacearr.lspacearr[i]->loops;
      if (lnors_spacearr.lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
        /* Single-loop fan: `loops` is the loop index itself, not a list pointer. */
        BLI_assert(POINTER_AS_INT(loops) == i);
        const int nidx = use_vertices ? (int)mloops[i].v : i;
        float *nor = r_custom_loopnors[nidx];

        BKE_lnor_space_custom_normal_to_data(lnors_spacearr.lspacearr[i], nor, r_clnors_data[i]);
        BLI_BITMAP_DISABLE(done_loops, i);
      }
      else {
        int nbr_nors = 0;
        float avg_nor[3];
        short clnor_data_tmp[2], *clnor_data;

        zero_v3(avg_nor);
        while (loops) {
          const int lidx = POINTER_AS_INT(loops->link);
          const int nidx = use_vertices ? (int)mloops[lidx].v : lidx;
          float *nor = r_custom_loopnors[nidx];

          nbr_nors++;
          add_v3_v3(avg_nor, nor);
          BLI_SMALLSTACK_PUSH(clnors_data, (short *)r_clnors_data[lidx]);

          loops = loops->next;
          BLI_BITMAP_DISABLE(done_loops, lidx);
        }

        /* Encode the fan-averaged normal once, then write it to every loop of the fan. */
        mul_v3_fl(avg_nor, 1.0f / (float)nbr_nors);
        BKE_lnor_space_custom_normal_to_data(lnors_spacearr.lspacearr[i], avg_nor, clnor_data_tmp);

        while ((clnor_data = BLI_SMALLSTACK_POP(clnors_data))) {
          clnor_data[0] = clnor_data_tmp[0];
          clnor_data[1] = clnor_data_tmp[1];
        }
      }
    }
  }

  MEM_freeN(lnors);
  MEM_freeN(loop_to_poly);
  MEM_freeN(done_loops);
  BKE_lnor_spacearr_free(&lnors_spacearr);
}
1993
/**
 * Set the custom normals of the mesh loops (per-loop variant),
 * see #mesh_normals_loop_custom_set for details.
 */
void BKE_mesh_normals_loop_custom_set(const MVert *mverts,
                                      const int numVerts,
                                      MEdge *medges,
                                      const int numEdges,
                                      MLoop *mloops,
                                      float (*r_custom_loopnors)[3],
                                      const int numLoops,
                                      MPoly *mpolys,
                                      const float (*polynors)[3],
                                      const int numPolys,
                                      short (*r_clnors_data)[2])
{
  /* Thin wrapper: `use_vertices = false` selects per-loop custom normals. */
  mesh_normals_loop_custom_set(mverts,
                               numVerts,
                               medges,
                               numEdges,
                               mloops,
                               r_custom_loopnors,
                               numLoops,
                               mpolys,
                               polynors,
                               numPolys,
                               r_clnors_data,
                               false);
}
2019
/**
 * Set the custom normals of the mesh loops from per-vertex normals,
 * see #mesh_normals_loop_custom_set for details.
 */
void BKE_mesh_normals_loop_custom_from_vertices_set(const MVert *mverts,
                                                    float (*r_custom_vertnors)[3],
                                                    const int numVerts,
                                                    MEdge *medges,
                                                    const int numEdges,
                                                    MLoop *mloops,
                                                    const int numLoops,
                                                    MPoly *mpolys,
                                                    const float (*polynors)[3],
                                                    const int numPolys,
                                                    short (*r_clnors_data)[2])
{
  /* Thin wrapper: `use_vertices = true` selects per-vertex custom normals. */
  mesh_normals_loop_custom_set(mverts,
                               numVerts,
                               medges,
                               numEdges,
                               mloops,
                               r_custom_vertnors,
                               numLoops,
                               mpolys,
                               polynors,
                               numPolys,
                               r_clnors_data,
                               true);
}
2045
/**
 * Prepare the custom loop normal layer and polygon normals,
 * then apply the given custom normals to \a mesh.
 */
static void mesh_set_custom_normals(Mesh *mesh, float (*r_custom_nors)[3], const bool use_vertices)
{
  const int loops_num = mesh->totloop;

  /* Reuse an existing custom loop normal layer when present (cleared first),
   * otherwise add a fresh zero-initialized one. */
  short(*clnors)[2] = CustomData_get_layer(&mesh->ldata, CD_CUSTOMLOOPNORMAL);
  if (clnors == NULL) {
    clnors = CustomData_add_layer(&mesh->ldata, CD_CUSTOMLOOPNORMAL, CD_CALLOC, NULL, loops_num);
  }
  else {
    memset(clnors, 0, sizeof(*clnors) * (size_t)loops_num);
  }

  /* Polygon normals are required; compute a temporary array when no cached layer exists. */
  float(*polynors)[3] = CustomData_get_layer(&mesh->pdata, CD_NORMAL);
  bool free_polynors = false;
  if (polynors == NULL) {
    polynors = MEM_mallocN(sizeof(float[3]) * (size_t)mesh->totpoly, __func__);
    BKE_mesh_calc_normals_poly(mesh->mvert,
                               NULL,
                               mesh->totvert,
                               mesh->mloop,
                               mesh->mpoly,
                               mesh->totloop,
                               mesh->totpoly,
                               polynors,
                               false);
    free_polynors = true;
  }

  mesh_normals_loop_custom_set(mesh->mvert,
                               mesh->totvert,
                               mesh->medge,
                               mesh->totedge,
                               mesh->mloop,
                               r_custom_nors,
                               mesh->totloop,
                               mesh->mpoly,
                               polynors,
                               mesh->totpoly,
                               clnors,
                               use_vertices);

  if (free_polynors) {
    MEM_freeN(polynors);
  }
}
2092
2093 /**
2094 * Higher level functions hiding most of the code needed around call to
2095 * #BKE_mesh_normals_loop_custom_set().
2096 *
2097 * \param r_custom_loopnors: is not const, since code will replace zero_v3 normals there
2098 * with automatically computed vectors.
2099 */
void BKE_mesh_set_custom_normals(Mesh *mesh, float (*r_custom_loopnors)[3])
{
  /* Per-loop variant: delegate to the shared implementation. */
  mesh_set_custom_normals(mesh, r_custom_loopnors, false);
}
2104
2105 /**
2106 * Higher level functions hiding most of the code needed around call to
2107 * #BKE_mesh_normals_loop_custom_from_vertices_set().
2108 *
2109 * \param r_custom_vertnors: is not const, since code will replace zero_v3 normals there
2110 * with automatically computed vectors.
2111 */
void BKE_mesh_set_custom_normals_from_vertices(Mesh *mesh, float (*r_custom_vertnors)[3])
{
  /* Per-vertex variant: delegate to the shared implementation. */
  mesh_set_custom_normals(mesh, r_custom_vertnors, true);
}
2116
2117 /**
2118 * Computes average per-vertex normals from given custom loop normals.
2119 *
2120 * \param clnors: The computed custom loop normals.
2121 * \param r_vert_clnors: The (already allocated) array where to store averaged per-vertex normals.
2122 */
BKE_mesh_normals_loop_to_vertex(const int numVerts,const MLoop * mloops,const int numLoops,const float (* clnors)[3],float (* r_vert_clnors)[3])2123 void BKE_mesh_normals_loop_to_vertex(const int numVerts,
2124 const MLoop *mloops,
2125 const int numLoops,
2126 const float (*clnors)[3],
2127 float (*r_vert_clnors)[3])
2128 {
2129 int *vert_loops_nbr = MEM_calloc_arrayN((size_t)numVerts, sizeof(*vert_loops_nbr), __func__);
2130
2131 copy_vn_fl((float *)r_vert_clnors, 3 * numVerts, 0.0f);
2132
2133 int i;
2134 const MLoop *ml;
2135 for (i = 0, ml = mloops; i < numLoops; i++, ml++) {
2136 const unsigned int v = ml->v;
2137
2138 add_v3_v3(r_vert_clnors[v], clnors[i]);
2139 vert_loops_nbr[v]++;
2140 }
2141
2142 for (i = 0; i < numVerts; i++) {
2143 mul_v3_fl(r_vert_clnors[i], 1.0f / (float)vert_loops_nbr[i]);
2144 }
2145
2146 MEM_freeN(vert_loops_nbr);
2147 }
2148
2149 #undef LNOR_SPACE_TRIGO_THRESHOLD
2150
2151 /** \} */
2152
2153 /* -------------------------------------------------------------------- */
2154 /** \name Polygon Calculations
2155 * \{ */
2156
2157 /*
2158 * COMPUTE POLY NORMAL
2159 *
2160 * Computes the normal of a planar
2161 * polygon See Graphics Gems for
2162 * computing newell normal.
2163 */
mesh_calc_ngon_normal(const MPoly * mpoly,const MLoop * loopstart,const MVert * mvert,float normal[3])2164 static void mesh_calc_ngon_normal(const MPoly *mpoly,
2165 const MLoop *loopstart,
2166 const MVert *mvert,
2167 float normal[3])
2168 {
2169 const int nverts = mpoly->totloop;
2170 const float *v_prev = mvert[loopstart[nverts - 1].v].co;
2171 const float *v_curr;
2172
2173 zero_v3(normal);
2174
2175 /* Newell's Method */
2176 for (int i = 0; i < nverts; i++) {
2177 v_curr = mvert[loopstart[i].v].co;
2178 add_newell_cross_v3_v3v3(normal, v_prev, v_curr);
2179 v_prev = v_curr;
2180 }
2181
2182 if (UNLIKELY(normalize_v3(normal) == 0.0f)) {
2183 normal[2] = 1.0f; /* other axis set to 0.0 */
2184 }
2185 }
2186
BKE_mesh_calc_poly_normal(const MPoly * mpoly,const MLoop * loopstart,const MVert * mvarray,float r_no[3])2187 void BKE_mesh_calc_poly_normal(const MPoly *mpoly,
2188 const MLoop *loopstart,
2189 const MVert *mvarray,
2190 float r_no[3])
2191 {
2192 if (mpoly->totloop > 4) {
2193 mesh_calc_ngon_normal(mpoly, loopstart, mvarray, r_no);
2194 }
2195 else if (mpoly->totloop == 3) {
2196 normal_tri_v3(
2197 r_no, mvarray[loopstart[0].v].co, mvarray[loopstart[1].v].co, mvarray[loopstart[2].v].co);
2198 }
2199 else if (mpoly->totloop == 4) {
2200 normal_quad_v3(r_no,
2201 mvarray[loopstart[0].v].co,
2202 mvarray[loopstart[1].v].co,
2203 mvarray[loopstart[2].v].co,
2204 mvarray[loopstart[3].v].co);
2205 }
2206 else { /* horrible, two sided face! */
2207 r_no[0] = 0.0;
2208 r_no[1] = 0.0;
2209 r_no[2] = 1.0;
2210 }
2211 }
2212 /* duplicate of function above _but_ takes coords rather than mverts */
mesh_calc_ngon_normal_coords(const MPoly * mpoly,const MLoop * loopstart,const float (* vertex_coords)[3],float r_normal[3])2213 static void mesh_calc_ngon_normal_coords(const MPoly *mpoly,
2214 const MLoop *loopstart,
2215 const float (*vertex_coords)[3],
2216 float r_normal[3])
2217 {
2218 const int nverts = mpoly->totloop;
2219 const float *v_prev = vertex_coords[loopstart[nverts - 1].v];
2220 const float *v_curr;
2221
2222 zero_v3(r_normal);
2223
2224 /* Newell's Method */
2225 for (int i = 0; i < nverts; i++) {
2226 v_curr = vertex_coords[loopstart[i].v];
2227 add_newell_cross_v3_v3v3(r_normal, v_prev, v_curr);
2228 v_prev = v_curr;
2229 }
2230
2231 if (UNLIKELY(normalize_v3(r_normal) == 0.0f)) {
2232 r_normal[2] = 1.0f; /* other axis set to 0.0 */
2233 }
2234 }
2235
BKE_mesh_calc_poly_normal_coords(const MPoly * mpoly,const MLoop * loopstart,const float (* vertex_coords)[3],float r_no[3])2236 void BKE_mesh_calc_poly_normal_coords(const MPoly *mpoly,
2237 const MLoop *loopstart,
2238 const float (*vertex_coords)[3],
2239 float r_no[3])
2240 {
2241 if (mpoly->totloop > 4) {
2242 mesh_calc_ngon_normal_coords(mpoly, loopstart, vertex_coords, r_no);
2243 }
2244 else if (mpoly->totloop == 3) {
2245 normal_tri_v3(r_no,
2246 vertex_coords[loopstart[0].v],
2247 vertex_coords[loopstart[1].v],
2248 vertex_coords[loopstart[2].v]);
2249 }
2250 else if (mpoly->totloop == 4) {
2251 normal_quad_v3(r_no,
2252 vertex_coords[loopstart[0].v],
2253 vertex_coords[loopstart[1].v],
2254 vertex_coords[loopstart[2].v],
2255 vertex_coords[loopstart[3].v]);
2256 }
2257 else { /* horrible, two sided face! */
2258 r_no[0] = 0.0;
2259 r_no[1] = 0.0;
2260 r_no[2] = 1.0;
2261 }
2262 }
2263
mesh_calc_ngon_center(const MPoly * mpoly,const MLoop * loopstart,const MVert * mvert,float cent[3])2264 static void mesh_calc_ngon_center(const MPoly *mpoly,
2265 const MLoop *loopstart,
2266 const MVert *mvert,
2267 float cent[3])
2268 {
2269 const float w = 1.0f / (float)mpoly->totloop;
2270
2271 zero_v3(cent);
2272
2273 for (int i = 0; i < mpoly->totloop; i++) {
2274 madd_v3_v3fl(cent, mvert[(loopstart++)->v].co, w);
2275 }
2276 }
2277
BKE_mesh_calc_poly_center(const MPoly * mpoly,const MLoop * loopstart,const MVert * mvarray,float r_cent[3])2278 void BKE_mesh_calc_poly_center(const MPoly *mpoly,
2279 const MLoop *loopstart,
2280 const MVert *mvarray,
2281 float r_cent[3])
2282 {
2283 if (mpoly->totloop == 3) {
2284 mid_v3_v3v3v3(r_cent,
2285 mvarray[loopstart[0].v].co,
2286 mvarray[loopstart[1].v].co,
2287 mvarray[loopstart[2].v].co);
2288 }
2289 else if (mpoly->totloop == 4) {
2290 mid_v3_v3v3v3v3(r_cent,
2291 mvarray[loopstart[0].v].co,
2292 mvarray[loopstart[1].v].co,
2293 mvarray[loopstart[2].v].co,
2294 mvarray[loopstart[3].v].co);
2295 }
2296 else {
2297 mesh_calc_ngon_center(mpoly, loopstart, mvarray, r_cent);
2298 }
2299 }
2300
2301 /* note, passing polynormal is only a speedup so we can skip calculating it */
BKE_mesh_calc_poly_area(const MPoly * mpoly,const MLoop * loopstart,const MVert * mvarray)2302 float BKE_mesh_calc_poly_area(const MPoly *mpoly, const MLoop *loopstart, const MVert *mvarray)
2303 {
2304 if (mpoly->totloop == 3) {
2305 return area_tri_v3(
2306 mvarray[loopstart[0].v].co, mvarray[loopstart[1].v].co, mvarray[loopstart[2].v].co);
2307 }
2308
2309 const MLoop *l_iter = loopstart;
2310 float(*vertexcos)[3] = BLI_array_alloca(vertexcos, (size_t)mpoly->totloop);
2311
2312 /* pack vertex cos into an array for area_poly_v3 */
2313 for (int i = 0; i < mpoly->totloop; i++, l_iter++) {
2314 copy_v3_v3(vertexcos[i], mvarray[l_iter->v].co);
2315 }
2316
2317 /* finally calculate the area */
2318 float area = area_poly_v3((const float(*)[3])vertexcos, (unsigned int)mpoly->totloop);
2319
2320 return area;
2321 }
2322
BKE_mesh_calc_area(const Mesh * me)2323 float BKE_mesh_calc_area(const Mesh *me)
2324 {
2325 MVert *mvert = me->mvert;
2326 MLoop *mloop = me->mloop;
2327 MPoly *mpoly = me->mpoly;
2328
2329 MPoly *mp;
2330 int i = me->totpoly;
2331 float total_area = 0;
2332
2333 for (mp = mpoly; i--; mp++) {
2334 MLoop *ml_start = &mloop[mp->loopstart];
2335
2336 total_area += BKE_mesh_calc_poly_area(mp, ml_start, mvert);
2337 }
2338 return total_area;
2339 }
2340
BKE_mesh_calc_poly_uv_area(const MPoly * mpoly,const MLoopUV * uv_array)2341 float BKE_mesh_calc_poly_uv_area(const MPoly *mpoly, const MLoopUV *uv_array)
2342 {
2343
2344 int i, l_iter = mpoly->loopstart;
2345 float area;
2346 float(*vertexcos)[2] = BLI_array_alloca(vertexcos, (size_t)mpoly->totloop);
2347
2348 /* pack vertex cos into an array for area_poly_v2 */
2349 for (i = 0; i < mpoly->totloop; i++, l_iter++) {
2350 copy_v2_v2(vertexcos[i], uv_array[l_iter].uv);
2351 }
2352
2353 /* finally calculate the area */
2354 area = area_poly_v2((const float(*)[2])vertexcos, (unsigned int)mpoly->totloop);
2355
2356 return area;
2357 }
2358
2359 /**
2360 * Calculate the volume and volume-weighted centroid of the volume
2361 * formed by the polygon and the origin.
2362 * Results will be negative if the origin is "outside" the polygon
2363 * (+ve normal side), but the polygon may be non-planar with no effect.
2364 *
2365 * Method from:
2366 * - http://forums.cgsociety.org/archive/index.php?t-756235.html
2367 * - http://www.globalspec.com/reference/52702/203279/4-8-the-centroid-of-a-tetrahedron
2368 *
2369 * \note
2370 * - Volume is 6x actual volume, and centroid is 4x actual volume-weighted centroid
2371 * (so division can be done once at the end).
2372 * - Results will have bias if polygon is non-planar.
2373 * - The resulting volume will only be correct if the mesh is manifold and has consistent
2374 * face winding (non-contiguous face normals or holes in the mesh surface).
2375 */
UNUSED_FUNCTION(mesh_calc_poly_volume_centroid)2376 static float UNUSED_FUNCTION(mesh_calc_poly_volume_centroid)(const MPoly *mpoly,
2377 const MLoop *loopstart,
2378 const MVert *mvarray,
2379 float r_cent[3])
2380 {
2381 const float *v_pivot, *v_step1;
2382 float total_volume = 0.0f;
2383
2384 zero_v3(r_cent);
2385
2386 v_pivot = mvarray[loopstart[0].v].co;
2387 v_step1 = mvarray[loopstart[1].v].co;
2388
2389 for (int i = 2; i < mpoly->totloop; i++) {
2390 const float *v_step2 = mvarray[loopstart[i].v].co;
2391
2392 /* Calculate the 6x volume of the tetrahedron formed by the 3 vertices
2393 * of the triangle and the origin as the fourth vertex */
2394 const float tetra_volume = volume_tri_tetrahedron_signed_v3_6x(v_pivot, v_step1, v_step2);
2395 total_volume += tetra_volume;
2396
2397 /* Calculate the centroid of the tetrahedron formed by the 3 vertices
2398 * of the triangle and the origin as the fourth vertex.
2399 * The centroid is simply the average of the 4 vertices.
2400 *
2401 * Note that the vector is 4x the actual centroid
2402 * so the division can be done once at the end. */
2403 for (uint j = 0; j < 3; j++) {
2404 r_cent[j] += tetra_volume * (v_pivot[j] + v_step1[j] + v_step2[j]);
2405 }
2406
2407 v_step1 = v_step2;
2408 }
2409
2410 return total_volume;
2411 }
2412
2413 /**
2414 * A version of mesh_calc_poly_volume_centroid that takes an initial reference center,
2415 * use this to increase numeric stability as the quality of the result becomes
2416 * very low quality as the value moves away from 0.0, see: T65986.
2417 */
mesh_calc_poly_volume_centroid_with_reference_center(const MPoly * mpoly,const MLoop * loopstart,const MVert * mvarray,const float reference_center[3],float r_cent[3])2418 static float mesh_calc_poly_volume_centroid_with_reference_center(const MPoly *mpoly,
2419 const MLoop *loopstart,
2420 const MVert *mvarray,
2421 const float reference_center[3],
2422 float r_cent[3])
2423 {
2424 /* See: mesh_calc_poly_volume_centroid for comments. */
2425 float v_pivot[3], v_step1[3];
2426 float total_volume = 0.0f;
2427 zero_v3(r_cent);
2428 sub_v3_v3v3(v_pivot, mvarray[loopstart[0].v].co, reference_center);
2429 sub_v3_v3v3(v_step1, mvarray[loopstart[1].v].co, reference_center);
2430 for (int i = 2; i < mpoly->totloop; i++) {
2431 float v_step2[3];
2432 sub_v3_v3v3(v_step2, mvarray[loopstart[i].v].co, reference_center);
2433 const float tetra_volume = volume_tri_tetrahedron_signed_v3_6x(v_pivot, v_step1, v_step2);
2434 total_volume += tetra_volume;
2435 for (uint j = 0; j < 3; j++) {
2436 r_cent[j] += tetra_volume * (v_pivot[j] + v_step1[j] + v_step2[j]);
2437 }
2438 copy_v3_v3(v_step1, v_step2);
2439 }
2440 return total_volume;
2441 }
2442
2443 /**
2444 * \note
2445 * - Results won't be correct if polygon is non-planar.
2446 * - This has the advantage over #mesh_calc_poly_volume_centroid
2447 * that it doesn't depend on solid geometry, instead it weights the surface by volume.
2448 */
mesh_calc_poly_area_centroid(const MPoly * mpoly,const MLoop * loopstart,const MVert * mvarray,float r_cent[3])2449 static float mesh_calc_poly_area_centroid(const MPoly *mpoly,
2450 const MLoop *loopstart,
2451 const MVert *mvarray,
2452 float r_cent[3])
2453 {
2454 float total_area = 0.0f;
2455 float v1[3], v2[3], v3[3], normal[3], tri_cent[3];
2456
2457 BKE_mesh_calc_poly_normal(mpoly, loopstart, mvarray, normal);
2458 copy_v3_v3(v1, mvarray[loopstart[0].v].co);
2459 copy_v3_v3(v2, mvarray[loopstart[1].v].co);
2460 zero_v3(r_cent);
2461
2462 for (int i = 2; i < mpoly->totloop; i++) {
2463 copy_v3_v3(v3, mvarray[loopstart[i].v].co);
2464
2465 float tri_area = area_tri_signed_v3(v1, v2, v3, normal);
2466 total_area += tri_area;
2467
2468 mid_v3_v3v3v3(tri_cent, v1, v2, v3);
2469 madd_v3_v3fl(r_cent, tri_cent, tri_area);
2470
2471 copy_v3_v3(v2, v3);
2472 }
2473
2474 mul_v3_fl(r_cent, 1.0f / total_area);
2475
2476 return total_area;
2477 }
2478
BKE_mesh_calc_poly_angles(const MPoly * mpoly,const MLoop * loopstart,const MVert * mvarray,float angles[])2479 void BKE_mesh_calc_poly_angles(const MPoly *mpoly,
2480 const MLoop *loopstart,
2481 const MVert *mvarray,
2482 float angles[])
2483 {
2484 float nor_prev[3];
2485 float nor_next[3];
2486
2487 int i_this = mpoly->totloop - 1;
2488 int i_next = 0;
2489
2490 sub_v3_v3v3(nor_prev, mvarray[loopstart[i_this - 1].v].co, mvarray[loopstart[i_this].v].co);
2491 normalize_v3(nor_prev);
2492
2493 while (i_next < mpoly->totloop) {
2494 sub_v3_v3v3(nor_next, mvarray[loopstart[i_this].v].co, mvarray[loopstart[i_next].v].co);
2495 normalize_v3(nor_next);
2496 angles[i_this] = angle_normalized_v3v3(nor_prev, nor_next);
2497
2498 /* step */
2499 copy_v3_v3(nor_prev, nor_next);
2500 i_this = i_next;
2501 i_next++;
2502 }
2503 }
2504
/* Insert every edge of the polygon (as a vertex pair) into the edge hash. */
void BKE_mesh_poly_edgehash_insert(EdgeHash *ehash, const MPoly *mp, const MLoop *mloop)
{
  int i_prev = mp->totloop - 1; /* Last loop wraps to the first. */

  for (int i = 0; i < mp->totloop; i++) {
    BLI_edgehash_reinsert(ehash, mloop[i_prev].v, mloop[i].v, NULL);
    i_prev = i;
  }
}
2520
BKE_mesh_poly_edgebitmap_insert(unsigned int * edge_bitmap,const MPoly * mp,const MLoop * mloop)2521 void BKE_mesh_poly_edgebitmap_insert(unsigned int *edge_bitmap,
2522 const MPoly *mp,
2523 const MLoop *mloop)
2524 {
2525 const MLoop *ml;
2526 int i = mp->totloop;
2527
2528 ml = mloop;
2529
2530 while (i-- != 0) {
2531 BLI_BITMAP_ENABLE(edge_bitmap, ml->e);
2532 ml++;
2533 }
2534 }
2535
2536 /** \} */
2537
2538 /* -------------------------------------------------------------------- */
2539 /** \name Mesh Center Calculation
2540 * \{ */
2541
BKE_mesh_center_median(const Mesh * me,float r_cent[3])2542 bool BKE_mesh_center_median(const Mesh *me, float r_cent[3])
2543 {
2544 int i = me->totvert;
2545 const MVert *mvert;
2546 zero_v3(r_cent);
2547 for (mvert = me->mvert; i--; mvert++) {
2548 add_v3_v3(r_cent, mvert->co);
2549 }
2550 /* otherwise we get NAN for 0 verts */
2551 if (me->totvert) {
2552 mul_v3_fl(r_cent, 1.0f / (float)me->totvert);
2553 }
2554 return (me->totvert != 0);
2555 }
2556
2557 /**
2558 * Calculate the center from polygons,
2559 * use when we want to ignore vertex locations that don't have connected faces.
2560 */
BKE_mesh_center_median_from_polys(const Mesh * me,float r_cent[3])2561 bool BKE_mesh_center_median_from_polys(const Mesh *me, float r_cent[3])
2562 {
2563 int i = me->totpoly;
2564 int tot = 0;
2565 const MPoly *mpoly = me->mpoly;
2566 const MLoop *mloop = me->mloop;
2567 const MVert *mvert = me->mvert;
2568 zero_v3(r_cent);
2569 for (mpoly = me->mpoly; i--; mpoly++) {
2570 int loopend = mpoly->loopstart + mpoly->totloop;
2571 for (int j = mpoly->loopstart; j < loopend; j++) {
2572 add_v3_v3(r_cent, mvert[mloop[j].v].co);
2573 }
2574 tot += mpoly->totloop;
2575 }
2576 /* otherwise we get NAN for 0 verts */
2577 if (me->totpoly) {
2578 mul_v3_fl(r_cent, 1.0f / (float)tot);
2579 }
2580 return (me->totpoly != 0);
2581 }
2582
BKE_mesh_center_bounds(const Mesh * me,float r_cent[3])2583 bool BKE_mesh_center_bounds(const Mesh *me, float r_cent[3])
2584 {
2585 float min[3], max[3];
2586 INIT_MINMAX(min, max);
2587 if (BKE_mesh_minmax(me, min, max)) {
2588 mid_v3_v3v3(r_cent, min, max);
2589 return true;
2590 }
2591
2592 return false;
2593 }
2594
BKE_mesh_center_of_surface(const Mesh * me,float r_cent[3])2595 bool BKE_mesh_center_of_surface(const Mesh *me, float r_cent[3])
2596 {
2597 int i = me->totpoly;
2598 MPoly *mpoly;
2599 float poly_area;
2600 float total_area = 0.0f;
2601 float poly_cent[3];
2602
2603 zero_v3(r_cent);
2604
2605 /* calculate a weighted average of polygon centroids */
2606 for (mpoly = me->mpoly; i--; mpoly++) {
2607 poly_area = mesh_calc_poly_area_centroid(
2608 mpoly, me->mloop + mpoly->loopstart, me->mvert, poly_cent);
2609
2610 madd_v3_v3fl(r_cent, poly_cent, poly_area);
2611 total_area += poly_area;
2612 }
2613 /* otherwise we get NAN for 0 polys */
2614 if (me->totpoly) {
2615 mul_v3_fl(r_cent, 1.0f / total_area);
2616 }
2617
2618 /* zero area faces cause this, fallback to median */
2619 if (UNLIKELY(!is_finite_v3(r_cent))) {
2620 return BKE_mesh_center_median(me, r_cent);
2621 }
2622
2623 return (me->totpoly != 0);
2624 }
2625
2626 /**
2627 * \note Mesh must be manifold with consistent face-winding,
2628 * see #mesh_calc_poly_volume_centroid for details.
2629 */
BKE_mesh_center_of_volume(const Mesh * me,float r_cent[3])2630 bool BKE_mesh_center_of_volume(const Mesh *me, float r_cent[3])
2631 {
2632 int i = me->totpoly;
2633 MPoly *mpoly;
2634 float poly_volume;
2635 float total_volume = 0.0f;
2636 float poly_cent[3];
2637
2638 /* Use an initial center to avoid numeric instability of geometry far away from the center. */
2639 float init_cent[3];
2640 const bool init_cent_result = BKE_mesh_center_median_from_polys(me, init_cent);
2641
2642 zero_v3(r_cent);
2643
2644 /* calculate a weighted average of polyhedron centroids */
2645 for (mpoly = me->mpoly; i--; mpoly++) {
2646 poly_volume = mesh_calc_poly_volume_centroid_with_reference_center(
2647 mpoly, me->mloop + mpoly->loopstart, me->mvert, init_cent, poly_cent);
2648
2649 /* poly_cent is already volume-weighted, so no need to multiply by the volume */
2650 add_v3_v3(r_cent, poly_cent);
2651 total_volume += poly_volume;
2652 }
2653 /* otherwise we get NAN for 0 polys */
2654 if (total_volume != 0.0f) {
2655 /* multiply by 0.25 to get the correct centroid */
2656 /* no need to divide volume by 6 as the centroid is weighted by 6x the volume,
2657 * so it all cancels out. */
2658 mul_v3_fl(r_cent, 0.25f / total_volume);
2659 }
2660
2661 /* this can happen for non-manifold objects, fallback to median */
2662 if (UNLIKELY(!is_finite_v3(r_cent))) {
2663 copy_v3_v3(r_cent, init_cent);
2664 return init_cent_result;
2665 }
2666 add_v3_v3(r_cent, init_cent);
2667 return (me->totpoly != 0);
2668 }
2669
2670 /** \} */
2671
2672 /* -------------------------------------------------------------------- */
2673 /** \name Mesh Volume Calculation
2674 * \{ */
2675
mesh_calc_center_centroid_ex(const MVert * mverts,int UNUSED (mverts_num),const MLoopTri * looptri,int looptri_num,const MLoop * mloop,float r_center[3])2676 static bool mesh_calc_center_centroid_ex(const MVert *mverts,
2677 int UNUSED(mverts_num),
2678 const MLoopTri *looptri,
2679 int looptri_num,
2680 const MLoop *mloop,
2681 float r_center[3])
2682 {
2683
2684 zero_v3(r_center);
2685
2686 if (looptri_num == 0) {
2687 return false;
2688 }
2689
2690 float totweight = 0.0f;
2691 const MLoopTri *lt;
2692 int i;
2693 for (i = 0, lt = looptri; i < looptri_num; i++, lt++) {
2694 const MVert *v1 = &mverts[mloop[lt->tri[0]].v];
2695 const MVert *v2 = &mverts[mloop[lt->tri[1]].v];
2696 const MVert *v3 = &mverts[mloop[lt->tri[2]].v];
2697 float area;
2698
2699 area = area_tri_v3(v1->co, v2->co, v3->co);
2700 madd_v3_v3fl(r_center, v1->co, area);
2701 madd_v3_v3fl(r_center, v2->co, area);
2702 madd_v3_v3fl(r_center, v3->co, area);
2703 totweight += area;
2704 }
2705 if (totweight == 0.0f) {
2706 return false;
2707 }
2708
2709 mul_v3_fl(r_center, 1.0f / (3.0f * totweight));
2710
2711 return true;
2712 }
2713
2714 /**
2715 * Calculate the volume and center.
2716 *
2717 * \param r_volume: Volume (unsigned).
2718 * \param r_center: Center of mass.
2719 */
BKE_mesh_calc_volume(const MVert * mverts,const int mverts_num,const MLoopTri * looptri,const int looptri_num,const MLoop * mloop,float * r_volume,float r_center[3])2720 void BKE_mesh_calc_volume(const MVert *mverts,
2721 const int mverts_num,
2722 const MLoopTri *looptri,
2723 const int looptri_num,
2724 const MLoop *mloop,
2725 float *r_volume,
2726 float r_center[3])
2727 {
2728 const MLoopTri *lt;
2729 float center[3];
2730 float totvol;
2731 int i;
2732
2733 if (r_volume) {
2734 *r_volume = 0.0f;
2735 }
2736 if (r_center) {
2737 zero_v3(r_center);
2738 }
2739
2740 if (looptri_num == 0) {
2741 return;
2742 }
2743
2744 if (!mesh_calc_center_centroid_ex(mverts, mverts_num, looptri, looptri_num, mloop, center)) {
2745 return;
2746 }
2747
2748 totvol = 0.0f;
2749
2750 for (i = 0, lt = looptri; i < looptri_num; i++, lt++) {
2751 const MVert *v1 = &mverts[mloop[lt->tri[0]].v];
2752 const MVert *v2 = &mverts[mloop[lt->tri[1]].v];
2753 const MVert *v3 = &mverts[mloop[lt->tri[2]].v];
2754 float vol;
2755
2756 vol = volume_tetrahedron_signed_v3(center, v1->co, v2->co, v3->co);
2757 if (r_volume) {
2758 totvol += vol;
2759 }
2760 if (r_center) {
2761 /* averaging factor 1/3 is applied in the end */
2762 madd_v3_v3fl(r_center, v1->co, vol);
2763 madd_v3_v3fl(r_center, v2->co, vol);
2764 madd_v3_v3fl(r_center, v3->co, vol);
2765 }
2766 }
2767
2768 /* Note: Depending on arbitrary centroid position,
2769 * totvol can become negative even for a valid mesh.
2770 * The true value is always the positive value.
2771 */
2772 if (r_volume) {
2773 *r_volume = fabsf(totvol);
2774 }
2775 if (r_center) {
2776 /* Note: Factor 1/3 is applied once for all vertices here.
2777 * This also automatically negates the vector if totvol is negative.
2778 */
2779 if (totvol != 0.0f) {
2780 mul_v3_fl(r_center, (1.0f / 3.0f) / totvol);
2781 }
2782 }
2783 }
2784
2785 /** \} */
2786
2787 /* -------------------------------------------------------------------- */
2788 /** \name NGon Tessellation (NGon/Tessface Conversion)
2789 * \{ */
2790
2791 /**
2792 * Convert a triangle or quadrangle of loop/poly data to tessface data
2793 */
BKE_mesh_loops_to_mface_corners(CustomData * fdata,CustomData * ldata,CustomData * UNUSED (pdata),unsigned int lindex[4],int findex,const int UNUSED (polyindex),const int mf_len,const int numUV,const int numCol,const bool hasPCol,const bool hasOrigSpace,const bool hasLNor)2794 void BKE_mesh_loops_to_mface_corners(
2795 CustomData *fdata,
2796 CustomData *ldata,
2797 CustomData *UNUSED(pdata),
2798 unsigned int lindex[4],
2799 int findex,
2800 const int UNUSED(polyindex),
2801 const int mf_len, /* 3 or 4 */
2802
2803 /* cache values to avoid lookups every time */
2804 const int numUV, /* CustomData_number_of_layers(ldata, CD_MLOOPUV) */
2805 const int numCol, /* CustomData_number_of_layers(ldata, CD_MLOOPCOL) */
2806 const bool hasPCol, /* CustomData_has_layer(ldata, CD_PREVIEW_MLOOPCOL) */
2807 const bool hasOrigSpace, /* CustomData_has_layer(ldata, CD_ORIGSPACE_MLOOP) */
2808 const bool hasLNor /* CustomData_has_layer(ldata, CD_NORMAL) */
2809 )
2810 {
2811 MTFace *texface;
2812 MCol *mcol;
2813 MLoopCol *mloopcol;
2814 MLoopUV *mloopuv;
2815 int i, j;
2816
2817 for (i = 0; i < numUV; i++) {
2818 texface = CustomData_get_n(fdata, CD_MTFACE, findex, i);
2819
2820 for (j = 0; j < mf_len; j++) {
2821 mloopuv = CustomData_get_n(ldata, CD_MLOOPUV, (int)lindex[j], i);
2822 copy_v2_v2(texface->uv[j], mloopuv->uv);
2823 }
2824 }
2825
2826 for (i = 0; i < numCol; i++) {
2827 mcol = CustomData_get_n(fdata, CD_MCOL, findex, i);
2828
2829 for (j = 0; j < mf_len; j++) {
2830 mloopcol = CustomData_get_n(ldata, CD_MLOOPCOL, (int)lindex[j], i);
2831 MESH_MLOOPCOL_TO_MCOL(mloopcol, &mcol[j]);
2832 }
2833 }
2834
2835 if (hasPCol) {
2836 mcol = CustomData_get(fdata, findex, CD_PREVIEW_MCOL);
2837
2838 for (j = 0; j < mf_len; j++) {
2839 mloopcol = CustomData_get(ldata, (int)lindex[j], CD_PREVIEW_MLOOPCOL);
2840 MESH_MLOOPCOL_TO_MCOL(mloopcol, &mcol[j]);
2841 }
2842 }
2843
2844 if (hasOrigSpace) {
2845 OrigSpaceFace *of = CustomData_get(fdata, findex, CD_ORIGSPACE);
2846 OrigSpaceLoop *lof;
2847
2848 for (j = 0; j < mf_len; j++) {
2849 lof = CustomData_get(ldata, (int)lindex[j], CD_ORIGSPACE_MLOOP);
2850 copy_v2_v2(of->uv[j], lof->uv);
2851 }
2852 }
2853
2854 if (hasLNor) {
2855 short(*tlnors)[3] = CustomData_get(fdata, findex, CD_TESSLOOPNORMAL);
2856
2857 for (j = 0; j < mf_len; j++) {
2858 normal_float_to_short_v3(tlnors[j], CustomData_get(ldata, (int)lindex[j], CD_NORMAL));
2859 }
2860 }
2861 }
2862
/**
 * Convert all CD layers from loop/poly to tessface data.
 *
 * \param fdata: Tessface custom-data (destination).
 * \param ldata: Loop custom-data (source).
 * \param mface: Optional tessface array; when non-NULL its v4 is the quad test.
 * \param polyindices: Per-tessface index of the originating polygon.
 * \param loopindices: is an array of an int[4] per tessface,
 * mapping tessface's verts to loops indices.
 * \param num_faces: Number of tessfaces.
 *
 * \note when mface is not NULL, mface[face_index].v4
 * is used to test quads, else, loopindices[face_index][3] is used.
 */
void BKE_mesh_loops_to_tessdata(CustomData *fdata,
                                CustomData *ldata,
                                MFace *mface,
                                const int *polyindices,
                                unsigned int (*loopindices)[4],
                                const int num_faces)
{
  /* Note: performances are sub-optimal when we get a NULL mface,
   * we could be ~25% quicker with dedicated code...
   * Issue is, unless having two different functions with nearly the same code,
   * there's not much ways to solve this. Better imho to live with it for now. :/ --mont29
   */
  const int numUV = CustomData_number_of_layers(ldata, CD_MLOOPUV);
  const int numCol = CustomData_number_of_layers(ldata, CD_MLOOPCOL);
  const bool hasPCol = CustomData_has_layer(ldata, CD_PREVIEW_MLOOPCOL);
  const bool hasOrigSpace = CustomData_has_layer(ldata, CD_ORIGSPACE_MLOOP);
  const bool hasLoopNormal = CustomData_has_layer(ldata, CD_NORMAL);
  const bool hasLoopTangent = CustomData_has_layer(ldata, CD_TANGENT);
  int findex, i, j;
  const int *pidx;
  unsigned int(*lidx)[4];

  /* UV layers: walk faces and copy 3 or 4 loop UVs per face.
   * The `(... ? 4 : 3)` expression is the quad test described in the doc-string. */
  for (i = 0; i < numUV; i++) {
    MTFace *texface = CustomData_get_layer_n(fdata, CD_MTFACE, i);
    MLoopUV *mloopuv = CustomData_get_layer_n(ldata, CD_MLOOPUV, i);

    for (findex = 0, pidx = polyindices, lidx = loopindices; findex < num_faces;
         pidx++, lidx++, findex++, texface++) {
      for (j = (mface ? mface[findex].v4 : (*lidx)[3]) ? 4 : 3; j--;) {
        copy_v2_v2(texface->uv[j], mloopuv[(*lidx)[j]].uv);
      }
    }
  }

  /* Vertex-color layers. */
  for (i = 0; i < numCol; i++) {
    MCol(*mcol)[4] = CustomData_get_layer_n(fdata, CD_MCOL, i);
    MLoopCol *mloopcol = CustomData_get_layer_n(ldata, CD_MLOOPCOL, i);

    for (findex = 0, lidx = loopindices; findex < num_faces; lidx++, findex++, mcol++) {
      for (j = (mface ? mface[findex].v4 : (*lidx)[3]) ? 4 : 3; j--;) {
        MESH_MLOOPCOL_TO_MCOL(&mloopcol[(*lidx)[j]], &(*mcol)[j]);
      }
    }
  }

  /* Preview colors (single layer). */
  if (hasPCol) {
    MCol(*mcol)[4] = CustomData_get_layer(fdata, CD_PREVIEW_MCOL);
    MLoopCol *mloopcol = CustomData_get_layer(ldata, CD_PREVIEW_MLOOPCOL);

    for (findex = 0, lidx = loopindices; findex < num_faces; lidx++, findex++, mcol++) {
      for (j = (mface ? mface[findex].v4 : (*lidx)[3]) ? 4 : 3; j--;) {
        MESH_MLOOPCOL_TO_MCOL(&mloopcol[(*lidx)[j]], &(*mcol)[j]);
      }
    }
  }

  /* Original-space UVs (single layer). */
  if (hasOrigSpace) {
    OrigSpaceFace *of = CustomData_get_layer(fdata, CD_ORIGSPACE);
    OrigSpaceLoop *lof = CustomData_get_layer(ldata, CD_ORIGSPACE_MLOOP);

    for (findex = 0, lidx = loopindices; findex < num_faces; lidx++, findex++, of++) {
      for (j = (mface ? mface[findex].v4 : (*lidx)[3]) ? 4 : 3; j--;) {
        copy_v2_v2(of->uv[j], lof[(*lidx)[j]].uv);
      }
    }
  }

  /* Loop normals: float loop normals become short tessface normals. */
  if (hasLoopNormal) {
    short(*fnors)[4][3] = CustomData_get_layer(fdata, CD_TESSLOOPNORMAL);
    float(*lnors)[3] = CustomData_get_layer(ldata, CD_NORMAL);

    for (findex = 0, lidx = loopindices; findex < num_faces; lidx++, findex++, fnors++) {
      for (j = (mface ? mface[findex].v4 : (*lidx)[3]) ? 4 : 3; j--;) {
        normal_float_to_short_v3((*fnors)[j], lnors[(*lidx)[j]]);
      }
    }
  }

  if (hasLoopTangent) {
    /* need to do for all uv maps at some point */
    float(*ftangents)[4] = CustomData_get_layer(fdata, CD_TANGENT);
    float(*ltangents)[4] = CustomData_get_layer(ldata, CD_TANGENT);

    /* Face tangents are stored flat, 4 slots per face (findex * 4 + corner). */
    for (findex = 0, pidx = polyindices, lidx = loopindices; findex < num_faces;
         pidx++, lidx++, findex++) {
      int nverts = (mface ? mface[findex].v4 : (*lidx)[3]) ? 4 : 3;
      for (j = nverts; j--;) {
        copy_v4_v4(ftangents[findex * 4 + j], ltangents[(*lidx)[j]]);
      }
    }
  }
}
2964
/**
 * Copy one CD_TANGENT layer from loop data to tessface data.
 * When \a layer_name is given the named layer is used, otherwise the active one.
 * Silently does nothing if either side lacks the layer.
 */
void BKE_mesh_tangent_loops_to_tessdata(CustomData *fdata,
                                        CustomData *ldata,
                                        MFace *mface,
                                        const int *polyindices,
                                        unsigned int (*loopindices)[4],
                                        const int num_faces,
                                        const char *layer_name)
{
  /* Note: performances are sub-optimal when we get a NULL mface,
   * we could be ~25% quicker with dedicated code...
   * Issue is, unless having two different functions with nearly the same code,
   * there's not much ways to solve this. Better imho to live with it for now. :/ --mont29
   */
  (void)polyindices; /* Unused, kept for signature parity with loops_to_tessdata. */

  float(*ltangents)[4] = layer_name ?
                             CustomData_get_layer_named(ldata, CD_TANGENT, layer_name) :
                             CustomData_get_layer(ldata, CD_TANGENT);
  if (ltangents == NULL) {
    return;
  }

  /* need to do for all uv maps at some point */
  float(*ftangents)[4] = layer_name ?
                             CustomData_get_layer_named(fdata, CD_TANGENT, layer_name) :
                             CustomData_get_layer(fdata, CD_TANGENT);
  if (ftangents == NULL) {
    return;
  }

  unsigned int(*lidx)[4] = loopindices;
  for (int findex = 0; findex < num_faces; findex++, lidx++) {
    /* Quad test: MFace->v4 when available, else the fourth loop index. */
    const int nverts = (mface ? mface[findex].v4 : (*lidx)[3]) ? 4 : 3;
    for (int j = nverts; j--;) {
      copy_v4_v4(ftangents[findex * 4 + j], ltangents[(*lidx)[j]]);
    }
  }
}
3012
/**
 * Recreate tessellation (polygons + loops -> legacy MFace data).
 *
 * Triangles and quads are copied directly; larger ngons are triangulated
 * with #BLI_polyfill_calc_arena after projecting onto their dominant axis.
 *
 * \param do_face_nor_copy: Controls whether the normals from the poly
 * are copied to the tessellated faces.
 *
 * \return number of tessellation faces.
 */
int BKE_mesh_tessface_calc_ex(CustomData *fdata,
                              CustomData *ldata,
                              CustomData *pdata,
                              MVert *mvert,
                              int totface,
                              int totloop,
                              int totpoly,
                              const bool do_face_nor_copy)
{
  /* use this to avoid locking pthread for _every_ polygon
   * and calling the fill function */

#define USE_TESSFACE_SPEEDUP
#define USE_TESSFACE_QUADS /* NEEDS FURTHER TESTING */

/* We abuse MFace->edcode to tag quad faces. See below for details. */
#define TESSFACE_IS_QUAD 1

  /* Exact face count: each n-gon yields (n - 2) triangles, quads yield 1 face
   * (see USE_TESSFACE_QUADS), so this is an upper bound on faces produced. */
  const int looptri_num = poly_to_tri_count(totpoly, totloop);

  MPoly *mp, *mpoly;
  MLoop *ml, *mloop;
  MFace *mface, *mf;
  MemArena *arena = NULL; /* Lazily created, only needed for ngons (> 4 loops). */
  int *mface_to_poly_map;
  unsigned int(*lindices)[4];
  int poly_index, mface_index;
  unsigned int j;

  mpoly = CustomData_get_layer(pdata, CD_MPOLY);
  mloop = CustomData_get_layer(ldata, CD_MLOOP);

  /* allocate the length of totfaces, avoid many small reallocs,
   * if all faces are tri's it will be correct, quads == 2x allocs */
  /* take care. we are _not_ calloc'ing so be sure to initialize each field */
  mface_to_poly_map = MEM_malloc_arrayN((size_t)looptri_num, sizeof(*mface_to_poly_map), __func__);
  mface = MEM_malloc_arrayN((size_t)looptri_num, sizeof(*mface), __func__);
  lindices = MEM_malloc_arrayN((size_t)looptri_num, sizeof(*lindices), __func__);

  mface_index = 0;
  mp = mpoly;
  for (poly_index = 0; poly_index < totpoly; poly_index++, mp++) {
    const unsigned int mp_loopstart = (unsigned int)mp->loopstart;
    const unsigned int mp_totloop = (unsigned int)mp->totloop;
    unsigned int l1, l2, l3, l4;
    unsigned int *lidx;
    if (mp_totloop < 3) {
      /* do nothing */
    }

#ifdef USE_TESSFACE_SPEEDUP

/* NOTE: these macros are defined mid-chain so they can use the loop locals;
 * they expand inside the `else if` branches below. */
#  define ML_TO_MF(i1, i2, i3) \
    mface_to_poly_map[mface_index] = poly_index; \
    mf = &mface[mface_index]; \
    lidx = lindices[mface_index]; \
    /* set loop indices, transformed to vert indices later */ \
    l1 = mp_loopstart + i1; \
    l2 = mp_loopstart + i2; \
    l3 = mp_loopstart + i3; \
    mf->v1 = mloop[l1].v; \
    mf->v2 = mloop[l2].v; \
    mf->v3 = mloop[l3].v; \
    mf->v4 = 0; \
    lidx[0] = l1; \
    lidx[1] = l2; \
    lidx[2] = l3; \
    lidx[3] = 0; \
    mf->mat_nr = mp->mat_nr; \
    mf->flag = mp->flag; \
    mf->edcode = 0; \
    (void)0

/* ALMOST IDENTICAL TO DEFINE ABOVE (see EXCEPTION) */
#  define ML_TO_MF_QUAD() \
    mface_to_poly_map[mface_index] = poly_index; \
    mf = &mface[mface_index]; \
    lidx = lindices[mface_index]; \
    /* set loop indices, transformed to vert indices later */ \
    l1 = mp_loopstart + 0; /* EXCEPTION */ \
    l2 = mp_loopstart + 1; /* EXCEPTION */ \
    l3 = mp_loopstart + 2; /* EXCEPTION */ \
    l4 = mp_loopstart + 3; /* EXCEPTION */ \
    mf->v1 = mloop[l1].v; \
    mf->v2 = mloop[l2].v; \
    mf->v3 = mloop[l3].v; \
    mf->v4 = mloop[l4].v; \
    lidx[0] = l1; \
    lidx[1] = l2; \
    lidx[2] = l3; \
    lidx[3] = l4; \
    mf->mat_nr = mp->mat_nr; \
    mf->flag = mp->flag; \
    mf->edcode = TESSFACE_IS_QUAD; \
    (void)0

    else if (mp_totloop == 3) {
      ML_TO_MF(0, 1, 2);
      mface_index++;
    }
    else if (mp_totloop == 4) {
#  ifdef USE_TESSFACE_QUADS
      ML_TO_MF_QUAD();
      mface_index++;
#  else
      ML_TO_MF(0, 1, 2);
      mface_index++;
      ML_TO_MF(0, 2, 3);
      mface_index++;
#  endif
    }
#endif /* USE_TESSFACE_SPEEDUP */
    else {
      /* General ngon: project to 2D and run the polyfill triangulator. */
      const float *co_curr, *co_prev;

      float normal[3];

      float axis_mat[3][3];
      float(*projverts)[2];
      unsigned int(*tris)[3];

      const unsigned int totfilltri = mp_totloop - 2;

      if (UNLIKELY(arena == NULL)) {
        arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
      }

      tris = BLI_memarena_alloc(arena, sizeof(*tris) * (size_t)totfilltri);
      projverts = BLI_memarena_alloc(arena, sizeof(*projverts) * (size_t)mp_totloop);

      zero_v3(normal);

      /* calc normal, flipped: to get a positive 2d cross product */
      ml = mloop + mp_loopstart;
      co_prev = mvert[ml[mp_totloop - 1].v].co;
      for (j = 0; j < mp_totloop; j++, ml++) {
        co_curr = mvert[ml->v].co;
        add_newell_cross_v3_v3v3(normal, co_prev, co_curr);
        co_prev = co_curr;
      }
      /* Degenerate polygon (zero-length normal): fall back to +Z. */
      if (UNLIKELY(normalize_v3(normal) == 0.0f)) {
        normal[2] = 1.0f;
      }

      /* project verts to 2d */
      axis_dominant_v3_to_m3_negate(axis_mat, normal);

      ml = mloop + mp_loopstart;
      for (j = 0; j < mp_totloop; j++, ml++) {
        mul_v2_m3v3(projverts[j], axis_mat, mvert[ml->v].co);
      }

      BLI_polyfill_calc_arena(projverts, mp_totloop, 1, tris, arena);

      /* apply fill */
      for (j = 0; j < totfilltri; j++) {
        unsigned int *tri = tris[j];
        lidx = lindices[mface_index];

        mface_to_poly_map[mface_index] = poly_index;
        mf = &mface[mface_index];

        /* set loop indices, transformed to vert indices later */
        l1 = mp_loopstart + tri[0];
        l2 = mp_loopstart + tri[1];
        l3 = mp_loopstart + tri[2];

        mf->v1 = mloop[l1].v;
        mf->v2 = mloop[l2].v;
        mf->v3 = mloop[l3].v;
        mf->v4 = 0;

        lidx[0] = l1;
        lidx[1] = l2;
        lidx[2] = l3;
        lidx[3] = 0;

        mf->mat_nr = mp->mat_nr;
        mf->flag = mp->flag;
        mf->edcode = 0;

        mface_index++;
      }

      /* Reset (not free) the arena, keeping its buffers for the next ngon. */
      BLI_memarena_clear(arena);
    }
  }

  if (arena) {
    BLI_memarena_free(arena);
    arena = NULL;
  }

  CustomData_free(fdata, totface);
  totface = mface_index;

  BLI_assert(totface <= looptri_num);

  /* not essential but without this we store over-alloc'd memory in the CustomData layers */
  if (LIKELY(looptri_num != totface)) {
    mface = MEM_reallocN(mface, sizeof(*mface) * (size_t)totface);
    mface_to_poly_map = MEM_reallocN(mface_to_poly_map,
                                     sizeof(*mface_to_poly_map) * (size_t)totface);
  }

  /* CD_ASSIGN: ownership of 'mface' / 'mface_to_poly_map' transfers to fdata. */
  CustomData_add_layer(fdata, CD_MFACE, CD_ASSIGN, mface, totface);

  /* CD_ORIGINDEX will contain an array of indices from tessfaces to the polygons
   * they are directly tessellated from */
  CustomData_add_layer(fdata, CD_ORIGINDEX, CD_ASSIGN, mface_to_poly_map, totface);
  CustomData_from_bmeshpoly(fdata, ldata, totface);

  if (do_face_nor_copy) {
    /* If polys have a normals layer, copying that to faces can help
     * avoid the need to recalculate normals later */
    if (CustomData_has_layer(pdata, CD_NORMAL)) {
      float(*pnors)[3] = CustomData_get_layer(pdata, CD_NORMAL);
      float(*fnors)[3] = CustomData_add_layer(fdata, CD_NORMAL, CD_CALLOC, NULL, totface);
      for (mface_index = 0; mface_index < totface; mface_index++) {
        copy_v3_v3(fnors[mface_index], pnors[mface_to_poly_map[mface_index]]);
      }
    }
  }

  /* NOTE: quad detection issue - fourth vertidx vs fourth loopidx:
   * Polygons take care of their loops ordering, hence not of their vertices ordering.
   * Currently, our tfaces' fourth vertex index might be 0 even for a quad. However,
   * we know our fourth loop index is never 0 for quads (because they are sorted for polygons,
   * and our quads are still mere copies of their polygons).
   * So we pass NULL as MFace pointer, and BKE_mesh_loops_to_tessdata
   * will use the fourth loop index as quad test.
   * ...
   */
  BKE_mesh_loops_to_tessdata(fdata, ldata, NULL, mface_to_poly_map, lindices, totface);

  /* NOTE: quad detection issue - fourth vertidx vs fourth loopidx:
   * ...However, most TFace code uses 'MFace->v4 == 0' test to check whether it is a tri or quad.
   * test_index_face() will check this and rotate the tessellated face if needed.
   */
#ifdef USE_TESSFACE_QUADS
  mf = mface;
  for (mface_index = 0; mface_index < totface; mface_index++, mf++) {
    if (mf->edcode == TESSFACE_IS_QUAD) {
      test_index_face(mf, fdata, mface_index, 4);
      mf->edcode = 0;
    }
  }
#endif

  MEM_freeN(lindices);

  return totface;

#undef USE_TESSFACE_SPEEDUP
#undef USE_TESSFACE_QUADS

#undef ML_TO_MF
#undef ML_TO_MF_QUAD
}
3280
/**
 * Calculate tessellation into #MLoopTri which exist only for this purpose.
 *
 * Triangles are emitted directly, quads are split (with a flip check for the
 * degenerate 0-2 diagonal), and larger ngons go through #BLI_polyfill_calc_arena.
 */
void BKE_mesh_recalc_looptri(const MLoop *mloop,
                             const MPoly *mpoly,
                             const MVert *mvert,
                             int totloop,
                             int totpoly,
                             MLoopTri *mlooptri)
{
  /* use this to avoid locking pthread for _every_ polygon
   * and calling the fill function */

#define USE_TESSFACE_SPEEDUP

  const MPoly *mp;
  const MLoop *ml;
  MLoopTri *mlt;
  MemArena *arena = NULL; /* Lazily created, only needed for ngons (> 4 loops). */
  int poly_index, mlooptri_index;
  unsigned int j;

  mlooptri_index = 0;
  mp = mpoly;
  for (poly_index = 0; poly_index < totpoly; poly_index++, mp++) {
    const unsigned int mp_loopstart = (unsigned int)mp->loopstart;
    const unsigned int mp_totloop = (unsigned int)mp->totloop;
    unsigned int l1, l2, l3;
    if (mp_totloop < 3) {
      /* do nothing */
    }

#ifdef USE_TESSFACE_SPEEDUP

/* NOTE: defined mid-chain so it can use the loop locals; expands in the
 * `else if` branches below. Writes one MLoopTri and records its poly. */
#  define ML_TO_MLT(i1, i2, i3) \
    { \
      mlt = &mlooptri[mlooptri_index]; \
      l1 = mp_loopstart + i1; \
      l2 = mp_loopstart + i2; \
      l3 = mp_loopstart + i3; \
      ARRAY_SET_ITEMS(mlt->tri, l1, l2, l3); \
      mlt->poly = (unsigned int)poly_index; \
    } \
    ((void)0)

    else if (mp_totloop == 3) {
      ML_TO_MLT(0, 1, 2);
      mlooptri_index++;
    }
    else if (mp_totloop == 4) {
      /* Split the quad along the 0-2 diagonal... */
      ML_TO_MLT(0, 1, 2);
      MLoopTri *mlt_a = mlt;
      mlooptri_index++;
      ML_TO_MLT(0, 2, 3);
      MLoopTri *mlt_b = mlt;
      mlooptri_index++;

      /* ...but switch to the 1-3 diagonal when 0-2 would produce a flipped
       * (degenerate) pair of triangles. */
      if (UNLIKELY(is_quad_flip_v3_first_third_fast(mvert[mloop[mlt_a->tri[0]].v].co,
                                                    mvert[mloop[mlt_a->tri[1]].v].co,
                                                    mvert[mloop[mlt_a->tri[2]].v].co,
                                                    mvert[mloop[mlt_b->tri[2]].v].co))) {
        /* flip out of degenerate 0-2 state. */
        mlt_a->tri[2] = mlt_b->tri[2];
        mlt_b->tri[0] = mlt_a->tri[1];
      }
    }
#endif /* USE_TESSFACE_SPEEDUP */
    else {
      /* General ngon: project to 2D and run the polyfill triangulator. */
      const float *co_curr, *co_prev;

      float normal[3];

      float axis_mat[3][3];
      float(*projverts)[2];
      unsigned int(*tris)[3];

      const unsigned int totfilltri = mp_totloop - 2;

      if (UNLIKELY(arena == NULL)) {
        arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
      }

      tris = BLI_memarena_alloc(arena, sizeof(*tris) * (size_t)totfilltri);
      projverts = BLI_memarena_alloc(arena, sizeof(*projverts) * (size_t)mp_totloop);

      zero_v3(normal);

      /* calc normal, flipped: to get a positive 2d cross product */
      ml = mloop + mp_loopstart;
      co_prev = mvert[ml[mp_totloop - 1].v].co;
      for (j = 0; j < mp_totloop; j++, ml++) {
        co_curr = mvert[ml->v].co;
        add_newell_cross_v3_v3v3(normal, co_prev, co_curr);
        co_prev = co_curr;
      }
      /* Degenerate polygon (zero-length normal): fall back to +Z. */
      if (UNLIKELY(normalize_v3(normal) == 0.0f)) {
        normal[2] = 1.0f;
      }

      /* project verts to 2d */
      axis_dominant_v3_to_m3_negate(axis_mat, normal);

      ml = mloop + mp_loopstart;
      for (j = 0; j < mp_totloop; j++, ml++) {
        mul_v2_m3v3(projverts[j], axis_mat, mvert[ml->v].co);
      }

      BLI_polyfill_calc_arena(projverts, mp_totloop, 1, tris, arena);

      /* apply fill */
      for (j = 0; j < totfilltri; j++) {
        unsigned int *tri = tris[j];

        mlt = &mlooptri[mlooptri_index];

        /* set loop indices, transformed to vert indices later */
        l1 = mp_loopstart + tri[0];
        l2 = mp_loopstart + tri[1];
        l3 = mp_loopstart + tri[2];

        ARRAY_SET_ITEMS(mlt->tri, l1, l2, l3);
        mlt->poly = (unsigned int)poly_index;

        mlooptri_index++;
      }

      /* Reset (not free) the arena, keeping its buffers for the next ngon. */
      BLI_memarena_clear(arena);
    }
  }

  if (arena) {
    BLI_memarena_free(arena);
    arena = NULL;
  }

  /* Caller must have sized 'mlooptri' with poly_to_tri_count(). */
  BLI_assert(mlooptri_index == poly_to_tri_count(totpoly, totloop));
  UNUSED_VARS_NDEBUG(totloop);

#undef USE_TESSFACE_SPEEDUP
#undef ML_TO_MLT
}
3422
/**
 * Copy one tessface's corner data (UVs, colors, normals, multires displacement)
 * from legacy face layers into the corresponding loop layers.
 *
 * \param id: Owning ID, used to transfer external MDISPS storage (may be NULL).
 * \param findex: Index of the source face in \a fdata / \a mface.
 * \param loopstart: Index of the face's first loop in \a ldata.
 * \param numTex, numCol: Cached layer counts for CD_MTFACE / CD_MCOL.
 */
static void bm_corners_to_loops_ex(ID *id,
                                   CustomData *fdata,
                                   CustomData *ldata,
                                   MFace *mface,
                                   int totloop,
                                   int findex,
                                   int loopstart,
                                   int numTex,
                                   int numCol)
{
  MFace *mf = mface + findex;

  /* UV layers: 3 corners always, a 4th when the face is a quad (v4 != 0). */
  for (int i = 0; i < numTex; i++) {
    MTFace *texface = CustomData_get_n(fdata, CD_MTFACE, findex, i);

    MLoopUV *mloopuv = CustomData_get_n(ldata, CD_MLOOPUV, loopstart, i);
    copy_v2_v2(mloopuv->uv, texface->uv[0]);
    mloopuv++;
    copy_v2_v2(mloopuv->uv, texface->uv[1]);
    mloopuv++;
    copy_v2_v2(mloopuv->uv, texface->uv[2]);
    mloopuv++;

    if (mf->v4) {
      copy_v2_v2(mloopuv->uv, texface->uv[3]);
      mloopuv++;
    }
  }

  /* Vertex-color layers, same corner pattern. */
  for (int i = 0; i < numCol; i++) {
    MLoopCol *mloopcol = CustomData_get_n(ldata, CD_MLOOPCOL, loopstart, i);
    MCol *mcol = CustomData_get_n(fdata, CD_MCOL, findex, i);

    MESH_MLOOPCOL_FROM_MCOL(mloopcol, &mcol[0]);
    mloopcol++;
    MESH_MLOOPCOL_FROM_MCOL(mloopcol, &mcol[1]);
    mloopcol++;
    MESH_MLOOPCOL_FROM_MCOL(mloopcol, &mcol[2]);
    mloopcol++;
    if (mf->v4) {
      MESH_MLOOPCOL_FROM_MCOL(mloopcol, &mcol[3]);
      mloopcol++;
    }
  }

  /* Tessface normals (short) become loop normals (float). */
  if (CustomData_has_layer(fdata, CD_TESSLOOPNORMAL)) {
    float(*lnors)[3] = CustomData_get(ldata, loopstart, CD_NORMAL);
    short(*tlnors)[3] = CustomData_get(fdata, findex, CD_TESSLOOPNORMAL);
    const int max = mf->v4 ? 4 : 3;

    for (int i = 0; i < max; i++, lnors++, tlnors++) {
      normal_short_to_float_v3(*lnors, *tlnors);
    }
  }

  /* Multires displacement: split the face's single MDisps grid into
   * one per-loop grid of side*side cells. */
  if (CustomData_has_layer(fdata, CD_MDISPS)) {
    MDisps *ld = CustomData_get(ldata, loopstart, CD_MDISPS);
    MDisps *fd = CustomData_get(fdata, findex, CD_MDISPS);
    float(*disps)[3] = fd->disps;
    int tot = mf->v4 ? 4 : 3;
    int corners;

    if (CustomData_external_test(fdata, CD_MDISPS)) {
      if (id && fdata->external) {
        CustomData_external_add(ldata, id, CD_MDISPS, totloop, fdata->external->filename);
      }
    }

    corners = multires_mdisp_corners(fd);

    if (corners == 0) {
      /* Empty MDisp layers appear in at least one of the sintel.blend files.
       * Not sure why this happens, but it seems fine to just ignore them here.
       * If (corners == 0) for a non-empty layer though, something went wrong. */
      BLI_assert(fd->totdisp == 0);
    }
    else {
      const int side = (int)sqrtf((float)(fd->totdisp / corners));
      const int side_sq = side * side;

      for (int i = 0; i < tot; i++, disps += side_sq, ld++) {
        ld->totdisp = side_sq;
        /* Recover the subdivision level from the grid side length. */
        ld->level = (int)(logf((float)side - 1.0f) / (float)M_LN2) + 1;

        /* Replace any existing loop displacement data. */
        if (ld->disps) {
          MEM_freeN(ld->disps);
        }

        ld->disps = MEM_malloc_arrayN((size_t)side_sq, sizeof(float[3]), "converted loop mdisps");
        if (fd->disps) {
          memcpy(ld->disps, disps, (size_t)side_sq * sizeof(float[3]));
        }
        else {
          memset(ld->disps, 0, (size_t)side_sq * sizeof(float[3]));
        }
      }
    }
  }
}
3522
/**
 * Convert this mesh's legacy tessface data (MFace) to polygons/loops in place,
 * then refresh the mesh's cached custom-data pointers.
 */
void BKE_mesh_convert_mfaces_to_mpolys(Mesh *mesh)
{
  BKE_mesh_convert_mfaces_to_mpolys_ex(&mesh->id,
                                       &mesh->fdata,
                                       &mesh->ldata,
                                       &mesh->pdata,
                                       mesh->totedge,
                                       mesh->totface,
                                       mesh->totloop,
                                       mesh->totpoly,
                                       mesh->medge,
                                       mesh->mface,
                                       &mesh->totloop,
                                       &mesh->totpoly,
                                       &mesh->mloop,
                                       &mesh->mpoly);

  BKE_mesh_update_customdata_pointers(mesh, true);
}
3542
/**
 * The same as #BKE_mesh_convert_mfaces_to_mpolys
 * but oriented to be used in #do_versions from readfile.c
 * the difference is how active/render/clone/stencil indices are handled here.
 *
 * Normally they're being set from pdata which totally makes sense for meshes which are already
 * converted to bmesh structures, but when loading older files indices shall be updated the other
 * way around, so newly added pdata and ldata would have these indices set based on fdata layers.
 *
 * This is normally only needed when reading older files;
 * in all other cases #BKE_mesh_convert_mfaces_to_mpolys shall always be used.
 */
void BKE_mesh_do_versions_convert_mfaces_to_mpolys(Mesh *mesh)
{
  BKE_mesh_convert_mfaces_to_mpolys_ex(&mesh->id,
                                       &mesh->fdata,
                                       &mesh->ldata,
                                       &mesh->pdata,
                                       mesh->totedge,
                                       mesh->totface,
                                       mesh->totloop,
                                       mesh->totpoly,
                                       mesh->medge,
                                       mesh->mface,
                                       &mesh->totloop,
                                       &mesh->totpoly,
                                       &mesh->mloop,
                                       &mesh->mpoly);

  /* do_versions difference: derive active/render/clone/stencil layer
   * indices for pdata/ldata from the legacy fdata layers. */
  CustomData_bmesh_do_versions_update_active_layers(&mesh->fdata, &mesh->ldata);

  BKE_mesh_update_customdata_pointers(mesh, true);
}
3576
/**
 * Convert legacy MFace tessface data to MPoly/MLoop structures.
 *
 * Each tri/quad MFace becomes one MPoly with 3/4 MLoops; per-corner face
 * custom-data is transferred to the loop layers via #bm_corners_to_loops_ex.
 *
 * \param id: Owning ID, used to transfer external MDISPS storage (may be NULL).
 * \param r_totloop, r_totpoly, r_mloop, r_mpoly: Receive the newly built
 * counts/arrays (arrays are owned by \a ldata / \a pdata).
 */
void BKE_mesh_convert_mfaces_to_mpolys_ex(ID *id,
                                          CustomData *fdata,
                                          CustomData *ldata,
                                          CustomData *pdata,
                                          int totedge_i,
                                          int totface_i,
                                          int totloop_i,
                                          int totpoly_i,
                                          MEdge *medge,
                                          MFace *mface,
                                          int *r_totloop,
                                          int *r_totpoly,
                                          MLoop **r_mloop,
                                          MPoly **r_mpoly)
{
  MFace *mf;
  MLoop *ml, *mloop;
  MPoly *mp, *mpoly;
  MEdge *me;
  EdgeHash *eh; /* Maps (v1, v2) -> edge index, for filling MLoop.e below. */
  int numTex, numCol;
  int i, j, totloop, totpoly, *polyindex;

/* old flag, clear to allow for reuse */
#define ME_FGON (1 << 3)

  /* just in case some of these layers are filled in (can happen with python created meshes) */
  CustomData_free(ldata, totloop_i);
  CustomData_free(pdata, totpoly_i);

  /* One poly per legacy face. */
  totpoly = totface_i;
  mpoly = MEM_calloc_arrayN((size_t)totpoly, sizeof(MPoly), "mpoly converted");
  CustomData_add_layer(pdata, CD_MPOLY, CD_ASSIGN, mpoly, totpoly);

  numTex = CustomData_number_of_layers(fdata, CD_MTFACE);
  numCol = CustomData_number_of_layers(fdata, CD_MCOL);

  /* Count loops: 4 per quad (v4 set), otherwise 3. */
  totloop = 0;
  mf = mface;
  for (i = 0; i < totface_i; i++, mf++) {
    totloop += mf->v4 ? 4 : 3;
  }

  mloop = MEM_calloc_arrayN((size_t)totloop, sizeof(MLoop), "mloop converted");

  CustomData_add_layer(ldata, CD_MLOOP, CD_ASSIGN, mloop, totloop);

  CustomData_to_bmeshpoly(fdata, ldata, totloop);

  if (id) {
    /* ensure external data is transferred */
    /* TODO(sergey): Use multiresModifier_ensure_external_read(). */
    CustomData_external_read(fdata, id, CD_MASK_MDISPS, totface_i);
  }

  eh = BLI_edgehash_new_ex(__func__, (unsigned int)totedge_i);

  /* build edge hash */
  me = medge;
  for (i = 0; i < totedge_i; i++, me++) {
    BLI_edgehash_insert(eh, me->v1, me->v2, POINTER_FROM_UINT(i));

    /* unrelated but avoid having the FGON flag enabled,
     * so we can reuse it later for something else */
    me->flag &= ~ME_FGON;
  }

  polyindex = CustomData_get_layer(fdata, CD_ORIGINDEX);

  j = 0; /* current loop index */
  ml = mloop;
  mf = mface;
  mp = mpoly;
  for (i = 0; i < totface_i; i++, mf++, mp++) {
    mp->loopstart = j;

    mp->totloop = mf->v4 ? 4 : 3;

    mp->mat_nr = mf->mat_nr;
    mp->flag = mf->flag;

/* Emit one loop for corner v1, whose edge goes v1 -> v2. */
#define ML(v1, v2) \
  { \
    ml->v = mf->v1; \
    ml->e = POINTER_AS_UINT(BLI_edgehash_lookup(eh, mf->v1, mf->v2)); \
    ml++; \
    j++; \
  } \
  (void)0

    ML(v1, v2);
    ML(v2, v3);
    if (mf->v4) {
      ML(v3, v4);
      ML(v4, v1);
    }
    else {
      ML(v3, v1);
    }

#undef ML

    /* Transfer per-corner custom-data (UVs, colors, normals, mdisps). */
    bm_corners_to_loops_ex(id, fdata, ldata, mface, totloop, i, mp->loopstart, numTex, numCol);

    if (polyindex) {
      *polyindex = i;
      polyindex++;
    }
  }

  /* note, we don't convert NGons at all, these are not even real ngons,
   * they have their own UV's, colors etc - its more an editing feature. */

  BLI_edgehash_free(eh, NULL);

  *r_totpoly = totpoly;
  *r_totloop = totloop;
  *r_mpoly = mpoly;
  *r_mloop = mloop;

#undef ME_FGON
}
3699 /** \} */
3700
3701 /**
3702 * Flip a single MLoop's #MDisps structure,
3703 * low level function to be called from face-flipping code which re-arranged the mdisps themselves.
3704 */
BKE_mesh_mdisp_flip(MDisps * md,const bool use_loop_mdisp_flip)3705 void BKE_mesh_mdisp_flip(MDisps *md, const bool use_loop_mdisp_flip)
3706 {
3707 if (UNLIKELY(!md->totdisp || !md->disps)) {
3708 return;
3709 }
3710
3711 const int sides = (int)sqrt(md->totdisp);
3712 float(*co)[3] = md->disps;
3713
3714 for (int x = 0; x < sides; x++) {
3715 float *co_a, *co_b;
3716
3717 for (int y = 0; y < x; y++) {
3718 co_a = co[y * sides + x];
3719 co_b = co[x * sides + y];
3720
3721 swap_v3_v3(co_a, co_b);
3722 SWAP(float, co_a[0], co_a[1]);
3723 SWAP(float, co_b[0], co_b[1]);
3724
3725 if (use_loop_mdisp_flip) {
3726 co_a[2] *= -1.0f;
3727 co_b[2] *= -1.0f;
3728 }
3729 }
3730
3731 co_a = co[x * sides + x];
3732
3733 SWAP(float, co_a[0], co_a[1]);
3734
3735 if (use_loop_mdisp_flip) {
3736 co_a[2] *= -1.0f;
3737 }
3738 }
3739 }
3740
/**
 * Flip (invert winding of) the given \a mpoly, i.e. reverse order of its loops
 * (keeping the same vertex as 'start point').
 *
 * \param mpoly: the polygon to flip.
 * \param mloop: the full loops array.
 * \param ldata: the loops custom data.
 * \param lnors: Optional loop normals to swap along with the loops.
 * \param mdisp: Optional per-loop MDisps to flip (see #BKE_mesh_mdisp_flip).
 * \param use_loop_mdisp_flip: Forwarded to #BKE_mesh_mdisp_flip.
 */
void BKE_mesh_polygon_flip_ex(MPoly *mpoly,
                              MLoop *mloop,
                              CustomData *ldata,
                              float (*lnors)[3],
                              MDisps *mdisp,
                              const bool use_loop_mdisp_flip)
{
  int loopstart = mpoly->loopstart;
  int loopend = loopstart + mpoly->totloop - 1;
  /* If 'mloop' IS the ldata CD_MLOOP layer, CustomData_swap below already
   * swaps the MLoop structs, so we must not swap them a second time. */
  const bool loops_in_ldata = (CustomData_get_layer(ldata, CD_MLOOP) == mloop);

  if (mdisp) {
    for (int i = loopstart; i <= loopend; i++) {
      BKE_mesh_mdisp_flip(&mdisp[i], use_loop_mdisp_flip);
    }
  }

  /* Note that we keep same start vertex for flipped face. */

  /* We also have to update loops edge
   * (they will get their original 'other edge', that is,
   * the original edge of their original previous loop)... */
  unsigned int prev_edge_index = mloop[loopstart].e;
  mloop[loopstart].e = mloop[loopend].e;

  /* Walk the two ends towards the middle, swapping loop pairs and rotating
   * their edge indices one step so each loop keeps its 'previous' edge. */
  for (loopstart++; loopend > loopstart; loopstart++, loopend--) {
    mloop[loopend].e = mloop[loopend - 1].e;
    SWAP(unsigned int, mloop[loopstart].e, prev_edge_index);

    if (!loops_in_ldata) {
      SWAP(MLoop, mloop[loopstart], mloop[loopend]);
    }
    if (lnors) {
      swap_v3_v3(lnors[loopstart], lnors[loopend]);
    }
    CustomData_swap(ldata, loopstart, loopend);
  }
  /* Even if we did not swap the other 'pivot' loop, we need to set its swapped edge. */
  if (loopstart == loopend) {
    mloop[loopstart].e = prev_edge_index;
  }
}
3791
/**
 * Flip (invert winding of) a single polygon, including its multires
 * displacement layer when present. See #BKE_mesh_polygon_flip_ex.
 */
void BKE_mesh_polygon_flip(MPoly *mpoly, MLoop *mloop, CustomData *ldata)
{
  /* No loop normals to maintain; always flip the mdisp Z component too. */
  BKE_mesh_polygon_flip_ex(
      mpoly, mloop, ldata, NULL, CustomData_get_layer(ldata, CD_MDISPS), true);
}
3797
3798 /**
3799 * Flip (invert winding of) all polygons (used to inverse their normals).
3800 *
3801 * \note Invalidates tessellation, caller must handle that.
3802 */
BKE_mesh_polygons_flip(MPoly * mpoly,MLoop * mloop,CustomData * ldata,int totpoly)3803 void BKE_mesh_polygons_flip(MPoly *mpoly, MLoop *mloop, CustomData *ldata, int totpoly)
3804 {
3805 MDisps *mdisp = CustomData_get_layer(ldata, CD_MDISPS);
3806 MPoly *mp;
3807 int i;
3808
3809 for (mp = mpoly, i = 0; i < totpoly; mp++, i++) {
3810 BKE_mesh_polygon_flip_ex(mp, mloop, ldata, NULL, mdisp, true);
3811 }
3812 }
3813
3814 /* -------------------------------------------------------------------- */
3815 /** \name Mesh Flag Flushing
3816 * \{ */
3817
3818 /* update the hide flag for edges and faces from the corresponding
3819 * flag in verts */
BKE_mesh_flush_hidden_from_verts_ex(const MVert * mvert,const MLoop * mloop,MEdge * medge,const int totedge,MPoly * mpoly,const int totpoly)3820 void BKE_mesh_flush_hidden_from_verts_ex(const MVert *mvert,
3821 const MLoop *mloop,
3822 MEdge *medge,
3823 const int totedge,
3824 MPoly *mpoly,
3825 const int totpoly)
3826 {
3827 int i, j;
3828
3829 for (i = 0; i < totedge; i++) {
3830 MEdge *e = &medge[i];
3831 if (mvert[e->v1].flag & ME_HIDE || mvert[e->v2].flag & ME_HIDE) {
3832 e->flag |= ME_HIDE;
3833 }
3834 else {
3835 e->flag &= ~ME_HIDE;
3836 }
3837 }
3838 for (i = 0; i < totpoly; i++) {
3839 MPoly *p = &mpoly[i];
3840 p->flag &= (char)~ME_HIDE;
3841 for (j = 0; j < p->totloop; j++) {
3842 if (mvert[mloop[p->loopstart + j].v].flag & ME_HIDE) {
3843 p->flag |= ME_HIDE;
3844 }
3845 }
3846 }
3847 }
/* Convenience wrapper operating directly on a Mesh's own arrays. */
void BKE_mesh_flush_hidden_from_verts(Mesh *mesh)
{
  BKE_mesh_flush_hidden_from_verts_ex(
      mesh->mvert, mesh->mloop, mesh->medge, mesh->totedge, mesh->mpoly, mesh->totpoly);
}
3853
/* Update the hide flag for verts and edges from the corresponding flag in polys. */
void BKE_mesh_flush_hidden_from_polys_ex(MVert *mvert,
                                         const MLoop *mloop,
                                         MEdge *medge,
                                         const int UNUSED(totedge),
                                         const MPoly *mpoly,
                                         const int totpoly)
{
  /* Pass 1: hide every vertex and edge used by a hidden polygon. */
  for (int poly_i = 0; poly_i < totpoly; poly_i++) {
    const MPoly *poly = &mpoly[poly_i];
    if ((poly->flag & ME_HIDE) == 0) {
      continue;
    }
    for (int corner = 0; corner < poly->totloop; corner++) {
      const MLoop *loop = &mloop[poly->loopstart + corner];
      mvert[loop->v].flag |= ME_HIDE;
      medge[loop->e].flag |= ME_HIDE;
    }
  }

  /* Pass 2: un-hide every vertex and edge used by a visible polygon,
   * so that elements shared with visible geometry remain visible. */
  for (int poly_i = 0; poly_i < totpoly; poly_i++) {
    const MPoly *poly = &mpoly[poly_i];
    if (poly->flag & ME_HIDE) {
      continue;
    }
    for (int corner = 0; corner < poly->totloop; corner++) {
      const MLoop *loop = &mloop[poly->loopstart + corner];
      mvert[loop->v].flag &= (char)~ME_HIDE;
      medge[loop->e].flag &= (short)~ME_HIDE;
    }
  }
}
/* Convenience wrapper operating directly on a Mesh's own arrays. */
void BKE_mesh_flush_hidden_from_polys(Mesh *mesh)
{
  BKE_mesh_flush_hidden_from_polys_ex(
      mesh->mvert, mesh->mloop, mesh->medge, mesh->totedge, mesh->mpoly, mesh->totpoly);
}
3890
3891 /**
3892 * simple poly -> vert/edge selection.
3893 */
BKE_mesh_flush_select_from_polys_ex(MVert * mvert,const int totvert,const MLoop * mloop,MEdge * medge,const int totedge,const MPoly * mpoly,const int totpoly)3894 void BKE_mesh_flush_select_from_polys_ex(MVert *mvert,
3895 const int totvert,
3896 const MLoop *mloop,
3897 MEdge *medge,
3898 const int totedge,
3899 const MPoly *mpoly,
3900 const int totpoly)
3901 {
3902 MVert *mv;
3903 MEdge *med;
3904 const MPoly *mp;
3905
3906 int i = totvert;
3907 for (mv = mvert; i--; mv++) {
3908 mv->flag &= (char)~SELECT;
3909 }
3910
3911 i = totedge;
3912 for (med = medge; i--; med++) {
3913 med->flag &= ~SELECT;
3914 }
3915
3916 i = totpoly;
3917 for (mp = mpoly; i--; mp++) {
3918 /* assume if its selected its not hidden and none of its verts/edges are hidden
3919 * (a common assumption)*/
3920 if (mp->flag & ME_FACE_SEL) {
3921 const MLoop *ml;
3922 int j;
3923 j = mp->totloop;
3924 for (ml = &mloop[mp->loopstart]; j--; ml++) {
3925 mvert[ml->v].flag |= SELECT;
3926 medge[ml->e].flag |= SELECT;
3927 }
3928 }
3929 }
3930 }
/* Convenience wrapper operating directly on a Mesh's own arrays. */
void BKE_mesh_flush_select_from_polys(Mesh *mesh)
{
  BKE_mesh_flush_select_from_polys_ex(
      mesh->mvert, mesh->totvert, mesh->mloop, mesh->medge, mesh->totedge, mesh->mpoly, mesh->totpoly);
}
3936
/* Simple vert -> edge/poly selection flush (hidden elements are left untouched). */
void BKE_mesh_flush_select_from_verts_ex(const MVert *mvert,
                                         const int UNUSED(totvert),
                                         const MLoop *mloop,
                                         MEdge *medge,
                                         const int totedge,
                                         MPoly *mpoly,
                                         const int totpoly)
{
  /* An edge is selected when both of its endpoints are selected. */
  for (int edge_i = 0; edge_i < totedge; edge_i++) {
    MEdge *edge = &medge[edge_i];
    if (edge->flag & ME_HIDE) {
      continue;
    }
    if ((mvert[edge->v1].flag & SELECT) && (mvert[edge->v2].flag & SELECT)) {
      edge->flag |= SELECT;
    }
    else {
      edge->flag &= ~SELECT;
    }
  }

  /* A polygon is selected when all of its vertices are selected. */
  for (int poly_i = 0; poly_i < totpoly; poly_i++) {
    MPoly *poly = &mpoly[poly_i];
    if (poly->flag & ME_HIDE) {
      continue;
    }

    bool all_verts_selected = true;
    for (int corner = 0; corner < poly->totloop; corner++) {
      if ((mvert[mloop[poly->loopstart + corner].v].flag & SELECT) == 0) {
        all_verts_selected = false;
        break;
      }
    }

    if (all_verts_selected) {
      poly->flag |= ME_FACE_SEL;
    }
    else {
      poly->flag &= (char)~ME_FACE_SEL;
    }
  }
}
/* Convenience wrapper operating directly on a Mesh's own arrays. */
void BKE_mesh_flush_select_from_verts(Mesh *mesh)
{
  BKE_mesh_flush_select_from_verts_ex(
      mesh->mvert, mesh->totvert, mesh->mloop, mesh->medge, mesh->totedge, mesh->mpoly, mesh->totpoly);
}
3990 /** \} */
3991
3992 /* -------------------------------------------------------------------- */
3993 /** \name Mesh Spatial Calculation
3994 * \{ */
3995
3996 /**
3997 * This function takes the difference between 2 vertex-coord-arrays
3998 * (\a vert_cos_src, \a vert_cos_dst),
3999 * and applies the difference to \a vert_cos_new relative to \a vert_cos_org.
4000 *
4001 * \param vert_cos_src: reference deform source.
4002 * \param vert_cos_dst: reference deform destination.
4003 *
4004 * \param vert_cos_org: reference for the output location.
4005 * \param vert_cos_new: resulting coords.
4006 */
BKE_mesh_calc_relative_deform(const MPoly * mpoly,const int totpoly,const MLoop * mloop,const int totvert,const float (* vert_cos_src)[3],const float (* vert_cos_dst)[3],const float (* vert_cos_org)[3],float (* vert_cos_new)[3])4007 void BKE_mesh_calc_relative_deform(const MPoly *mpoly,
4008 const int totpoly,
4009 const MLoop *mloop,
4010 const int totvert,
4011
4012 const float (*vert_cos_src)[3],
4013 const float (*vert_cos_dst)[3],
4014
4015 const float (*vert_cos_org)[3],
4016 float (*vert_cos_new)[3])
4017 {
4018 const MPoly *mp;
4019 int i;
4020
4021 int *vert_accum = MEM_calloc_arrayN((size_t)totvert, sizeof(*vert_accum), __func__);
4022
4023 memset(vert_cos_new, '\0', sizeof(*vert_cos_new) * (size_t)totvert);
4024
4025 for (i = 0, mp = mpoly; i < totpoly; i++, mp++) {
4026 const MLoop *loopstart = mloop + mp->loopstart;
4027
4028 for (int j = 0; j < mp->totloop; j++) {
4029 unsigned int v_prev = loopstart[(mp->totloop + (j - 1)) % mp->totloop].v;
4030 unsigned int v_curr = loopstart[j].v;
4031 unsigned int v_next = loopstart[(j + 1) % mp->totloop].v;
4032
4033 float tvec[3];
4034
4035 transform_point_by_tri_v3(tvec,
4036 vert_cos_dst[v_curr],
4037 vert_cos_org[v_prev],
4038 vert_cos_org[v_curr],
4039 vert_cos_org[v_next],
4040 vert_cos_src[v_prev],
4041 vert_cos_src[v_curr],
4042 vert_cos_src[v_next]);
4043
4044 add_v3_v3(vert_cos_new[v_curr], tvec);
4045 vert_accum[v_curr] += 1;
4046 }
4047 }
4048
4049 for (i = 0; i < totvert; i++) {
4050 if (vert_accum[i]) {
4051 mul_v3_fl(vert_cos_new[i], 1.0f / (float)vert_accum[i]);
4052 }
4053 else {
4054 copy_v3_v3(vert_cos_new[i], vert_cos_org[i]);
4055 }
4056 }
4057
4058 MEM_freeN(vert_accum);
4059 }
4060 /** \} */
4061