1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software Foundation,
14 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * The Original Code is Copyright (C) 2019 Blender Foundation.
17 * All rights reserved.
18 */
19
20 /** \file
21 * \ingroup bmesh
22 */
23
24 #include "MEM_guardedalloc.h"
25
26 #include "BLI_math.h"
27 #include "BLI_sort.h"
28 #include "BLI_stack.h"
29
30 #include "BKE_bvhutils.h"
31
32 #include "atomic_ops.h"
33
34 #include "bmesh.h"
35
36 #include "bmesh_intersect_edges.h" /* own include */
37
38 //#define INTERSECT_EDGES_DEBUG
39
#define KDOP_TREE_TYPE 4
#define KDOP_AXIS_LEN 14
/* Parenthesized so the expansion is precedence-safe when used inside a larger
 * expression (e.g. `x / BLI_STACK_PAIR_LEN` would otherwise parse as
 * `x / 2 * KDOP_TREE_TYPE`). Value is unchanged: 2 * 4 == 8. */
#define BLI_STACK_PAIR_LEN (2 * KDOP_TREE_TYPE)
43
44 /* -------------------------------------------------------------------- */
45 /** \name Weld Linked Wire Edges into Linked Faces
46 *
47 * Used with the merge vertices option.
48 * \{ */
49
50 /* Callbacks for `BM_vert_pair_shared_face_cb` */
51
/* Callback data for `bm_vert_pair_share_best_splittable_face_cb`. */
struct EDBMSplitBestFaceData {
  /* Chain of edges linking the vertex pair being tested. */
  BMEdge **edgenet;
  int edgenet_len;

  /**
   * Track the range of vertices in edgenet along the faces normal,
   * find the lowest since it's most likely to be most co-planar with the face.
   */
  float best_edgenet_range_on_face_normal;
  /* Best candidate found so far (NULL if none). */
  BMFace *r_best_face;
};
63
/**
 * Callback for `BM_vert_pair_shared_face_cb` that searches for the face most
 * suitable to be split by the edge-net in `userdata`
 * (a `struct EDBMSplitBestFaceData`).
 *
 * A face is rejected if any intermediate edge-net vertex lies outside it.
 * Among the remaining candidates the one where the edge-net spans the
 * smallest range along the face normal (i.e. the most co-planar one) is kept.
 *
 * Always returns false so the caller keeps iterating over all shared faces.
 */
static bool bm_vert_pair_share_best_splittable_face_cb(BMFace *f,
                                                       BMLoop *l_a,
                                                       BMLoop *l_b,
                                                       void *userdata)
{
  struct EDBMSplitBestFaceData *data = userdata;
  float no[3];
  copy_v3_v3(no, f->no);

  /* Initialize the normal-range with the two endpoint vertices. */
  float min = dot_v3v3(l_a->v->co, no);
  float max = dot_v3v3(l_b->v->co, no);
  if (min > max) {
    SWAP(float, min, max);
  }

  /* Find the starting vertex of the edge-net walk: the vertex of the first
   * edge that is NOT shared with the second edge. */
  BMEdge **e_iter = &data->edgenet[0];
  BMEdge *e_next = data->edgenet[1];
  BMVert *v_test = ELEM((*e_iter)->v1, e_next->v1, e_next->v2) ? (*e_iter)->v2 : (*e_iter)->v1;

  /* Walk the interior vertices of the edge-net (one fewer than the edges). */
  int verts_len = data->edgenet_len - 1;
  for (int i = verts_len; i--; e_iter++) {
    v_test = BM_edge_other_vert(*e_iter, v_test);
    BLI_assert(v_test != NULL);
    if (!BM_face_point_inside_test(f, v_test->co)) {
      /* An interior vertex falls outside this face: it can't be split here. */
      return false;
    }
    /* Expand the range along the face normal. */
    float dot = dot_v3v3(v_test->co, no);
    if (dot < min) {
      min = dot;
    }
    if (dot > max) {
      max = dot;
    }
  }

  /* Keep the candidate whose edge-net is most co-planar with the face. */
  const float test_edgenet_range_on_face_normal = max - min;
  if (test_edgenet_range_on_face_normal < data->best_edgenet_range_on_face_normal) {
    data->best_edgenet_range_on_face_normal = test_edgenet_range_on_face_normal;
    data->r_best_face = f;
  }

  /* Returning false keeps the shared-face iteration going. */
  return false;
}
107
/**
 * Callback for `BM_vert_pair_shared_face_cb` used when the edge-net is a
 * single edge.
 *
 * `userdata` is a `float[2][3]`: the start point (`v_a` coordinate) and the
 * direction towards `v_b`. The face is accepted (returns true) when that ray
 * crosses one of the edges adjacent to either loop, which indicates the
 * segment actually passes through the face interior.
 */
static bool bm_vert_pair_share_splittable_face_cb(BMFace *UNUSED(f),
                                                  BMLoop *l_a,
                                                  BMLoop *l_b,
                                                  void *userdata)
{
  float(*data)[3] = userdata;
  float *v_a_co = data[0];
  float *v_a_b_dir = data[1];
  /* Slightly widened [0, 1] range to tolerate float error at the endpoints. */
  const float range_min = -FLT_EPSILON;
  const float range_max = 1.0f + FLT_EPSILON;

  float co[3];
  float dir[3];
  float lambda_b;

  /* Test the edge spanning the loop adjacent to `l_a`. */
  copy_v3_v3(co, l_a->prev->v->co);
  sub_v3_v3v3(dir, l_a->next->v->co, co);
  if (isect_ray_ray_v3(v_a_co, v_a_b_dir, co, dir, NULL, &lambda_b)) {
    if (IN_RANGE(lambda_b, range_min, range_max)) {
      return true;
    }
    /* Otherwise test the edge spanning the loop adjacent to `l_b`. */
    copy_v3_v3(co, l_b->prev->v->co);
    sub_v3_v3v3(dir, l_b->next->v->co, co);
    if (isect_ray_ray_v3(v_a_co, v_a_b_dir, co, dir, NULL, &lambda_b)) {
      return IN_RANGE(lambda_b, range_min, range_max);
    }
  }
  return false;
}
138
/**
 * Find the best face to be split by the edge-net that connects `v_a` and `v_b`.
 *
 * For a single-edge net, any shared face crossed by the segment `v_a -> v_b`
 * is accepted. For longer nets, the candidate with the smallest edge-net
 * range along the face normal is chosen, and then discarded if the edge-net
 * spans a larger normal-range than the face's own vertices (plus
 * `2 * epsilon`), i.e. if the net is clearly less co-planar than the face.
 *
 * \return The face to split, or NULL when none is suitable.
 */
static BMFace *bm_vert_pair_best_face_get(
    BMVert *v_a, BMVert *v_b, BMEdge **edgenet, const int edgenet_len, const float epsilon)
{
  BMFace *r_best_face = NULL;

  BLI_assert(v_a != v_b);

  BMLoop *dummy;
  if (edgenet_len == 1) {
    /* Single edge: a ray test against the shared-face corners is enough. */
    float data[2][3];
    copy_v3_v3(data[0], v_b->co);
    sub_v3_v3v3(data[1], v_a->co, data[0]);
    r_best_face = BM_vert_pair_shared_face_cb(
        v_a, v_b, false, bm_vert_pair_share_splittable_face_cb, &data, &dummy, &dummy);
    BLI_assert(!r_best_face || BM_edge_in_face(edgenet[0], r_best_face) == false);
  }
  else {
    /* Multi-edge net: pick the face the net is most co-planar with. */
    struct EDBMSplitBestFaceData data = {
        .edgenet = edgenet,
        .edgenet_len = edgenet_len,
        .best_edgenet_range_on_face_normal = FLT_MAX,
        .r_best_face = NULL,
    };
    BM_vert_pair_shared_face_cb(
        v_a, v_b, true, bm_vert_pair_share_best_splittable_face_cb, &data, &dummy, &dummy);

    if (data.r_best_face) {
      /* Check if the edgenet's range is smaller than the face's range. */
      float no[3], min = FLT_MAX, max = -FLT_MAX;
      copy_v3_v3(no, data.r_best_face->no);
      BMVert *v_test;
      BMIter f_iter;
      BM_ITER_ELEM (v_test, &f_iter, data.r_best_face, BM_VERTS_OF_FACE) {
        float dot = dot_v3v3(v_test->co, no);
        if (dot < min) {
          min = dot;
        }
        if (dot > max) {
          max = dot;
        }
      }
      /* `2 * epsilon` widens the face's own range before the comparison. */
      float face_range_on_normal = max - min + 2 * epsilon;
      if (face_range_on_normal < data.best_edgenet_range_on_face_normal) {
        /* The edge-net is less co-planar than the face itself: reject. */
        data.r_best_face = NULL;
      }
    }
    r_best_face = data.r_best_face;
  }

  return r_best_face;
}
190
191 /** \} */
192
193 /* -------------------------------------------------------------------- */
194 /** \name Auto-Merge & Split Selection
195 *
196 * Used after transform operations.
197 * \{ */
198
/**
 * One endpoint of an intersection pair.
 *
 * Starts out as either a vertex, or an edge plus the intersection factor
 * `lambda` along it; edge elements are later replaced by the vertex created
 * at `lambda` when the edge is split. The `head.htype` of `elem` tells which
 * union member is active.
 */
struct EDBMSplitElem {
  union {
    BMElem *elem;
    BMVert *vert;
    struct {
      BMEdge *edge;
      float lambda;
    };
  };
};
209
210 /* -------------------------------------------------------------------- */
211 /* Overlap Callbacks */
212
/* Shared state passed to all BVH-overlap callbacks. */
struct EDBMSplitData {
  BMesh *bm;
  /* Per-thread stacks collecting `EDBMSplitElem[2]` intersection pairs. */
  BLI_Stack **pair_stack;
  /* Total number of edges that will be cut; incremented atomically. */
  int cut_edges_len;
  /* Squared merge distance. */
  float dist_sq;
  /* `dist_sq` squared; used as epsilon for the edge x edge ray intersection. */
  float dist_sq_sq;
};
220
221 /* Utils */
222
/* Fill `r_pair_elem` with vertex `v`. */
static void bm_vert_pair_elem_setup_ex(BMVert *v, struct EDBMSplitElem *r_pair_elem)
{
  r_pair_elem->vert = v;
}
227
/**
 * Fill `r_pair_elem` with edge `e` and the intersection factor `lambda`
 * along it.
 *
 * Also bumps the per-edge cut counter (`e->head.index`) and, the first time
 * an edge is cut, the global cut-edge counter. Runs concurrently from the
 * threaded BVH-overlap callbacks, hence the atomics.
 */
static void bm_edge_pair_elem_setup(BMEdge *e,
                                    float lambda,
                                    int *r_data_cut_edges_len,
                                    struct EDBMSplitElem *r_pair_elem)
{
  r_pair_elem->edge = e;
  r_pair_elem->lambda = lambda;

  /* Even though we have multiple atomic operations, this is fine here, since
   * there is no dependency on order.
   * The `e->head.index` check + atomic increment will ever be true once, as
   * expected. We don't care which instance of the code actually ends up
   * incrementing `r_data_cut_edge_len`, so there is no race condition here. */
  if (atomic_fetch_and_add_int32(&e->head.index, 1) == 0) {
    atomic_fetch_and_add_int32(r_data_cut_edges_len, 1);
  }
}
245
246 /* Util for Vert x Edge and Edge x Edge callbacks */
/* Util for Vert x Edge and Edge x Edge callbacks */

/**
 * Shared implementation for the Vert x Edge proximity test.
 *
 * `lambda` is the factor of the projection of `v` onto the edge ray
 * (`co` + `dir`). Returns true (and fills `r_pair` with edge then vertex)
 * only when the projected point is inside the edge, not within merge
 * distance of either edge endpoint (endpoint proximity is handled by the
 * Vert x Vert pass), and within merge distance of `v`.
 */
static bool bm_edgexvert_isect_impl(BMVert *v,
                                    BMEdge *e,
                                    const float co[3],
                                    const float dir[3],
                                    float lambda,
                                    float data_dist_sq,
                                    int *data_cut_edges_len,
                                    struct EDBMSplitElem r_pair[2])
{
  BMVert *e_v;
  float dist_sq_vert_factor;

  if (!IN_RANGE_INCL(lambda, 0.0f, 1.0f)) {
    /* Vert x Vert is already handled elsewhere. */
    return false;
  }

  /* Pick the nearest edge endpoint and its distance factor along the edge. */
  if (lambda < 0.5f) {
    e_v = e->v1;
    dist_sq_vert_factor = lambda;
  }
  else {
    e_v = e->v2;
    dist_sq_vert_factor = 1.0f - lambda;
  }

  if (v != e_v) {
    /* Squared distance from the intersection point to the nearest endpoint. */
    float dist_sq_vert = square_f(dist_sq_vert_factor) * len_squared_v3(dir);
    if (dist_sq_vert < data_dist_sq) {
      /* Vert x Vert is already handled elsewhere. */
      return false;
    }

    float closest[3];
    madd_v3_v3v3fl(closest, co, dir, lambda);

    float dist_sq = len_squared_v3v3(v->co, closest);
    if (dist_sq < data_dist_sq) {
      bm_edge_pair_elem_setup(e, lambda, data_cut_edges_len, &r_pair[0]);
      bm_vert_pair_elem_setup_ex(v, &r_pair[1]);
      return true;
    }
  }

  return false;
}
293
294 /* Vertex x Vertex Callback */
295
bm_vertxvert_isect_cb(void * userdata,int index_a,int index_b,int thread)296 static bool bm_vertxvert_isect_cb(void *userdata, int index_a, int index_b, int thread)
297 {
298 struct EDBMSplitData *data = userdata;
299 BMVert *v_a = BM_vert_at_index(data->bm, index_a);
300 BMVert *v_b = BM_vert_at_index(data->bm, index_b);
301
302 struct EDBMSplitElem *pair = BLI_stack_push_r(data->pair_stack[thread]);
303
304 bm_vert_pair_elem_setup_ex(v_a, &pair[0]);
305 bm_vert_pair_elem_setup_ex(v_b, &pair[1]);
306
307 return true;
308 }
309
bm_vertxvert_self_isect_cb(void * userdata,int index_a,int index_b,int thread)310 static bool bm_vertxvert_self_isect_cb(void *userdata, int index_a, int index_b, int thread)
311 {
312 if (index_a < index_b) {
313 return bm_vertxvert_isect_cb(userdata, index_a, index_b, thread);
314 }
315 return false;
316 }
317
318 /* Vertex x Edge and Edge x Vertex Callbacks */
319
/**
 * BVH-overlap callback testing edge `index_a` against vertex `index_b`.
 *
 * Projects the vertex onto the edge and, when within merge distance, pushes
 * an (edge, vert) pair onto this thread's stack. Always returns false so the
 * BVH itself collects nothing — the stacks hold the results.
 */
static bool bm_edgexvert_isect_cb(void *userdata, int index_a, int index_b, int thread)
{
  struct EDBMSplitData *data = userdata;
  BMEdge *e = BM_edge_at_index(data->bm, index_a);
  BMVert *v = BM_vert_at_index(data->bm, index_b);

  /* Factor of the vertex projected onto the edge ray (unclamped). */
  float co[3], dir[3], lambda;
  copy_v3_v3(co, e->v1->co);
  sub_v3_v3v3(dir, e->v2->co, co);
  lambda = ray_point_factor_v3_ex(v->co, co, dir, 0.0f, -1.0f);

  struct EDBMSplitElem pair_tmp[2];
  if (bm_edgexvert_isect_impl(
          v, e, co, dir, lambda, data->dist_sq, &data->cut_edges_len, pair_tmp)) {
    struct EDBMSplitElem *pair = BLI_stack_push_r(data->pair_stack[thread]);
    pair[0] = pair_tmp[0];
    pair[1] = pair_tmp[1];
  }

  /* Always return false with edges. */
  return false;
}
342
343 /* Edge x Edge Callbacks */
344
/**
 * Shared implementation for the Edge x Edge proximity test.
 *
 * `lambda_a`/`lambda_b` are the factors of the closest points along each
 * edge ray. Returns true (and fills `r_pair` with both edges) only when the
 * closest points are inside both edges, far enough from every edge endpoint
 * (endpoint cases belong to the Vert x Edge pass), and within merge distance
 * of each other.
 */
static bool bm_edgexedge_isect_impl(struct EDBMSplitData *data,
                                    BMEdge *e_a,
                                    BMEdge *e_b,
                                    const float co_a[3],
                                    const float dir_a[3],
                                    const float co_b[3],
                                    const float dir_b[3],
                                    float lambda_a,
                                    float lambda_b,
                                    struct EDBMSplitElem r_pair[2])
{
  float dist_sq_va_factor, dist_sq_vb_factor;
  BMVert *e_a_v, *e_b_v;

  /* Nearest endpoint of `e_a` and its distance factor along the edge. */
  if (lambda_a < 0.5f) {
    e_a_v = e_a->v1;
    dist_sq_va_factor = lambda_a;
  }
  else {
    e_a_v = e_a->v2;
    dist_sq_va_factor = 1.0f - lambda_a;
  }

  /* Same for `e_b`. */
  if (lambda_b < 0.5f) {
    e_b_v = e_b->v1;
    dist_sq_vb_factor = lambda_b;
  }
  else {
    e_b_v = e_b->v2;
    dist_sq_vb_factor = 1.0f - lambda_b;
  }

  if (e_a_v != e_b_v) {
    if (!IN_RANGE_INCL(lambda_a, 0.0f, 1.0f) || !IN_RANGE_INCL(lambda_b, 0.0f, 1.0f)) {
      /* Vert x Edge is already handled elsewhere. */
      return false;
    }

    /* Squared distances from each intersection point to the nearest endpoint. */
    float dist_sq_va = square_f(dist_sq_va_factor) * len_squared_v3(dir_a);
    float dist_sq_vb = square_f(dist_sq_vb_factor) * len_squared_v3(dir_b);

    if (dist_sq_va < data->dist_sq || dist_sq_vb < data->dist_sq) {
      /* Vert x Edge is already handled elsewhere. */
      return false;
    }

    float near_a[3], near_b[3];
    madd_v3_v3v3fl(near_a, co_a, dir_a, lambda_a);
    madd_v3_v3v3fl(near_b, co_b, dir_b, lambda_b);

    float dist_sq = len_squared_v3v3(near_a, near_b);
    if (dist_sq < data->dist_sq) {
      bm_edge_pair_elem_setup(e_a, lambda_a, &data->cut_edges_len, &r_pair[0]);
      bm_edge_pair_elem_setup(e_b, lambda_b, &data->cut_edges_len, &r_pair[1]);
      return true;
    }
  }
  return false;
}
403
/**
 * BVH-overlap callback testing edge `index_a` against edge `index_b`.
 *
 * Computes the closest points between the two edge rays and pushes an
 * (edge, edge) pair when they are within merge distance. Always returns
 * false — results are collected in the per-thread stacks.
 */
static bool bm_edgexedge_isect_cb(void *userdata, int index_a, int index_b, int thread)
{
  struct EDBMSplitData *data = userdata;
  BMEdge *e_a = BM_edge_at_index(data->bm, index_a);
  BMEdge *e_b = BM_edge_at_index(data->bm, index_b);

  if (BM_edge_share_vert_check(e_a, e_b)) {
    /* The other vertices may intersect but Vert x Edge is already handled elsewhere. */
    return false;
  }

  float co_a[3], dir_a[3], co_b[3], dir_b[3];
  copy_v3_v3(co_a, e_a->v1->co);
  sub_v3_v3v3(dir_a, e_a->v2->co, co_a);

  copy_v3_v3(co_b, e_b->v1->co);
  sub_v3_v3v3(dir_b, e_b->v2->co, co_b);

  float lambda_a, lambda_b;
  /* Using with dist^4 as `epsilon` is not the best solution, but it fits in most cases. */
  if (isect_ray_ray_epsilon_v3(co_a, dir_a, co_b, dir_b, data->dist_sq_sq, &lambda_a, &lambda_b)) {
    struct EDBMSplitElem pair_tmp[2];
    if (bm_edgexedge_isect_impl(
            data, e_a, e_b, co_a, dir_a, co_b, dir_b, lambda_a, lambda_b, pair_tmp)) {
      struct EDBMSplitElem *pair = BLI_stack_push_r(data->pair_stack[thread]);
      pair[0] = pair_tmp[0];
      pair[1] = pair_tmp[1];
    }
  }

  /* Edge x Edge returns always false. */
  return false;
}
437
bm_edgexedge_self_isect_cb(void * userdata,int index_a,int index_b,int thread)438 static bool bm_edgexedge_self_isect_cb(void *userdata, int index_a, int index_b, int thread)
439 {
440 if (index_a < index_b) {
441 return bm_edgexedge_isect_cb(userdata, index_a, index_b, thread);
442 }
443 return false;
444 }
445
446 /* -------------------------------------------------------------------- */
447 /* BVHTree Overlap Function */
448
/**
 * Run a threaded BVH-tree overlap test, lazily creating one pair-stack per
 * overlap thread so callbacks can record results without locking.
 */
static void bm_elemxelem_bvhtree_overlap(const BVHTree *tree1,
                                         const BVHTree *tree2,
                                         BVHTree_OverlapCallback callback,
                                         struct EDBMSplitData *data,
                                         BLI_Stack **pair_stack)
{
  const int thread_num = BLI_bvhtree_overlap_thread_num(tree1);

  /* Make sure every thread slot has a stack before the callbacks run. */
  for (int i = 0; i < thread_num; i++) {
    if (pair_stack[i] != NULL) {
      continue;
    }
    pair_stack[i] = BLI_stack_new(sizeof(const struct EDBMSplitElem[2]), __func__);
  }

  data->pair_stack = pair_stack;
  BLI_bvhtree_overlap_ex(tree1, tree2, NULL, callback, data, 1, BVH_OVERLAP_USE_THREADING);
}
464
465 /* -------------------------------------------------------------------- */
466 /* Callbacks for `BLI_qsort_r` */
467
sort_cmp_by_lambda_cb(const void * index1_v,const void * index2_v,void * keys_v)468 static int sort_cmp_by_lambda_cb(const void *index1_v, const void *index2_v, void *keys_v)
469 {
470 const struct EDBMSplitElem *pair_flat = keys_v;
471 const int index1 = *(int *)index1_v;
472 const int index2 = *(int *)index2_v;
473
474 if (pair_flat[index1].lambda > pair_flat[index2].lambda) {
475 return 1;
476 }
477 return -1;
478 }
479
480 /* -------------------------------------------------------------------- */
481 /* Main API */
482
/* When defined, Edge x Edge and Edge x Vert intersections are computed too
 * (not only Vert x Vert merges). */
#define INTERSECT_EDGES

/**
 * Detect intersections between the elements tagged with `hflag` and the rest
 * of the mesh, splitting intersected edges and collecting vertex-merge pairs.
 *
 * \param bm: The mesh to operate on.
 * \param hflag: Flag identifying the "active" vertices; edges with at least
 * one active vertex are tested as active edges.
 * \param dist: Maximum distance for two elements to be considered
 * intersecting/mergeable.
 * \param split_faces: When true, also split faces crossed by the resulting
 * edge-nets so the subsequent weld produces valid topology.
 * \param r_targetmap: Optional #GHash filled with (vert -> target vert)
 * entries for a later weld-verts pass.
 * \return true when `r_targetmap` was filled (there is something to merge).
 */
bool BM_mesh_intersect_edges(
    BMesh *bm, const char hflag, const float dist, const bool split_faces, GHash *r_targetmap)
{
  bool ok = false;

  BMIter iter;
  BMVert *v;
  BMEdge *e;
  int i;

  /* Store all intersections in this array. */
  struct EDBMSplitElem(*pair_iter)[2], (*pair_array)[2] = NULL;
  int pair_len = 0;

  /* One stack per overlap thread; the first `KDOP_TREE_TYPE` slots collect
   * Vert x Vert pairs, the second half collects Edge x (Edge|Vert) pairs. */
  BLI_Stack *pair_stack[BLI_STACK_PAIR_LEN] = {NULL};
  BLI_Stack **pair_stack_vertxvert = pair_stack;
  BLI_Stack **pair_stack_edgexelem = &pair_stack[KDOP_TREE_TYPE];

  const float dist_sq = square_f(dist);
  const float dist_half = dist / 2;

  struct EDBMSplitData data = {
      .bm = bm,
      .pair_stack = pair_stack,
      .cut_edges_len = 0,
      .dist_sq = dist_sq,
      .dist_sq_sq = square_f(dist_sq),
  };

  BM_mesh_elem_table_ensure(bm, BM_VERT | BM_EDGE);

  /* Tag and count the verts to be tested. */
  int verts_act_len = 0, verts_remain_len = 0;
  BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) {
    if (BM_elem_flag_test(v, hflag)) {
      BM_elem_flag_enable(v, BM_ELEM_TAG);
      verts_act_len++;
    }
    else {
      BM_elem_flag_disable(v, BM_ELEM_TAG);
      if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
        verts_remain_len++;
      }
    }

    /* The index will indicate which cut in pair_array this vertex belongs to. */
    BM_elem_index_set(v, -1);
  }
  bm->elem_index_dirty |= BM_VERT;

  /* Start the creation of BVHTrees. */
  BVHTree *tree_verts_act = NULL, *tree_verts_remain = NULL;
  if (verts_act_len) {
    tree_verts_act = BLI_bvhtree_new(verts_act_len, dist_half, KDOP_TREE_TYPE, KDOP_AXIS_LEN);
  }
  if (verts_remain_len) {
    tree_verts_remain = BLI_bvhtree_new(
        verts_remain_len, dist_half, KDOP_TREE_TYPE, KDOP_AXIS_LEN);
  }

  if (tree_verts_act || tree_verts_remain) {
    /* Insert each vertex into the tree matching its tagged state. */
    BM_ITER_MESH_INDEX (v, &iter, bm, BM_VERTS_OF_MESH, i) {
      if (BM_elem_flag_test(v, BM_ELEM_TAG)) {
        if (tree_verts_act) {
          BLI_bvhtree_insert(tree_verts_act, i, v->co, 1);
        }
      }
      else if (tree_verts_remain && !BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
        BLI_bvhtree_insert(tree_verts_remain, i, v->co, 1);
      }
    }

    if (tree_verts_act) {
      BLI_bvhtree_balance(tree_verts_act);
      /* First pair search. */
      bm_elemxelem_bvhtree_overlap(
          tree_verts_act, tree_verts_act, bm_vertxvert_self_isect_cb, &data, pair_stack_vertxvert);
    }

    if (tree_verts_remain) {
      BLI_bvhtree_balance(tree_verts_remain);
    }

    if (tree_verts_act && tree_verts_remain) {
      bm_elemxelem_bvhtree_overlap(
          tree_verts_remain, tree_verts_act, bm_vertxvert_isect_cb, &data, pair_stack_vertxvert);
    }
  }

  /* Tally the Vert x Vert pairs gathered so far. */
  for (i = KDOP_TREE_TYPE; i--;) {
    if (pair_stack_vertxvert[i]) {
      pair_len += BLI_stack_count(pair_stack_vertxvert[i]);
    }
  }

#ifdef INTERSECT_EDGES
  uint vertxvert_pair_len = pair_len;

#  define EDGE_ACT_TO_TEST 1
#  define EDGE_REMAIN_TO_TEST 2
  /* Tag and count the edges. */
  int edges_act_len = 0, edges_remain_len = 0;
  BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
    if (BM_elem_flag_test(e, BM_ELEM_HIDDEN) ||
        (len_squared_v3v3(e->v1->co, e->v2->co) < dist_sq)) {
      /* Don't test hidden edges or smaller than the minimum distance.
       * These have already been handled in the vertices overlap. */
      BM_elem_index_set(e, 0);
      if (split_faces) {
        /* Tag to be ignored. */
        BM_elem_flag_enable(e, BM_ELEM_TAG);
      }
      continue;
    }

    if (BM_elem_flag_test(e->v1, BM_ELEM_TAG) || BM_elem_flag_test(e->v2, BM_ELEM_TAG)) {
      BM_elem_index_set(e, EDGE_ACT_TO_TEST);
      edges_act_len++;
    }
    else {
      BM_elem_index_set(e, EDGE_REMAIN_TO_TEST);
      edges_remain_len++;
      if (split_faces) {
        /* Tag to be ignored. */
        BM_elem_flag_enable(e, BM_ELEM_TAG);
      }
    }
  }

  BVHTree *tree_edges_act = NULL, *tree_edges_remain = NULL;
  if (edges_act_len) {
    tree_edges_act = BLI_bvhtree_new(edges_act_len, dist_half, KDOP_TREE_TYPE, KDOP_AXIS_LEN);
  }

  if (edges_remain_len && (tree_edges_act || tree_verts_act)) {
    tree_edges_remain = BLI_bvhtree_new(
        edges_remain_len, dist_half, KDOP_TREE_TYPE, KDOP_AXIS_LEN);
  }

  if (tree_edges_act || tree_edges_remain) {
    BM_ITER_MESH_INDEX (e, &iter, bm, BM_EDGES_OF_MESH, i) {
      int edge_test = BM_elem_index_get(e);
      float co[2][3];
      if (edge_test == EDGE_ACT_TO_TEST) {
        BLI_assert(tree_edges_act);
        e->head.index = 0;
        copy_v3_v3(co[0], e->v1->co);
        copy_v3_v3(co[1], e->v2->co);
        BLI_bvhtree_insert(tree_edges_act, i, co[0], 2);
      }
      else if (edge_test == EDGE_REMAIN_TO_TEST) {
        BLI_assert(tree_edges_remain);
        e->head.index = 0;
        copy_v3_v3(co[0], e->v1->co);
        copy_v3_v3(co[1], e->v2->co);
        BLI_bvhtree_insert(tree_edges_remain, i, co[0], 2);
      }
#  ifdef INTERSECT_EDGES_DEBUG
      else {
        e->head.index = 0;
      }
#  endif
      /* Tag used when converting pairs to vert x vert. */
      BM_elem_flag_disable(e, BM_ELEM_TAG);
    }
#  undef EDGE_ACT_TO_TEST
#  undef EDGE_REMAIN_TO_TEST

    /* Use `e->head.index` to count intersections. */
    bm->elem_index_dirty |= BM_EDGE;

    if (tree_edges_act) {
      BLI_bvhtree_balance(tree_edges_act);
    }

    if (tree_edges_remain) {
      BLI_bvhtree_balance(tree_edges_remain);
    }

    int edgexedge_pair_len = 0;
    if (tree_edges_act) {
      /* Edge x Edge */
      bm_elemxelem_bvhtree_overlap(
          tree_edges_act, tree_edges_act, bm_edgexedge_self_isect_cb, &data, pair_stack_edgexelem);

      if (tree_edges_remain) {
        bm_elemxelem_bvhtree_overlap(
            tree_edges_remain, tree_edges_act, bm_edgexedge_isect_cb, &data, pair_stack_edgexelem);
      }

      /* Count the Edge x Edge pairs before the Edge x Vert passes append to
       * the same stacks. */
      for (i = KDOP_TREE_TYPE; i--;) {
        if (pair_stack_edgexelem[i]) {
          edgexedge_pair_len += BLI_stack_count(pair_stack_edgexelem[i]);
        }
      }

      if (tree_verts_act) {
        /* Edge v Vert */
        bm_elemxelem_bvhtree_overlap(
            tree_edges_act, tree_verts_act, bm_edgexvert_isect_cb, &data, pair_stack_edgexelem);
      }

      if (tree_verts_remain) {
        /* Edge v Vert */
        bm_elemxelem_bvhtree_overlap(
            tree_edges_act, tree_verts_remain, bm_edgexvert_isect_cb, &data, pair_stack_edgexelem);
      }

      BLI_bvhtree_free(tree_edges_act);
    }

    if (tree_verts_act && tree_edges_remain) {
      /* Edge v Vert */
      bm_elemxelem_bvhtree_overlap(
          tree_edges_remain, tree_verts_act, bm_edgexvert_isect_cb, &data, pair_stack_edgexelem);
    }

    BLI_bvhtree_free(tree_edges_remain);

    int edgexelem_pair_len = 0;
    for (i = KDOP_TREE_TYPE; i--;) {
      if (pair_stack_edgexelem[i]) {
        edgexelem_pair_len += BLI_stack_count(pair_stack_edgexelem[i]);
      }
    }

    pair_len += edgexelem_pair_len;
    int edgexvert_pair_len = edgexelem_pair_len - edgexedge_pair_len;

    if (edgexelem_pair_len) {
      /* Flatten all the per-thread stacks into one contiguous pair array. */
      pair_array = MEM_mallocN(sizeof(*pair_array) * pair_len, __func__);

      pair_iter = pair_array;
      for (i = 0; i < BLI_STACK_PAIR_LEN; i++) {
        if (pair_stack[i]) {
          uint count = (uint)BLI_stack_count(pair_stack[i]);
          BLI_stack_pop_n_reverse(pair_stack[i], pair_iter, count);
          pair_iter += count;
        }
      }

      /* Map intersections per edge: for each cut edge, its number of cuts
       * followed by the indices of those cuts, packed into one flat buffer. */
      union {
        struct {
          int cuts_len;
          int cuts_index[];
        };
        int as_int[0];
      } * e_map_iter, *e_map;

#  ifdef INTERSECT_EDGES_DEBUG
      int cut_edges_len = 0;
      BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
        if (e->head.index != 0) {
          cut_edges_len++;
        }
      }
      BLI_assert(cut_edges_len == data.cut_edges_len);
#  endif

      /* One header per cut edge + one index slot per cut
       * (Edge x Edge pairs contribute two edge elements each). */
      size_t e_map_size = (data.cut_edges_len * sizeof(*e_map)) +
                          (((size_t)2 * edgexedge_pair_len + edgexvert_pair_len) *
                           sizeof(*(e_map->cuts_index)));

      e_map = MEM_mallocN(e_map_size, __func__);
      int map_len = 0;

      /* Convert every pair to Vert x Vert. */

      /* The list of pairs starts with [vert x vert] followed by [edge x edge]
       * and finally [edge x vert].
       * Ignore the [vert x vert] pairs */
      struct EDBMSplitElem *pair_flat, *pair_flat_iter;
      pair_flat = (struct EDBMSplitElem *)&pair_array[vertxvert_pair_len];
      pair_flat_iter = &pair_flat[0];
      uint pair_flat_len = 2 * edgexelem_pair_len;
      for (i = 0; i < pair_flat_len; i++, pair_flat_iter++) {
        if (pair_flat_iter->elem->head.htype != BM_EDGE) {
          continue;
        }

        e = pair_flat_iter->edge;
        if (!BM_elem_flag_test(e, BM_ELEM_TAG)) {
          /* First time this edge is seen: open its header in the map. */
          BM_elem_flag_enable(e, BM_ELEM_TAG);
          int e_cuts_len = e->head.index;

          e_map_iter = (void *)&e_map->as_int[map_len];
          e_map_iter->cuts_len = e_cuts_len;
          e_map_iter->cuts_index[0] = i;

          /* Use `e->head.index` to indicate which slot to fill with the `cut` index. */
          e->head.index = map_len + 1;
          map_len += 1 + e_cuts_len;
        }
        else {
          /* Edge already mapped: append this cut into its next slot. */
          e_map->as_int[++e->head.index] = i;
        }
      }

      /* Split Edges A to set all Vert x Edge. */
      for (i = 0; i < map_len;
           e_map_iter = (void *)&e_map->as_int[i], i += 1 + e_map_iter->cuts_len) {

        /* Sort by lambda. */
        BLI_qsort_r(e_map_iter->cuts_index,
                    e_map_iter->cuts_len,
                    sizeof(*(e_map->cuts_index)),
                    sort_cmp_by_lambda_cb,
                    pair_flat);

        /* Split the edge at each cut, remapping each lambda to the remaining
         * (shrinking) edge segment. */
        float lambda, lambda_prev = 0.0f;
        for (int j = 0; j < e_map_iter->cuts_len; j++) {
          uint index = e_map_iter->cuts_index[j];

          struct EDBMSplitElem *pair_elem = &pair_flat[index];
          lambda = (pair_elem->lambda - lambda_prev) / (1.0f - lambda_prev);
          lambda_prev = pair_elem->lambda;
          e = pair_elem->edge;
          if (split_faces) {
            /* Tagged edges are ignored when split faces.
             * Un-tag these. */
            BM_elem_flag_disable(e, BM_ELEM_TAG);
          }

          /* Replace the edge element with the newly created vertex. */
          BMVert *v_new = BM_edge_split(bm, e, e->v1, NULL, lambda);
          pair_elem->vert = v_new;
        }
      }

      MEM_freeN(e_map);
    }
  }
#endif

  BLI_bvhtree_free(tree_verts_act);
  BLI_bvhtree_free(tree_verts_remain);

  if (r_targetmap) {
    if (pair_len && pair_array == NULL) {
      /* No edges were cut: flatten the Vert x Vert stacks now. */
      pair_array = MEM_mallocN(sizeof(*pair_array) * pair_len, __func__);
      pair_iter = pair_array;
      for (i = 0; i < BLI_STACK_PAIR_LEN; i++) {
        if (pair_stack[i]) {
          uint count = (uint)BLI_stack_count(pair_stack[i]);
          BLI_stack_pop_n_reverse(pair_stack[i], pair_iter, count);
          pair_iter += count;
        }
      }
    }

    if (pair_array) {
      /* At this point every pair is Vert x Vert; fill the target map. */
      BMVert *v_key, *v_val;
      pair_iter = &pair_array[0];
      for (i = 0; i < pair_len; i++, pair_iter++) {
        BLI_assert((*pair_iter)[0].elem->head.htype == BM_VERT);
        BLI_assert((*pair_iter)[1].elem->head.htype == BM_VERT);
        BLI_assert((*pair_iter)[0].elem != (*pair_iter)[1].elem);
        v_key = (*pair_iter)[0].vert;
        v_val = (*pair_iter)[1].vert;
        BLI_ghash_insert(r_targetmap, v_key, v_val);
      }

      /**
       * The weld_verts operator works best when all keys in the same group of
       * collapsed vertices point to the same vertex.
       * That is, if the pairs of vertices are:
       * [1, 2], [2, 3] and [3, 4],
       * They are better adjusted to:
       * [1, 4], [2, 4] and [3, 4].
       */
      pair_iter = &pair_array[0];
      for (i = 0; i < pair_len; i++, pair_iter++) {
        v_key = (*pair_iter)[0].vert;
        v_val = (*pair_iter)[1].vert;
        BMVert *v_target;
        /* Follow the chain of targets to its final vertex. */
        while ((v_target = BLI_ghash_lookup(r_targetmap, v_val))) {
          v_val = v_target;
        }
        if (v_val != (*pair_iter)[1].vert) {
          BMVert **v_val_p = (BMVert **)BLI_ghash_lookup_p(r_targetmap, v_key);
          *v_val_p = (*pair_iter)[1].vert = v_val;
        }
        if (split_faces) {
          /* The vertex index indicates its position in the pair_array flat. */
          BM_elem_index_set(v_key, i * 2);
          BM_elem_index_set(v_val, i * 2 + 1);
        }
      }

      if (split_faces) {
        /* Walk wire chains between merge targets and split the faces they
         * cross, so welding does not produce overlapping geometry. */
        BMEdge **edgenet = NULL;
        int edgenet_alloc_len = 0;

        struct EDBMSplitElem *pair_flat = (struct EDBMSplitElem *)&pair_array[0];
        BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
          if (BM_elem_flag_test(e, BM_ELEM_TAG)) {
            /* Edge out of context or already tested. */
            continue;
          }

          BMVert *va, *vb, *va_dest = NULL;
          va = e->v1;
          vb = e->v2;

          int v_cut = BM_elem_index_get(va);
          int v_cut_other = BM_elem_index_get(vb);
          if (v_cut == -1 && v_cut_other == -1) {
            if (!BM_elem_flag_test(va, BM_ELEM_TAG) && !BM_elem_flag_test(vb, BM_ELEM_TAG)) {
              /* Edge out of context. */
              BM_elem_flag_enable(e, BM_ELEM_TAG);
            }
            continue;
          }

          /* Tag to avoid testing again. */
          BM_elem_flag_enable(e, BM_ELEM_TAG);

          /* Make sure `va` is the vertex with a pair-array entry. */
          if (v_cut == -1) {
            SWAP(BMVert *, va, vb);
            v_cut = v_cut_other;
            v_cut_other = -1;
          }

          /* `v_cut` indicates the other vertex within the `pair_array`;
           * toggling the low bit gives the partner's flat index. */
          v_cut += v_cut % 2 ? -1 : 1;
          va_dest = pair_flat[v_cut].vert;

          if (BM_vert_pair_share_face_check(va, va_dest)) {
            /* Vert pair acts on the same face.
             * Although there are cases like this where the face can be split,
             * for efficiency it is better to ignore them. */
            continue;
          }

          BMFace *best_face = NULL;
          BMVert *v_other_dest, *v_other = vb;
          BMEdge *e_net = e;
          int edgenet_len = 0;
          /* Grow the edge-net along connected wire edges until a splittable
           * face is found or the chain ends. */
          while (true) {
            if (v_cut_other != -1) {
              v_cut_other += v_cut_other % 2 ? -1 : 1;
              v_other_dest = pair_flat[v_cut_other].vert;

              if (BM_vert_pair_share_face_check(v_other, v_other_dest)) {
                /* Vert pair acts on the same face.
                 * Although there are cases like this where the face can be split,
                 * for efficiency and to avoid complications, it is better to ignore these cases.
                 */
                break;
              }
            }
            else {
              v_other_dest = v_other;
            }

            if (va_dest == v_other_dest) {
              /* Edge/Edgenet to vertex - we can't split the face. */
              break;
            }
            if (edgenet_len == 0 && BM_edge_exists(va_dest, v_other_dest)) {
              /* Edge to edge - no need to detect face. */
              break;
            }

            /* Grow the edge-net array geometrically as needed. */
            if (edgenet_alloc_len == edgenet_len) {
              edgenet_alloc_len = (edgenet_alloc_len + 1) * 2;
              edgenet = MEM_reallocN(edgenet, (edgenet_alloc_len) * sizeof(*edgenet));
            }
            edgenet[edgenet_len++] = e_net;

            best_face = bm_vert_pair_best_face_get(
                va_dest, v_other_dest, edgenet, edgenet_len, dist);

            if (best_face) {
              /* Bridge the edge-net ends to the destination vertices so it
               * forms a closed path across the face. */
              if ((va_dest != va) && !BM_edge_exists(va_dest, va)) {
                /**
                 * <pre>
                 *  va---vb---
                 *      /
                 *  va_dest
                 * </pre>
                 */
                e_net = edgenet[0];
                if (edgenet_len > 1) {
                  vb = BM_edge_other_vert(e_net, va);
                }
                else {
                  vb = v_other_dest;
                }
                edgenet[0] = BM_edge_create(bm, va_dest, vb, e_net, BM_CREATE_NOP);
              }
              if ((edgenet_len > 1) && (v_other_dest != v_other) &&
                  !BM_edge_exists(v_other_dest, v_other)) {
                /**
                 * <pre>
                 *  ---v---v_other
                 *       \
                 *        v_other_dest
                 * </pre>
                 */
                e_net = edgenet[edgenet_len - 1];
                edgenet[edgenet_len - 1] = BM_edge_create(
                    bm, v_other_dest, BM_edge_other_vert(e_net, v_other), e_net, BM_CREATE_NOP);
              }
              break;
            }

            /* No face yet: advance to the next connected candidate edge. */
            BMEdge *e_test = e_net, *e_next = NULL;
            while ((e_test = BM_DISK_EDGE_NEXT(e_test, v_other)) != (e_net)) {
              if (!BM_edge_is_wire(e_test)) {
                if (BM_elem_flag_test(e_test, BM_ELEM_TAG)) {
                  continue;
                }
                if (!BM_elem_flag_test(e_test->v1, BM_ELEM_TAG) &&
                    !BM_elem_flag_test(e_test->v2, BM_ELEM_TAG)) {
                  continue;
                }
                /* Avoids endless loop. */
                BM_elem_flag_enable(e_test, BM_ELEM_TAG);
              }
              else if (!BM_edge_is_wire(e_net)) {
                continue;
              }
              e_next = e_test;
              break;
            }

            if (e_next == NULL) {
              break;
            }

            e_net = e_next;
            v_other = BM_edge_other_vert(e_net, v_other);
            if (v_other == va) {
              /* Endless loop. */
              break;
            }
            v_cut_other = BM_elem_index_get(v_other);
          }

          if (best_face) {
            BMFace **face_arr = NULL;
            int face_arr_len = 0;
            BM_face_split_edgenet(bm, best_face, edgenet, edgenet_len, &face_arr, &face_arr_len);
            if (face_arr) {
              /* Update the new faces normal.
               * Normal is necessary to obtain the best face for edgenet */
              while (face_arr_len--) {
                BM_face_normal_update(face_arr[face_arr_len]);
              }
              MEM_freeN(face_arr);
            }
          }
        }

        if (edgenet) {
          MEM_freeN(edgenet);
        }
      }
      ok = true;
    }
  }

  /* Cleanup: free the per-thread stacks and the flattened pair array. */
  for (i = BLI_STACK_PAIR_LEN; i--;) {
    if (pair_stack[i]) {
      BLI_stack_free(pair_stack[i]);
    }
  }
  if (pair_array) {
    MEM_freeN(pair_array);
  }

  return ok;
}
1059
1060 /** \} */
1061