// The libMesh Finite Element Library.
// Copyright (C) 2002-2020 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner

// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

18
19
20 // Local Includes
21 #include "libmesh/boundary_info.h"
22 #include "libmesh/distributed_mesh.h"
23 #include "libmesh/elem.h"
24 #include "libmesh/ghosting_functor.h"
25 #include "libmesh/libmesh_config.h"
26 #include "libmesh/libmesh_common.h"
27 #include "libmesh/libmesh_logging.h"
28 #include "libmesh/mesh_base.h"
29 #include "libmesh/mesh_communication.h"
30 #include "libmesh/mesh_inserter_iterator.h"
31 #include "libmesh/mesh_tools.h"
32 #include "libmesh/parallel.h"
33 #include "libmesh/parallel_elem.h"
34 #include "libmesh/parallel_node.h"
35 #include "libmesh/parallel_ghost_sync.h"
36 #include "libmesh/utility.h"
37 #include "libmesh/remote_elem.h"
38 #include "libmesh/int_range.h"
39
40 // C++ Includes
41 #include <numeric>
42 #include <set>
43 #include <unordered_set>
44 #include <unordered_map>
45
46
47
48 //-----------------------------------------------
49 // anonymous namespace for implementation details
50 namespace {
51
52 using namespace libMesh;
53
54 struct SyncNeighbors
55 {
56 typedef std::vector<dof_id_type> datum;
57
SyncNeighborsSyncNeighbors58 SyncNeighbors(MeshBase & _mesh) :
59 mesh(_mesh) {}
60
61 MeshBase & mesh;
62
63 // Find the neighbor ids for each requested element
gather_dataSyncNeighbors64 void gather_data (const std::vector<dof_id_type> & ids,
65 std::vector<datum> & neighbors) const
66 {
67 neighbors.resize(ids.size());
68
69 for (auto i : index_range(ids))
70 {
71 // Look for this element in the mesh
72 // We'd better find every element we're asked for
73 const Elem & elem = mesh.elem_ref(ids[i]);
74
75 // Return the element's neighbors
76 const unsigned int n_neigh = elem.n_neighbors();
77 neighbors[i].resize(n_neigh);
78 for (unsigned int n = 0; n != n_neigh; ++n)
79 {
80 const Elem * neigh = elem.neighbor_ptr(n);
81 if (neigh)
82 {
83 libmesh_assert_not_equal_to(neigh, remote_elem);
84 neighbors[i][n] = neigh->id();
85 }
86 else
87 neighbors[i][n] = DofObject::invalid_id;
88 }
89 }
90 }
91
act_on_dataSyncNeighbors92 void act_on_data (const std::vector<dof_id_type> & ids,
93 const std::vector<datum> & neighbors) const
94 {
95 for (auto i : index_range(ids))
96 {
97 Elem & elem = mesh.elem_ref(ids[i]);
98
99 const datum & new_neigh = neighbors[i];
100
101 const unsigned int n_neigh = elem.n_neighbors();
102 libmesh_assert_equal_to (n_neigh, new_neigh.size());
103
104 for (unsigned int n = 0; n != n_neigh; ++n)
105 {
106 const dof_id_type new_neigh_id = new_neigh[n];
107 const Elem * old_neigh = elem.neighbor_ptr(n);
108 if (old_neigh && old_neigh != remote_elem)
109 {
110 libmesh_assert_equal_to(old_neigh->id(), new_neigh_id);
111 }
112 else if (new_neigh_id == DofObject::invalid_id)
113 {
114 libmesh_assert (!old_neigh);
115 }
116 else
117 {
118 Elem * neigh = mesh.query_elem_ptr(new_neigh_id);
119 if (neigh)
120 elem.set_neighbor(n, neigh);
121 else
122 elem.set_neighbor(n, const_cast<RemoteElem *>(remote_elem));
123 }
124 }
125 }
126 }
127 };
128
129
130 }
131
132
133
134 namespace libMesh
135 {
136
137
query_ghosting_functors(const MeshBase & mesh,processor_id_type pid,MeshBase::const_element_iterator elem_it,MeshBase::const_element_iterator elem_end,std::set<const Elem *,CompareElemIdsByLevel> & connected_elements)138 void query_ghosting_functors(const MeshBase & mesh,
139 processor_id_type pid,
140 MeshBase::const_element_iterator elem_it,
141 MeshBase::const_element_iterator elem_end,
142 std::set<const Elem *, CompareElemIdsByLevel> & connected_elements)
143 {
144 for (auto & gf :
145 as_range(mesh.ghosting_functors_begin(),
146 mesh.ghosting_functors_end()))
147 {
148 GhostingFunctor::map_type elements_to_ghost;
149 libmesh_assert(gf);
150 (*gf)(elem_it, elem_end, pid, elements_to_ghost);
151
152 // We can ignore the CouplingMatrix in ->second, but we
153 // need to ghost all the elements in ->first.
154 for (auto & pr : elements_to_ghost)
155 {
156 const Elem * elem = pr.first;
157 libmesh_assert(elem != remote_elem);
158 libmesh_assert(mesh.elem_ptr(elem->id()) == elem);
159 connected_elements.insert(elem);
160 }
161 }
162
163 // The GhostingFunctors won't be telling us about the elements from
164 // pid; we need to add those ourselves.
165 for (; elem_it != elem_end; ++elem_it)
166 connected_elements.insert(*elem_it);
167 }
168
169
connect_children(const MeshBase & mesh,MeshBase::const_element_iterator elem_it,MeshBase::const_element_iterator elem_end,std::set<const Elem *,CompareElemIdsByLevel> & connected_elements)170 void connect_children(const MeshBase & mesh,
171 MeshBase::const_element_iterator elem_it,
172 MeshBase::const_element_iterator elem_end,
173 std::set<const Elem *, CompareElemIdsByLevel> & connected_elements)
174 {
175 // None of these parameters are used when !LIBMESH_ENABLE_AMR.
176 libmesh_ignore(mesh, elem_it, elem_end, connected_elements);
177
178 #ifdef LIBMESH_ENABLE_AMR
179 // Our XdrIO output needs inactive local elements to not have any
180 // remote_elem children. Let's make sure that doesn't happen.
181 //
182 for (const auto & elem : as_range(elem_it, elem_end))
183 {
184 if (elem->has_children())
185 for (auto & child : elem->child_ref_range())
186 if (&child != remote_elem)
187 connected_elements.insert(&child);
188 }
189 #endif // LIBMESH_ENABLE_AMR
190 }
191
192
connect_families(std::set<const Elem *,CompareElemIdsByLevel> & connected_elements)193 void connect_families(std::set<const Elem *, CompareElemIdsByLevel> & connected_elements)
194 {
195 // This parameter is not used when !LIBMESH_ENABLE_AMR.
196 libmesh_ignore(connected_elements);
197
198 #ifdef LIBMESH_ENABLE_AMR
199
200 // Because our set is sorted by ascending level, we can traverse it
201 // in reverse order, adding parents as we go, and end up with all
202 // ancestors added. This is safe for std::set where insert doesn't
203 // invalidate iterators.
204 //
205 // This only works because we do *not* cache
206 // connected_elements.rend(), whose value can change when we insert
207 // elements which are sorted before the original rend.
208 //
209 // We're also going to get subactive descendents here, when any
210 // exist. We're iterating in the wrong direction to do that
211 // non-recursively, so we'll cop out and rely on total_family_tree.
212 // Iterating backwards does mean that we won't be querying the newly
213 // inserted subactive elements redundantly.
214
215 std::set<const Elem *, CompareElemIdsByLevel>::reverse_iterator
216 elem_rit = connected_elements.rbegin();
217
218 for (; elem_rit != connected_elements.rend(); ++elem_rit)
219 {
220 const Elem * elem = *elem_rit;
221 libmesh_assert(elem);
222 const Elem * parent = elem->parent();
223
224 // We let ghosting functors worry about only active elements,
225 // but the remote processor needs all its semilocal elements'
226 // ancestors and active semilocal elements' descendants too.
227 if (parent)
228 connected_elements.insert (parent);
229
230 if (elem->active() && elem->has_children())
231 {
232 std::vector<const Elem *> subactive_family;
233 elem->total_family_tree(subactive_family);
234 for (const auto & f : subactive_family)
235 {
236 libmesh_assert(f != remote_elem);
237 connected_elements.insert(f);
238 }
239 }
240 }
241
242 # ifdef DEBUG
243 // Let's be paranoid and make sure that all our ancestors
244 // really did get inserted. I screwed this up the first time
245 // by caching rend, and I can easily imagine screwing it up in
246 // the future by changing containers.
247 for (const auto & elem : connected_elements)
248 {
249 libmesh_assert(elem);
250 const Elem * parent = elem->parent();
251 if (parent)
252 libmesh_assert(connected_elements.count(parent));
253 }
254 # endif // DEBUG
255
256 #endif // LIBMESH_ENABLE_AMR
257 }
258
259
reconnect_nodes(const std::set<const Elem *,CompareElemIdsByLevel> & connected_elements,std::set<const Node * > & connected_nodes)260 void reconnect_nodes (const std::set<const Elem *, CompareElemIdsByLevel> & connected_elements,
261 std::set<const Node *> & connected_nodes)
262 {
263 // We're done using the nodes list for element decisions; now
264 // let's reuse it for nodes of the elements we've decided on.
265 connected_nodes.clear();
266
267 for (const auto & elem : connected_elements)
268 for (auto & n : elem->node_ref_range())
269 connected_nodes.insert(&n);
270 }
271
272
273
274
275 // ------------------------------------------------------------
276 // MeshCommunication class members
clear()277 void MeshCommunication::clear ()
278 {
279 // _neighboring_processors.clear();
280 }
281
282
283
284 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
285 // ------------------------------------------------------------
redistribute(DistributedMesh &,bool)286 void MeshCommunication::redistribute (DistributedMesh &, bool) const
287 {
288 // no MPI == one processor, no redistribution
289 return;
290 }
291
292 #else
293 // ------------------------------------------------------------
redistribute(DistributedMesh & mesh,bool newly_coarsened_only)294 void MeshCommunication::redistribute (DistributedMesh & mesh,
295 bool newly_coarsened_only) const
296 {
297 // This method will be called after a new partitioning has been
298 // assigned to the elements. This partitioning was defined in
299 // terms of the active elements, and "trickled down" to the
300 // parents and nodes as to be consistent.
301 //
302 // The point is that the entire concept of local elements is
303 // kinda shaky in this method. Elements which were previously
304 // local may now be assigned to other processors, so we need to
305 // send those off. Similarly, we need to accept elements from
306 // other processors.
307
308 // This method is also useful in the more limited case of
309 // post-coarsening redistribution: if elements are only ghosting
310 // neighbors of their active elements, but adaptive coarsening
311 // causes an inactive element to become active, then we may need a
312 // copy of that inactive element's neighbors.
313
314 // The approach is as follows:
315 // (1) send all relevant elements we have stored to their proper homes
316 // (2) receive elements from all processors, watching for duplicates
317 // (3) deleting all nonlocal elements elements
318 // (4) obtaining required ghost elements from neighboring processors
319 libmesh_parallel_only(mesh.comm());
320 libmesh_assert (!mesh.is_serial());
321 libmesh_assert (MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
322 mesh.unpartitioned_elements_end()) == 0);
323
324 LOG_SCOPE("redistribute()", "MeshCommunication");
325
326 // Get a few unique message tags to use in communications; we'll
327 // default to some numbers around pi*1000
328 Parallel::MessageTag
329 nodestag = mesh.comm().get_unique_tag(3141),
330 elemstag = mesh.comm().get_unique_tag(3142);
331
332 // Figure out how many nodes and elements we have which are assigned to each
333 // processor. send_n_nodes_and_elem_per_proc contains the number of nodes/elements
334 // we will be sending to each processor, recv_n_nodes_and_elem_per_proc contains
335 // the number of nodes/elements we will be receiving from each processor.
336 // Format:
337 // send_n_nodes_and_elem_per_proc[2*pid+0] = number of nodes to send to pid
338 // send_n_nodes_and_elem_per_proc[2*pid+1] = number of elements to send to pid
339 std::vector<dof_id_type> send_n_nodes_and_elem_per_proc(2*mesh.n_processors(), 0);
340
341 std::vector<Parallel::Request>
342 node_send_requests, element_send_requests;
343
344 // We're going to sort elements-to-send by pid in one pass, to avoid
345 // sending predicated iterators through the whole mesh N_p times
346 std::unordered_map<processor_id_type, std::vector<Elem *>> send_to_pid;
347
348 const MeshBase::const_element_iterator send_elems_begin =
349 #ifdef LIBMESH_ENABLE_AMR
350 newly_coarsened_only ?
351 mesh.flagged_elements_begin(Elem::JUST_COARSENED) :
352 #endif
353 mesh.active_elements_begin();
354
355 const MeshBase::const_element_iterator send_elems_end =
356 #ifdef LIBMESH_ENABLE_AMR
357 newly_coarsened_only ?
358 mesh.flagged_elements_end(Elem::JUST_COARSENED) :
359 #endif
360 mesh.active_elements_end();
361
362 for (auto & elem : as_range(send_elems_begin, send_elems_end))
363 send_to_pid[elem->processor_id()].push_back(elem);
364
365 // If we don't have any just-coarsened elements to send to a
366 // pid, then there won't be any nodes or any elements pulled
367 // in by ghosting either, and we're done with this pid.
368 for (auto pair : send_to_pid)
369 {
370 const processor_id_type pid = pair.first;
371 // don't send to ourselves!!
372 if (pid == mesh.processor_id())
373 continue;
374
375 // Build up a list of nodes and elements to send to processor pid.
376 // We will certainly send all the elements assigned to this processor,
377 // but we will also ship off any elements which are required
378 // to be ghosted and any nodes which are used by any of the
379 // above.
380
381 const auto & p_elements = pair.second;
382 libmesh_assert(!p_elements.empty());
383
384 Elem * const * elempp = p_elements.data();
385 Elem * const * elemend = elempp + p_elements.size();
386
387 #ifndef LIBMESH_ENABLE_AMR
388 // This parameter is not used when !LIBMESH_ENABLE_AMR.
389 libmesh_ignore(newly_coarsened_only);
390 libmesh_assert(!newly_coarsened_only);
391 #endif
392
393 MeshBase::const_element_iterator elem_it =
394 MeshBase::const_element_iterator
395 (elempp, elemend, Predicates::NotNull<Elem * const *>());
396
397 const MeshBase::const_element_iterator elem_end =
398 MeshBase::const_element_iterator
399 (elemend, elemend, Predicates::NotNull<Elem * const *>());
400
401 std::set<const Elem *, CompareElemIdsByLevel> elements_to_send;
402
403 // See which to-be-ghosted elements we need to send
404 query_ghosting_functors (mesh, pid, elem_it, elem_end,
405 elements_to_send);
406
407 // The inactive elements we need to send should have their
408 // immediate children present.
409 connect_children(mesh, mesh.pid_elements_begin(pid),
410 mesh.pid_elements_end(pid),
411 elements_to_send);
412
413 // The elements we need should have their ancestors and their
414 // subactive children present too.
415 connect_families(elements_to_send);
416
417 std::set<const Node *> connected_nodes;
418 reconnect_nodes(elements_to_send, connected_nodes);
419
420 // the number of nodes we will ship to pid
421 send_n_nodes_and_elem_per_proc[2*pid+0] =
422 cast_int<dof_id_type>(connected_nodes.size());
423
424 // send any nodes off to the destination processor
425 libmesh_assert (!connected_nodes.empty());
426 node_send_requests.push_back(Parallel::request());
427
428 mesh.comm().send_packed_range (pid, &mesh,
429 connected_nodes.begin(),
430 connected_nodes.end(),
431 node_send_requests.back(),
432 nodestag);
433
434 // the number of elements we will send to this processor
435 send_n_nodes_and_elem_per_proc[2*pid+1] =
436 cast_int<dof_id_type>(elements_to_send.size());
437
438 // send the elements off to the destination processor
439 libmesh_assert (!elements_to_send.empty());
440 element_send_requests.push_back(Parallel::request());
441
442 mesh.comm().send_packed_range (pid, &mesh,
443 elements_to_send.begin(),
444 elements_to_send.end(),
445 element_send_requests.back(),
446 elemstag);
447 }
448
449 std::vector<dof_id_type> recv_n_nodes_and_elem_per_proc(send_n_nodes_and_elem_per_proc);
450
451 mesh.comm().alltoall (recv_n_nodes_and_elem_per_proc);
452
453 // In general we will only need to communicate with a subset of the other processors.
454 // I can't immediately think of a case where we will send elements but not nodes, but
455 // these are only bools and we have the information anyway...
456 std::vector<bool>
457 send_node_pair(mesh.n_processors(),false), send_elem_pair(mesh.n_processors(),false),
458 recv_node_pair(mesh.n_processors(),false), recv_elem_pair(mesh.n_processors(),false);
459
460 unsigned int
461 n_send_node_pairs=0, n_send_elem_pairs=0,
462 n_recv_node_pairs=0, n_recv_elem_pairs=0;
463
464 for (auto pid : make_range(mesh.n_processors()))
465 {
466 if (send_n_nodes_and_elem_per_proc[2*pid+0]) // we have nodes to send
467 {
468 send_node_pair[pid] = true;
469 n_send_node_pairs++;
470 }
471
472 if (send_n_nodes_and_elem_per_proc[2*pid+1]) // we have elements to send
473 {
474 send_elem_pair[pid] = true;
475 n_send_elem_pairs++;
476 }
477
478 if (recv_n_nodes_and_elem_per_proc[2*pid+0]) // we have nodes to receive
479 {
480 recv_node_pair[pid] = true;
481 n_recv_node_pairs++;
482 }
483
484 if (recv_n_nodes_and_elem_per_proc[2*pid+1]) // we have elements to receive
485 {
486 recv_elem_pair[pid] = true;
487 n_recv_elem_pairs++;
488 }
489 }
490 libmesh_assert_equal_to (n_send_node_pairs, node_send_requests.size());
491 libmesh_assert_equal_to (n_send_elem_pairs, element_send_requests.size());
492
493 // Receive nodes.
494 // We now know how many processors will be sending us nodes.
495 for (unsigned int node_comm_step=0; node_comm_step<n_recv_node_pairs; node_comm_step++)
496 // but we don't necessarily want to impose an ordering, so
497 // just process whatever message is available next.
498 mesh.comm().receive_packed_range (Parallel::any_source,
499 &mesh,
500 mesh_inserter_iterator<Node>(mesh),
501 (Node**)nullptr,
502 nodestag);
503
504 // Receive elements.
505 // Similarly we know how many processors are sending us elements,
506 // but we don't really care in what order we receive them.
507 for (unsigned int elem_comm_step=0; elem_comm_step<n_recv_elem_pairs; elem_comm_step++)
508 mesh.comm().receive_packed_range (Parallel::any_source,
509 &mesh,
510 mesh_inserter_iterator<Elem>(mesh),
511 (Elem**)nullptr,
512 elemstag);
513
514 // Wait for all sends to complete
515 Parallel::wait (node_send_requests);
516 Parallel::wait (element_send_requests);
517
518 // Check on the redistribution consistency
519 #ifdef DEBUG
520 MeshTools::libmesh_assert_equal_n_systems(mesh);
521
522 MeshTools::libmesh_assert_valid_refinement_tree(mesh);
523 #endif
524
525 // If we had a point locator, it's invalid now that there are new
526 // elements it can't locate.
527 mesh.clear_point_locator();
528
529 // We now have all elements and nodes redistributed; our ghosting
530 // functors should be ready to redistribute and/or recompute any
531 // cached data they use too.
532 for (auto & gf : as_range(mesh.ghosting_functors_begin(), mesh.ghosting_functors_end()))
533 gf->redistribute();
534 }
535 #endif // LIBMESH_HAVE_MPI
536
537
538
539 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
540 // ------------------------------------------------------------
gather_neighboring_elements(DistributedMesh &)541 void MeshCommunication::gather_neighboring_elements (DistributedMesh &) const
542 {
543 // no MPI == one processor, no need for this method...
544 return;
545 }
546 #else
547 // ------------------------------------------------------------
gather_neighboring_elements(DistributedMesh & mesh)548 void MeshCommunication::gather_neighboring_elements (DistributedMesh & mesh) const
549 {
550 // Don't need to do anything if there is
551 // only one processor.
552 if (mesh.n_processors() == 1)
553 return;
554
555 // This function must be run on all processors at once
556 libmesh_parallel_only(mesh.comm());
557
558 LOG_SCOPE("gather_neighboring_elements()", "MeshCommunication");
559
560 //------------------------------------------------------------------
561 // The purpose of this function is to provide neighbor data structure
562 // consistency for a parallel, distributed mesh. In libMesh we require
563 // that each local element have access to a full set of valid face
564 // neighbors. In some cases this requires us to store "ghost elements" -
565 // elements that belong to other processors but we store to provide
566 // data structure consistency. Also, it is assumed that any element
567 // with a nullptr neighbor resides on a physical domain boundary. So,
568 // even our "ghost elements" must have non-nullptr neighbors. To handle
569 // this the concept of "RemoteElem" is used - a special construct which
570 // is used to denote that an element has a face neighbor, but we do
571 // not actually store detailed information about that neighbor. This
572 // is required to prevent data structure explosion.
573 //
574 // So when this method is called we should have only local elements.
575 // These local elements will then find neighbors among the local
576 // element set. After this is completed, any element with a nullptr
577 // neighbor has either (i) a face on the physical boundary of the mesh,
578 // or (ii) a neighboring element which lives on a remote processor.
579 // To handle case (ii), we communicate the global node indices connected
580 // to all such faces to our neighboring processors. They then send us
581 // all their elements with a nullptr neighbor that are connected to any
582 // of the nodes in our list.
583 //------------------------------------------------------------------
584
585 // Let's begin with finding consistent neighbor data information
586 // for all the elements we currently have. We'll use a clean
587 // slate here - clear any existing information, including RemoteElem's.
588 mesh.find_neighbors (/* reset_remote_elements = */ true,
589 /* reset_current_list = */ true);
590
591 // Get a unique message tag to use in communications
592 Parallel::MessageTag
593 element_neighbors_tag = mesh.comm().get_unique_tag();
594
595 // Now any element with a nullptr neighbor either
596 // (i) lives on the physical domain boundary, or
597 // (ii) lives on an inter-processor boundary.
598 // We will now gather all the elements from adjacent processors
599 // which are of the same state, which should address all the type (ii)
600 // elements.
601
602 // A list of all the processors which *may* contain neighboring elements.
603 // (for development simplicity, just make this the identity map)
604 std::vector<processor_id_type> adjacent_processors;
605 for (auto pid : make_range(mesh.n_processors()))
606 if (pid != mesh.processor_id())
607 adjacent_processors.push_back (pid);
608
609
610 const processor_id_type n_adjacent_processors =
611 cast_int<processor_id_type>(adjacent_processors.size());
612
613 //-------------------------------------------------------------------------
614 // Let's build a list of all nodes which live on nullptr-neighbor sides.
615 // For simplicity, we will use a set to build the list, then transfer
616 // it to a vector for communication.
617 std::vector<dof_id_type> my_interface_node_list;
618 std::vector<const Elem *> my_interface_elements;
619 {
620 std::set<dof_id_type> my_interface_node_set;
621
622 // Pull objects out of the loop to reduce heap operations
623 std::unique_ptr<const Elem> side;
624
625 // since parent nodes are a subset of children nodes, this should be sufficient
626 for (const auto & elem : mesh.active_local_element_ptr_range())
627 {
628 libmesh_assert(elem);
629
630 if (elem->on_boundary()) // denotes *any* side has a nullptr neighbor
631 {
632 my_interface_elements.push_back(elem); // add the element, but only once, even
633 // if there are multiple nullptr neighbors
634 for (auto s : elem->side_index_range())
635 if (elem->neighbor_ptr(s) == nullptr)
636 {
637 elem->build_side_ptr(side, s);
638
639 for (auto n : make_range(side->n_vertices()))
640 my_interface_node_set.insert (side->node_id(n));
641 }
642 }
643 }
644
645 my_interface_node_list.reserve (my_interface_node_set.size());
646 my_interface_node_list.insert (my_interface_node_list.end(),
647 my_interface_node_set.begin(),
648 my_interface_node_set.end());
649 }
650
651 // we will now send my_interface_node_list to all of the adjacent processors.
652 // note that for the time being we will copy the list to a unique buffer for
653 // each processor so that we can use a nonblocking send and not access the
654 // buffer again until the send completes. it is my understanding that the
655 // MPI 2.1 standard seeks to remove this restriction as unnecessary, so in
656 // the future we should change this to send the same buffer to each of the
657 // adjacent processors. - BSK 11/17/2008
658 std::vector<std::vector<dof_id_type>>
659 my_interface_node_xfer_buffers (n_adjacent_processors, my_interface_node_list);
660 std::map<processor_id_type, unsigned char> n_comm_steps;
661
662 std::vector<Parallel::Request> send_requests (3*n_adjacent_processors);
663 unsigned int current_request = 0;
664
665 for (unsigned int comm_step=0; comm_step<n_adjacent_processors; comm_step++)
666 {
667 n_comm_steps[adjacent_processors[comm_step]]=1;
668 mesh.comm().send (adjacent_processors[comm_step],
669 my_interface_node_xfer_buffers[comm_step],
670 send_requests[current_request++],
671 element_neighbors_tag);
672 }
673
674 //-------------------------------------------------------------------------
675 // processor pairings are symmetric - I expect to receive an interface node
676 // list from each processor in adjacent_processors as well!
677 // now we will catch an incoming node list for each of our adjacent processors.
678 //
679 // we are done with the adjacent_processors list - note that it is in general
680 // a superset of the processors we truly share elements with. so let's
681 // clear the superset list, and we will fill it with the true list.
682 adjacent_processors.clear();
683
684 std::vector<dof_id_type> common_interface_node_list;
685
686 // we expect two classes of messages -
687 // (1) incoming interface node lists, to which we will reply with our elements
688 // touching nodes in the list, and
689 // (2) replies from the requests we sent off previously.
690 // (2.a) - nodes
691 // (2.b) - elements
692 // so we expect 3 communications from each adjacent processor.
693 // by structuring the communication in this way we hopefully impose no
694 // order on the handling of the arriving messages. in particular, we
695 // should be able to handle the case where we receive a request and
696 // all replies from processor A before even receiving a request from
697 // processor B.
698
699 for (unsigned int comm_step=0; comm_step<3*n_adjacent_processors; comm_step++)
700 {
701 //------------------------------------------------------------------
702 // catch incoming node list
703 Parallel::Status
704 status(mesh.comm().probe (Parallel::any_source,
705 element_neighbors_tag));
706 const processor_id_type
707 source_pid_idx = cast_int<processor_id_type>(status.source()),
708 dest_pid_idx = source_pid_idx;
709
710 //------------------------------------------------------------------
711 // first time - incoming request
712 if (n_comm_steps[source_pid_idx] == 1)
713 {
714 n_comm_steps[source_pid_idx]++;
715
716 mesh.comm().receive (source_pid_idx,
717 common_interface_node_list,
718 element_neighbors_tag);
719
720 // const std::size_t
721 // their_interface_node_list_size = common_interface_node_list.size();
722
723 // we now have the interface node list from processor source_pid_idx.
724 // now we can find all of our elements which touch any of these nodes
725 // and send copies back to this processor. however, we can make our
726 // search more efficient by first excluding all the nodes in
727 // their list which are not also contained in
728 // my_interface_node_list. we can do this in place as a set
729 // intersection.
730 common_interface_node_list.erase
731 (std::set_intersection (my_interface_node_list.begin(),
732 my_interface_node_list.end(),
733 common_interface_node_list.begin(),
734 common_interface_node_list.end(),
735 common_interface_node_list.begin()),
736 common_interface_node_list.end());
737
738 // if (false)
739 // libMesh::out << "[" << mesh.processor_id() << "] "
740 // << "my_interface_node_list.size()=" << my_interface_node_list.size()
741 // << ", [" << source_pid_idx << "] "
742 // << "their_interface_node_list.size()=" << their_interface_node_list_size
743 // << ", common_interface_node_list.size()=" << common_interface_node_list.size()
744 // << std::endl;
745
746 // Now we need to see which of our elements touch the nodes in the list.
747 // We will certainly send all the active elements which intersect source_pid_idx,
748 // but we will also ship off the other elements in the same family tree
749 // as the active ones for data structure consistency.
750 //
751 // FIXME - shipping full family trees is unnecessary and inefficient.
752 //
753 // We also ship any nodes connected to these elements. Note
754 // some of these nodes and elements may be replicated from
755 // other processors, but that is OK.
756 std::set<const Elem *, CompareElemIdsByLevel> elements_to_send;
757 std::set<const Node *> connected_nodes;
758
759 // Check for quick return?
760 if (common_interface_node_list.empty())
761 {
762 // let's try to be smart here - if we have no nodes in common,
763 // we cannot share elements. so post the messages expected
764 // from us here and go on about our business.
765 // note that even though these are nonblocking sends
766 // they should complete essentially instantly, because
767 // in all cases the send buffers are empty
768 mesh.comm().send_packed_range (dest_pid_idx,
769 &mesh,
770 connected_nodes.begin(),
771 connected_nodes.end(),
772 send_requests[current_request++],
773 element_neighbors_tag);
774
775 mesh.comm().send_packed_range (dest_pid_idx,
776 &mesh,
777 elements_to_send.begin(),
778 elements_to_send.end(),
779 send_requests[current_request++],
780 element_neighbors_tag);
781
782 continue;
783 }
784 // otherwise, this really *is* an adjacent processor.
785 adjacent_processors.push_back(source_pid_idx);
786
787 std::vector<const Elem *> family_tree;
788
789 for (auto & elem : my_interface_elements)
790 {
791 std::size_t n_shared_nodes = 0;
792
793 for (auto n : make_range(elem->n_vertices()))
794 if (std::binary_search (common_interface_node_list.begin(),
795 common_interface_node_list.end(),
796 elem->node_id(n)))
797 {
798 n_shared_nodes++;
799
800 // TBD - how many nodes do we need to share
801 // before we care? certainly 2, but 1? not
802 // sure, so let's play it safe...
803 if (n_shared_nodes > 0) break;
804 }
805
806 if (n_shared_nodes) // share at least one node?
807 {
808 elem = elem->top_parent();
809
810 // avoid a lot of duplicated effort -- if we already have elem
811 // in the set its entire family tree is already in the set.
812 if (!elements_to_send.count(elem))
813 {
814 #ifdef LIBMESH_ENABLE_AMR
815 elem->family_tree(family_tree);
816 #else
817 family_tree.clear();
818 family_tree.push_back(elem);
819 #endif
820 for (const auto & f : family_tree)
821 {
822 elem = f;
823 elements_to_send.insert (elem);
824
825 for (auto & n : elem->node_ref_range())
826 connected_nodes.insert (&n);
827 }
828 }
829 }
830 }
831
832 // The elements_to_send and connected_nodes sets now contain all
833 // the elements and nodes we need to send to this processor.
834 // All that remains is to pack up the objects (along with
835 // any boundary conditions) and send the messages off.
836 {
837 libmesh_assert (connected_nodes.empty() || !elements_to_send.empty());
838 libmesh_assert (!connected_nodes.empty() || elements_to_send.empty());
839
840 // send the nodes off to the destination processor
841 mesh.comm().send_packed_range (dest_pid_idx,
842 &mesh,
843 connected_nodes.begin(),
844 connected_nodes.end(),
845 send_requests[current_request++],
846 element_neighbors_tag);
847
848 // send the elements off to the destination processor
849 mesh.comm().send_packed_range (dest_pid_idx,
850 &mesh,
851 elements_to_send.begin(),
852 elements_to_send.end(),
853 send_requests[current_request++],
854 element_neighbors_tag);
855 }
856 }
857 //------------------------------------------------------------------
858 // second time - reply of nodes
859 else if (n_comm_steps[source_pid_idx] == 2)
860 {
861 n_comm_steps[source_pid_idx]++;
862
863 mesh.comm().receive_packed_range (source_pid_idx,
864 &mesh,
865 mesh_inserter_iterator<Node>(mesh),
866 (Node**)nullptr,
867 element_neighbors_tag);
868 }
869 //------------------------------------------------------------------
870 // third time - reply of elements
871 else if (n_comm_steps[source_pid_idx] == 3)
872 {
873 n_comm_steps[source_pid_idx]++;
874
875 mesh.comm().receive_packed_range (source_pid_idx,
876 &mesh,
877 mesh_inserter_iterator<Elem>(mesh),
878 (Elem**)nullptr,
879 element_neighbors_tag);
880 }
881 //------------------------------------------------------------------
882 // fourth time - shouldn't happen
883 else
884 {
885 libMesh::err << "ERROR: unexpected number of replies: "
886 << n_comm_steps[source_pid_idx]
887 << std::endl;
888 }
889 } // done catching & processing replies associated with tag ~ 100,000pi
890
891 // allow any pending requests to complete
892 Parallel::wait (send_requests);
893
894 // If we had a point locator, it's invalid now that there are new
895 // elements it can't locate.
896 mesh.clear_point_locator();
897
898 // We can now find neighbor information for the interfaces between
899 // local elements and ghost elements.
900 mesh.find_neighbors (/* reset_remote_elements = */ true,
901 /* reset_current_list = */ false);
902
903 // Ghost elements may not have correct remote_elem neighbor links,
904 // and we may not be able to locally infer correct neighbor links to
905 // remote elements. So we synchronize ghost element neighbor links.
906 SyncNeighbors nsync(mesh);
907
908 Parallel::sync_dofobject_data_by_id
909 (mesh.comm(), mesh.elements_begin(), mesh.elements_end(), nsync);
910 }
911 #endif // LIBMESH_HAVE_MPI
912
913
914 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
915 // ------------------------------------------------------------
send_coarse_ghosts(MeshBase &)916 void MeshCommunication::send_coarse_ghosts(MeshBase &) const
917 {
918 // no MPI == one processor, no need for this method...
919 return;
920 }
921 #else
send_coarse_ghosts(MeshBase & mesh)922 void MeshCommunication::send_coarse_ghosts(MeshBase & mesh) const
923 {
924
925 // Don't need to do anything if all processors already ghost all non-local
926 // elements.
927 if (mesh.is_serial())
928 return;
929
930 // This algorithm uses the MeshBase::flagged_pid_elements_begin/end iterators
931 // which are only available when AMR is enabled.
932 #ifndef LIBMESH_ENABLE_AMR
933 libmesh_error_msg("Calling MeshCommunication::send_coarse_ghosts() requires AMR to be enabled. "
934 "Please configure libmesh with --enable-amr.");
935 #else
936 // When we coarsen elements on a DistributedMesh, we make their
937 // parents active. This may increase the ghosting requirements on
938 // the processor which owns the newly-activated parent element. To
939 // ensure ghosting requirements are satisfied, processors which
940 // coarsen an element will send all the associated ghosted elements
941 // to all processors which own any of the coarsened-away-element's
942 // siblings.
943 typedef std::unordered_map<processor_id_type, std::vector<Elem *>> ghost_map;
944 ghost_map coarsening_elements_to_ghost;
945
946 const processor_id_type proc_id = mesh.processor_id();
947 // Look for just-coarsened elements
948 for (auto elem : as_range(mesh.flagged_pid_elements_begin(Elem::COARSEN, proc_id),
949 mesh.flagged_pid_elements_end(Elem::COARSEN, proc_id)))
950 {
951 // If it's flagged for coarsening it had better have a parent
952 libmesh_assert(elem->parent());
953
954 // On a distributed mesh:
955 // If we don't own this element's parent but we do own it, then
956 // there is a chance that we are aware of ghost elements which
957 // the parent's owner needs us to send them.
958 const processor_id_type their_proc_id = elem->parent()->processor_id();
959 if (their_proc_id != proc_id)
960 coarsening_elements_to_ghost[their_proc_id].push_back(elem);
961 }
962
963 const processor_id_type n_proc = mesh.n_processors();
964
965 // Get a few unique message tags to use in communications; we'll
966 // default to some numbers around pi*1000
967 Parallel::MessageTag
968 nodestag = mesh.comm().get_unique_tag(3141),
969 elemstag = mesh.comm().get_unique_tag(3142);
970
971 std::vector<Parallel::Request> send_requests;
972
973 // Using unsigned char instead of bool since it'll get converted for
974 // MPI use later anyway
975 std::vector<unsigned char> send_to_proc(n_proc, 0);
976
977 for (processor_id_type p=0; p != n_proc; ++p)
978 {
979 if (p == proc_id)
980 break;
981
982 // We'll send these asynchronously, but their data will be
983 // packed into Parallel:: buffers so it will be okay when the
984 // original containers are destructed before the sends complete.
985 std::set<const Elem *, CompareElemIdsByLevel> elements_to_send;
986 std::set<const Node *> nodes_to_send;
987
988 const ghost_map::const_iterator it =
989 coarsening_elements_to_ghost.find(p);
990 if (it != coarsening_elements_to_ghost.end())
991 {
992 const std::vector<Elem *> & elems = it->second;
993 libmesh_assert(elems.size());
994
995 // Make some fake element iterators defining this vector of
996 // elements
997 Elem * const * elempp = const_cast<Elem * const *>(elems.data());
998 Elem * const * elemend = elempp+elems.size();
999 const MeshBase::const_element_iterator elem_it =
1000 MeshBase::const_element_iterator(elempp, elemend, Predicates::NotNull<Elem * const *>());
1001 const MeshBase::const_element_iterator elem_end =
1002 MeshBase::const_element_iterator(elemend, elemend, Predicates::NotNull<Elem * const *>());
1003
1004 for (auto & gf : as_range(mesh.ghosting_functors_begin(),
1005 mesh.ghosting_functors_end()))
1006 {
1007 GhostingFunctor::map_type elements_to_ghost;
1008 libmesh_assert(gf);
1009 (*gf)(elem_it, elem_end, p, elements_to_ghost);
1010
1011 // We can ignore the CouplingMatrix in ->second, but we
1012 // need to ghost all the elements in ->first.
1013 for (auto & pr : elements_to_ghost)
1014 {
1015 const Elem * elem = pr.first;
1016 libmesh_assert(elem);
1017 while (elem)
1018 {
1019 libmesh_assert(elem != remote_elem);
1020 elements_to_send.insert(elem);
1021 for (auto & n : elem->node_ref_range())
1022 nodes_to_send.insert(&n);
1023 elem = elem->parent();
1024 }
1025 }
1026 }
1027
1028 send_requests.push_back(Parallel::request());
1029
1030 mesh.comm().send_packed_range (p, &mesh,
1031 nodes_to_send.begin(),
1032 nodes_to_send.end(),
1033 send_requests.back(),
1034 nodestag);
1035
1036 send_requests.push_back(Parallel::request());
1037
1038 send_to_proc[p] = 1; // true
1039
1040 mesh.comm().send_packed_range (p, &mesh,
1041 elements_to_send.begin(),
1042 elements_to_send.end(),
1043 send_requests.back(),
1044 elemstag);
1045 }
1046 }
1047
1048 // Find out how many other processors are sending us elements+nodes.
1049 std::vector<unsigned char> recv_from_proc(send_to_proc);
1050 mesh.comm().alltoall(recv_from_proc);
1051
1052 const processor_id_type n_receives = cast_int<processor_id_type>
1053 (std::count(recv_from_proc.begin(), recv_from_proc.end(), 1));
1054
1055 // Receive nodes first since elements will need to attach to them
1056 for (processor_id_type recv_i = 0; recv_i != n_receives; ++recv_i)
1057 {
1058 mesh.comm().receive_packed_range
1059 (Parallel::any_source,
1060 &mesh,
1061 mesh_inserter_iterator<Node>(mesh),
1062 (Node**)nullptr,
1063 nodestag);
1064 }
1065
1066 for (processor_id_type recv_i = 0; recv_i != n_receives; ++recv_i)
1067 {
1068 mesh.comm().receive_packed_range
1069 (Parallel::any_source,
1070 &mesh,
1071 mesh_inserter_iterator<Elem>(mesh),
1072 (Elem**)nullptr,
1073 elemstag);
1074 }
1075
1076 // Wait for all sends to complete
1077 Parallel::wait (send_requests);
1078 #endif // LIBMESH_ENABLE_AMR
1079 }
1080
1081 #endif // LIBMESH_HAVE_MPI
1082
1083 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
1084 // ------------------------------------------------------------
broadcast(MeshBase &)1085 void MeshCommunication::broadcast (MeshBase &) const
1086 {
1087 // no MPI == one processor, no need for this method...
1088 return;
1089 }
1090 #else
1091 // ------------------------------------------------------------
// Replicate processor 0's mesh onto every other processor.  The order
// of the collective calls below is part of the protocol: all ranks
// must execute them in the same sequence.
void MeshCommunication::broadcast (MeshBase & mesh) const
{
  // Don't need to do anything if there is
  // only one processor.
  if (mesh.n_processors() == 1)
    return;

  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  LOG_SCOPE("broadcast()", "MeshCommunication");

  // Explicitly clear the mesh on all but processor 0; processor 0's
  // copy is the one replicated everywhere below.
  if (mesh.processor_id() != 0)
    mesh.clear();

  // We may have set extra data only on processor 0 in a read()
  mesh.comm().broadcast(mesh._node_integer_names);
  mesh.comm().broadcast(mesh._node_integer_default_values);
  mesh.comm().broadcast(mesh._elem_integer_names);
  mesh.comm().broadcast(mesh._elem_integer_default_values);

  // We may have set mapping data only on processor 0 in a read().
  // The values travel as unsigned char and are converted back after
  // the broadcast.
  unsigned char map_type = mesh.default_mapping_type();
  unsigned char map_data = mesh.default_mapping_data();
  mesh.comm().broadcast(map_type);
  mesh.comm().broadcast(map_data);
  mesh.set_default_mapping_type(ElemMappingType(map_type));
  mesh.set_default_mapping_data(map_data);

  // Broadcast nodes first, so that broadcast elements can attach to
  // them on arrival.
  mesh.comm().broadcast_packed_range(&mesh,
                                     mesh.nodes_begin(),
                                     mesh.nodes_end(),
                                     &mesh,
                                     mesh_inserter_iterator<Node>(mesh));

  // Broadcast elements from coarsest to finest, so that child
  // elements will see their parents already in place.
  //
  // When restarting from a checkpoint, we may have elements which are
  // assigned to a processor but which have not yet been sent to that
  // processor, so we need to use a paranoid n_levels() count and not
  // the usual fast algorithm.
  const unsigned int n_levels = MeshTools::paranoid_n_levels(mesh);

  for (unsigned int l=0; l != n_levels; ++l)
    mesh.comm().broadcast_packed_range(&mesh,
                                       mesh.level_elements_begin(l),
                                       mesh.level_elements_end(l),
                                       &mesh,
                                       mesh_inserter_iterator<Elem>(mesh));

  // Make sure mesh_dimension and elem_dimensions are consistent.
  mesh.cache_elem_dims();

  // Broadcast all of the named entity information
  mesh.comm().broadcast(mesh.set_subdomain_name_map());
  mesh.comm().broadcast(mesh.get_boundary_info().set_sideset_name_map());
  mesh.comm().broadcast(mesh.get_boundary_info().set_nodeset_name_map());

  // If we had a point locator, it's invalid now that there are new
  // elements it can't locate.
  mesh.clear_point_locator();

  // Sanity check: every rank should now agree on the global counts.
  libmesh_assert (mesh.comm().verify(mesh.n_elem()));
  libmesh_assert (mesh.comm().verify(mesh.n_nodes()));

#ifdef DEBUG
  MeshTools::libmesh_assert_valid_procids<Elem>(mesh);
  MeshTools::libmesh_assert_valid_procids<Node>(mesh);
#endif
}
1165 #endif // LIBMESH_HAVE_MPI
1166
1167
1168
1169 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
1170 // ------------------------------------------------------------
gather(const processor_id_type,DistributedMesh &)1171 void MeshCommunication::gather (const processor_id_type, DistributedMesh &) const
1172 {
1173 // no MPI == one processor, no need for this method...
1174 return;
1175 }
1176 #else
1177 // ------------------------------------------------------------
gather(const processor_id_type root_id,DistributedMesh & mesh)1178 void MeshCommunication::gather (const processor_id_type root_id, DistributedMesh & mesh) const
1179 {
1180 // Check for quick return
1181 if (mesh.n_processors() == 1)
1182 return;
1183
1184 // This function must be run on all processors at once
1185 libmesh_parallel_only(mesh.comm());
1186
1187 LOG_SCOPE("(all)gather()", "MeshCommunication");
1188
1189 // Ensure we don't build too big a buffer at once
1190 static const std::size_t approx_total_buffer_size = 1e8;
1191 const std::size_t approx_each_buffer_size =
1192 approx_total_buffer_size / mesh.comm().size();
1193
1194 (root_id == DofObject::invalid_processor_id) ?
1195
1196 mesh.comm().allgather_packed_range (&mesh,
1197 mesh.nodes_begin(),
1198 mesh.nodes_end(),
1199 mesh_inserter_iterator<Node>(mesh),
1200 approx_each_buffer_size) :
1201
1202 mesh.comm().gather_packed_range (root_id,
1203 &mesh,
1204 mesh.nodes_begin(),
1205 mesh.nodes_end(),
1206 mesh_inserter_iterator<Node>(mesh),
1207 approx_each_buffer_size);
1208
1209 // Gather elements from coarsest to finest, so that child
1210 // elements will see their parents already in place.
1211 const unsigned int n_levels = MeshTools::n_levels(mesh);
1212
1213 for (unsigned int l=0; l != n_levels; ++l)
1214 (root_id == DofObject::invalid_processor_id) ?
1215
1216 mesh.comm().allgather_packed_range (&mesh,
1217 mesh.level_elements_begin(l),
1218 mesh.level_elements_end(l),
1219 mesh_inserter_iterator<Elem>(mesh),
1220 approx_each_buffer_size) :
1221
1222 mesh.comm().gather_packed_range (root_id,
1223 &mesh,
1224 mesh.level_elements_begin(l),
1225 mesh.level_elements_end(l),
1226 mesh_inserter_iterator<Elem>(mesh),
1227 approx_each_buffer_size);
1228
1229 // If we had a point locator, it's invalid now that there are new
1230 // elements it can't locate.
1231 mesh.clear_point_locator();
1232
1233 // If we are doing an allgather(), perform sanity check on the result.
1234 if (root_id == DofObject::invalid_processor_id)
1235 {
1236 libmesh_assert (mesh.comm().verify(mesh.n_elem()));
1237 libmesh_assert (mesh.comm().verify(mesh.n_nodes()));
1238 }
1239
1240 // Inform new elements of their neighbors,
1241 // while resetting all remote_elem links on
1242 // the ranks which did the gather.
1243 if (mesh.allow_find_neighbors())
1244 mesh.find_neighbors(root_id == DofObject::invalid_processor_id ||
1245 root_id == mesh.processor_id());
1246
1247 // All done, but let's make sure it's done correctly
1248
1249 #ifdef DEBUG
1250 MeshTools::libmesh_assert_valid_boundary_ids(mesh);
1251 #endif
1252 }
1253 #endif // LIBMESH_HAVE_MPI
1254
1255
1256
1257 // Functor for make_elems_parallel_consistent and
1258 // make_node_ids_parallel_consistent
1259 namespace {
1260
1261 struct SyncIds
1262 {
1263 typedef dof_id_type datum;
1264 typedef void (MeshBase::*renumber_obj)(dof_id_type, dof_id_type);
1265
SyncIdsSyncIds1266 SyncIds(MeshBase & _mesh, renumber_obj _renumberer) :
1267 mesh(_mesh),
1268 renumber(_renumberer) {}
1269
1270 MeshBase & mesh;
1271 renumber_obj renumber;
1272 // renumber_obj & renumber;
1273
1274 // Find the id of each requested DofObject -
1275 // Parallel::sync_* already did the work for us
gather_dataSyncIds1276 void gather_data (const std::vector<dof_id_type> & ids,
1277 std::vector<datum> & ids_out) const
1278 {
1279 ids_out = ids;
1280 }
1281
act_on_dataSyncIds1282 void act_on_data (const std::vector<dof_id_type> & old_ids,
1283 const std::vector<datum> & new_ids) const
1284 {
1285 for (auto i : index_range(old_ids))
1286 if (old_ids[i] != new_ids[i])
1287 (mesh.*renumber)(old_ids[i], new_ids[i]);
1288 }
1289 };
1290
1291
// Functor for Parallel::sync_* calls which propagates definitive node
// ids across processors, renumbering local nodes as authoritative
// answers arrive.  act_on_data() reports whether another sync round
// may be needed.
struct SyncNodeIds
{
  typedef dof_id_type datum;

  SyncNodeIds(MeshBase & _mesh) :
    mesh(_mesh) {}

  MeshBase & mesh;

  // We only know a Node id() is definitive if we own the Node or if
  // we're told it's definitive.  We keep track of the latter cases by
  // putting definitively id'd ghost nodes into this set.
  typedef std::unordered_set<const Node *> uset_type;
  uset_type definitive_ids;

  // We should never be told two different definitive ids for the same
  // node, but let's check on that in debug mode.
#ifdef DEBUG
  typedef std::unordered_map<dof_id_type, dof_id_type> umap_type;
  umap_type definitive_renumbering;
#endif

  // Find the id of each requested DofObject -
  // Parallel::sync_* already tried to do the work for us, but we can
  // only say the result is definitive if we own the DofObject or if
  // we were given the definitive result from another processor.
  // Ids we cannot vouch for are reported as invalid_id.
  void gather_data (const std::vector<dof_id_type> & ids,
                    std::vector<datum> & ids_out) const
  {
    ids_out.clear();
    ids_out.resize(ids.size(), DofObject::invalid_id);

    for (auto i : index_range(ids))
      {
        const dof_id_type id = ids[i];
        const Node * node = mesh.query_node_ptr(id);
        if (node && (node->processor_id() == mesh.processor_id() ||
                     definitive_ids.count(node)))
          ids_out[i] = id;
      }
  }

  // Apply definitive ids received from other processors.  Returns
  // true if anything changed locally or if some requested id is still
  // not definitively known -- i.e. if the sync is not yet complete.
  bool act_on_data (const std::vector<dof_id_type> & old_ids,
                    const std::vector<datum> & new_ids)
  {
    bool data_changed = false;
    for (auto i : index_range(old_ids))
      {
        const dof_id_type new_id = new_ids[i];

        const dof_id_type old_id = old_ids[i];

        Node * node = mesh.query_node_ptr(old_id);

        // If we can't find the node we were asking about, another
        // processor must have already given us the definitive id
        // for it
        if (!node)
          {
            // But let's check anyway in debug mode
#ifdef DEBUG
            libmesh_assert
              (definitive_renumbering.count(old_id));
            libmesh_assert_equal_to
              (new_id, definitive_renumbering[old_id]);
#endif
            continue;
          }

        // If we asked for an id but there's no definitive id ready
        // for us yet, then we can't quit trying to sync yet.
        if (new_id == DofObject::invalid_id)
          {
            // But we might have gotten a definitive id from a
            // different request
            if (!definitive_ids.count(mesh.node_ptr(old_id)))
              data_changed = true;
          }
        else
          {
            // A ghost node with a definitive answer is now itself a
            // source of definitive answers for later rounds.
            if (node->processor_id() != mesh.processor_id())
              definitive_ids.insert(node);
            if (old_id != new_id)
              {
#ifdef DEBUG
                libmesh_assert
                  (!definitive_renumbering.count(old_id));
                definitive_renumbering[old_id] = new_id;
#endif
                mesh.renumber_node(old_id, new_id);
                data_changed = true;
              }
          }
      }
    return data_changed;
  }
};
1389
1390
1391 #ifdef LIBMESH_ENABLE_AMR
1392 struct SyncPLevels
1393 {
1394 typedef unsigned char datum;
1395
SyncPLevelsSyncPLevels1396 SyncPLevels(MeshBase & _mesh) :
1397 mesh(_mesh) {}
1398
1399 MeshBase & mesh;
1400
1401 // Find the p_level of each requested Elem
gather_dataSyncPLevels1402 void gather_data (const std::vector<dof_id_type> & ids,
1403 std::vector<datum> & ids_out) const
1404 {
1405 ids_out.reserve(ids.size());
1406
1407 for (const auto & id : ids)
1408 {
1409 Elem & elem = mesh.elem_ref(id);
1410 ids_out.push_back(cast_int<unsigned char>(elem.p_level()));
1411 }
1412 }
1413
act_on_dataSyncPLevels1414 void act_on_data (const std::vector<dof_id_type> & old_ids,
1415 const std::vector<datum> & new_p_levels) const
1416 {
1417 for (auto i : index_range(old_ids))
1418 {
1419 Elem & elem = mesh.elem_ref(old_ids[i]);
1420 elem.set_p_level(new_p_levels[i]);
1421 }
1422 }
1423 };
1424 #endif // LIBMESH_ENABLE_AMR
1425
1426
1427 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1428 template <typename DofObjSubclass>
1429 struct SyncUniqueIds
1430 {
1431 typedef unique_id_type datum;
1432 typedef DofObjSubclass* (MeshBase::*query_obj)(const dof_id_type);
1433
SyncUniqueIdsSyncUniqueIds1434 SyncUniqueIds(MeshBase &_mesh, query_obj _querier) :
1435 mesh(_mesh),
1436 query(_querier) {}
1437
1438 MeshBase & mesh;
1439 query_obj query;
1440
1441 // Find the id of each requested DofObject -
1442 // Parallel::sync_* already did the work for us
gather_dataSyncUniqueIds1443 void gather_data (const std::vector<dof_id_type> & ids,
1444 std::vector<datum> & ids_out) const
1445 {
1446 ids_out.reserve(ids.size());
1447
1448 for (const auto & id : ids)
1449 {
1450 DofObjSubclass * d = (mesh.*query)(id);
1451 libmesh_assert(d);
1452 ids_out.push_back(d->unique_id());
1453 }
1454 }
1455
act_on_dataSyncUniqueIds1456 void act_on_data (const std::vector<dof_id_type> & ids,
1457 const std::vector<datum> & unique_ids) const
1458 {
1459 for (auto i : index_range(ids))
1460 {
1461 DofObjSubclass * d = (mesh.*query)(ids[i]);
1462 libmesh_assert(d);
1463 d->set_unique_id(unique_ids[i]);
1464 }
1465 }
1466 };
1467 #endif // LIBMESH_ENABLE_UNIQUE_ID
1468 }
1469
1470
1471
1472 // ------------------------------------------------------------
// Make every processor agree on the id of every node it can see.
void MeshCommunication::make_node_ids_parallel_consistent (MeshBase & mesh)
{
  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  // We need to agree on which processor owns every node, but we can't
  // easily assert that here because we don't currently agree on which
  // id every node has, and some of our temporary ids on unrelated
  // nodes will "overlap".
  //#ifdef DEBUG
  //  MeshTools::libmesh_assert_parallel_consistent_procids<Node> (mesh);
  //#endif // DEBUG

  LOG_SCOPE ("make_node_ids_parallel_consistent()", "MeshCommunication");

  // SyncNodeIds propagates definitive ids (and renumbers local nodes
  // to match); its act_on_data return value tells the sync layer
  // whether more rounds appear necessary.
  SyncNodeIds syncids(mesh);
  Parallel::sync_node_data_by_element_id
    (mesh, mesh.elements_begin(), mesh.elements_end(),
     Parallel::SyncEverything(), Parallel::SyncEverything(), syncids);

  // At this point, with both ids and processor ids synced, we can
  // finally check for topological consistency of node processor ids.
#ifdef DEBUG
  MeshTools::libmesh_assert_topology_consistent_procids<Node> (mesh);
#endif
}
1499
1500
1501
// Synchronize node unique_id values across processors.  A no-op
// (beyond the parallel_only check) when unique ids are disabled.
void MeshCommunication::make_node_unique_ids_parallel_consistent (MeshBase & mesh)
{
  // Avoid unused variable warnings if unique ids aren't enabled.
  libmesh_ignore(mesh);

  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  LOG_SCOPE ("make_node_unique_ids_parallel_consistent()", "MeshCommunication");

  // Exchange unique_ids for all nodes, keyed by their (already
  // consistent) dof ids.
  SyncUniqueIds<Node> syncuniqueids(mesh, &MeshBase::query_node_ptr);
  Parallel::sync_dofobject_data_by_id(mesh.comm(),
                                      mesh.nodes_begin(),
                                      mesh.nodes_end(),
                                      syncuniqueids);

#endif
}
1521
1522
1523
1524
1525 // ------------------------------------------------------------
// Make every processor agree on the id (and unique_id, if enabled) of
// every active element it can see.
void MeshCommunication::make_elems_parallel_consistent(MeshBase & mesh)
{
  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  LOG_SCOPE ("make_elems_parallel_consistent()", "MeshCommunication");

  // Sync element ids, renumbering local elements whose ids disagree
  // with the authoritative answer (see SyncIds).
  SyncIds syncids(mesh, &MeshBase::renumber_elem);
  Parallel::sync_element_data_by_parent_id
    (mesh, mesh.active_elements_begin(),
     mesh.active_elements_end(), syncids);

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  // unique_ids are synced separately, keyed by the now-consistent ids.
  SyncUniqueIds<Elem> syncuniqueids(mesh, &MeshBase::query_elem_ptr);
  Parallel::sync_dofobject_data_by_id
    (mesh.comm(), mesh.active_elements_begin(),
     mesh.active_elements_end(), syncuniqueids);
#endif
}
1545
1546
1547
1548 // ------------------------------------------------------------
1549 #ifdef LIBMESH_ENABLE_AMR
// Make every processor agree on the p-refinement level of every
// element it can see (see SyncPLevels).
void MeshCommunication::make_p_levels_parallel_consistent(MeshBase & mesh)
{
  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  LOG_SCOPE ("make_p_levels_parallel_consistent()", "MeshCommunication");

  SyncPLevels syncplevels(mesh);
  Parallel::sync_dofobject_data_by_id
    (mesh.comm(), mesh.elements_begin(), mesh.elements_end(),
     syncplevels);
}
1562 #endif // LIBMESH_ENABLE_AMR
1563
1564
1565
1566 // Functors for make_node_proc_ids_parallel_consistent
1567 namespace {
1568
1569 struct SyncProcIds
1570 {
1571 typedef processor_id_type datum;
1572
SyncProcIdsSyncProcIds1573 SyncProcIds(MeshBase & _mesh) : mesh(_mesh) {}
1574
1575 MeshBase & mesh;
1576
1577 // ------------------------------------------------------------
gather_dataSyncProcIds1578 void gather_data (const std::vector<dof_id_type> & ids,
1579 std::vector<datum> & data)
1580 {
1581 // Find the processor id of each requested node
1582 data.resize(ids.size());
1583
1584 for (auto i : index_range(ids))
1585 {
1586 // Look for this point in the mesh
1587 if (ids[i] != DofObject::invalid_id)
1588 {
1589 Node & node = mesh.node_ref(ids[i]);
1590
1591 // Return the node's correct processor id,
1592 data[i] = node.processor_id();
1593 }
1594 else
1595 data[i] = DofObject::invalid_processor_id;
1596 }
1597 }
1598
1599 // ------------------------------------------------------------
act_on_dataSyncProcIds1600 bool act_on_data (const std::vector<dof_id_type> & ids,
1601 const std::vector<datum> proc_ids)
1602 {
1603 bool data_changed = false;
1604
1605 // Set the ghost node processor ids we've now been informed of
1606 for (auto i : index_range(ids))
1607 {
1608 Node & node = mesh.node_ref(ids[i]);
1609
1610 // We may not have ids synched when this synchronization is done, so we
1611 // *can't* use id to load-balance processor id properly; we have to use
1612 // the old heuristic of choosing the smallest valid processor id.
1613 //
1614 // If someone tells us our node processor id is too low, then
1615 // they're wrong. If they tell us our node processor id is
1616 // too high, then we're wrong.
1617 if (node.processor_id() > proc_ids[i])
1618 {
1619 data_changed = true;
1620 node.processor_id() = proc_ids[i];
1621 }
1622 }
1623
1624 return data_changed;
1625 }
1626 };
1627
1628
1629 struct ElemNodesMaybeNew
1630 {
ElemNodesMaybeNewElemNodesMaybeNew1631 ElemNodesMaybeNew() {}
1632
operatorElemNodesMaybeNew1633 bool operator() (const Elem * elem) const
1634 {
1635 // If this element was just refined then it may have new nodes we
1636 // need to work on
1637 #ifdef LIBMESH_ENABLE_AMR
1638 if (elem->refinement_flag() == Elem::JUST_REFINED)
1639 return true;
1640 #endif
1641
1642 // If this element has remote_elem neighbors then there may have
1643 // been refinement of those neighbors that affect its nodes'
1644 // processor_id()
1645 for (auto neigh : elem->neighbor_ptr_range())
1646 if (neigh == remote_elem)
1647 return true;
1648 return false;
1649 }
1650 };
1651
1652
1653 struct NodeWasNew
1654 {
NodeWasNewNodeWasNew1655 NodeWasNew(const MeshBase & mesh)
1656 {
1657 for (const auto & node : mesh.node_ptr_range())
1658 if (node->processor_id() == DofObject::invalid_processor_id)
1659 was_new.insert(node);
1660 }
1661
operatorNodeWasNew1662 bool operator() (const Elem * elem, unsigned int local_node_num) const
1663 {
1664 if (was_new.count(elem->node_ptr(local_node_num)))
1665 return true;
1666 return false;
1667 }
1668
1669 std::unordered_set<const Node *> was_new;
1670 };
1671
1672 }
1673
1674
1675
1676 // ------------------------------------------------------------
// Make every processor agree on the processor id of every node it can
// see.
void MeshCommunication::make_node_proc_ids_parallel_consistent(MeshBase & mesh)
{
  LOG_SCOPE ("make_node_proc_ids_parallel_consistent()", "MeshCommunication");

  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  // When this function is called, each section of a parallelized mesh
  // should be in the following state:
  //
  // All nodes should have the exact same physical location on every
  // processor where they exist.
  //
  // Local nodes should have unique authoritative ids,
  // and processor ids consistent with all processors which own
  // an element touching them.
  //
  // Ghost nodes touching local elements should have processor ids
  // consistent with all processors which own an element touching
  // them.

  // SyncProcIds lowers each node's processor id to the smallest value
  // any sharing processor reports; syncing over all elements lets
  // every sharing processor weigh in.
  SyncProcIds sync(mesh);
  Parallel::sync_node_data_by_element_id
    (mesh, mesh.elements_begin(), mesh.elements_end(),
     Parallel::SyncEverything(), Parallel::SyncEverything(), sync);
}
1702
1703
1704
1705 // ------------------------------------------------------------
// Assign consistent processor ids to nodes created by refinement,
// distinguishing truly new (unpartitioned-everywhere) nodes from
// nodes which merely look new locally.
void MeshCommunication::make_new_node_proc_ids_parallel_consistent(MeshBase & mesh)
{
  LOG_SCOPE ("make_new_node_proc_ids_parallel_consistent()", "MeshCommunication");

  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  // When this function is called, each section of a parallelized mesh
  // should be in the following state:
  //
  // Local nodes should have unique authoritative ids,
  // and new nodes should be unpartitioned.
  //
  // New ghost nodes touching local elements should be unpartitioned.

  // We may not have consistent processor ids for new nodes (because a
  // node may be old and partitioned on one processor but new and
  // unpartitioned on another) when we start
#ifdef DEBUG
  MeshTools::libmesh_assert_parallel_consistent_procids<Node>(mesh);
  // MeshTools::libmesh_assert_parallel_consistent_new_node_procids(mesh);
#endif

  // We have two kinds of new nodes.  *NEW* nodes are unpartitioned on
  // all processors: we need to use a id-independent (i.e. dumb)
  // heuristic to partition them.  But "new" nodes are newly created
  // on some processors (when ghost elements are refined) yet
  // correspond to existing nodes on other processors: we need to use
  // the existing processor id for them.
  //
  // A node which is "new" on one processor will be associated with at
  // least one ghost element, and we can just query that ghost
  // element's owner to find out the correct processor id.

  // Predicate: true for (elem, node) pairs whose node is still
  // unpartitioned locally.
  auto node_unpartitioned =
    [](const Elem * elem, unsigned int local_node_num)
    { return elem->node_ref(local_node_num).processor_id() ==
        DofObject::invalid_processor_id; };

  SyncProcIds sync(mesh);

  // Step 1: fill in processor ids for "new"-but-really-old nodes by
  // asking the owners of the non-local elements touching them.
  sync_node_data_by_element_id_once
    (mesh, mesh.not_local_elements_begin(),
     mesh.not_local_elements_end(), Parallel::SyncEverything(),
     node_unpartitioned, sync);

  // Nodes should now be unpartitioned iff they are truly new; those
  // are the *only* nodes we will touch.
#ifdef DEBUG
  MeshTools::libmesh_assert_parallel_consistent_new_node_procids(mesh);
#endif

  NodeWasNew node_was_new(mesh);

  // Step 2: set the lowest processor id we can on truly new nodes,
  // using the local elements touching each one.
  for (auto & elem : mesh.element_ptr_range())
    for (auto & node : elem->node_ref_range())
      if (node_was_new.was_new.count(&node))
        {
          processor_id_type & pid = node.processor_id();
          pid = std::min(pid, elem->processor_id());
        }

  // Step 3: then finally see if other processors have a lower option
  Parallel::sync_node_data_by_element_id
    (mesh, mesh.elements_begin(), mesh.elements_end(),
     ElemNodesMaybeNew(), node_was_new, sync);

  // We should have consistent processor ids when we're done.
#ifdef DEBUG
  MeshTools::libmesh_assert_parallel_consistent_procids<Node>(mesh);
  MeshTools::libmesh_assert_parallel_consistent_new_node_procids(mesh);
#endif
}
1780
1781
1782
1783 // ------------------------------------------------------------
// Drive the full node-synchronization sequence for a distributed
// mesh: after this call every node that exists on multiple
// processors agrees on its id, unique_id, and processor id.
//
// NOTE(review): the four steps below are order-dependent — processor
// ids must be synced first because they determine which processor
// holds the authoritative id for each node; do not reorder.
void MeshCommunication::make_nodes_parallel_consistent (MeshBase & mesh)
{
  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  // When this function is called, each section of a parallelized mesh
  // should be in the following state:
  //
  // All nodes should have the exact same physical location on every
  // processor where they exist.
  //
  // Local nodes should have unique authoritative ids,
  // and processor ids consistent with all processors which own
  // an element touching them.
  //
  // Ghost nodes touching local elements should have processor ids
  // consistent with all processors which own an element touching
  // them.
  //
  // Ghost nodes should have ids which are either already correct
  // or which are in the "unpartitioned" id space.

  // First, let's sync up processor ids.  Some of these processor ids
  // may be "wrong" from coarsening, but they're right in the sense
  // that they'll tell us who has the authoritative dofobject ids for
  // each node.

  this->make_node_proc_ids_parallel_consistent(mesh);

  // Second, sync up dofobject ids.
  this->make_node_ids_parallel_consistent(mesh);

  // Third, sync up dofobject unique_ids if applicable.
  this->make_node_unique_ids_parallel_consistent(mesh);

  // Finally, correct the processor ids to make DofMap happy
  MeshTools::correct_node_proc_ids(mesh);
}
1822
1823
1824
1825 // ------------------------------------------------------------
// Variant of make_nodes_parallel_consistent() for use after mesh
// refinement: only *new* (still-unpartitioned) nodes need their
// processor ids resolved; pre-existing nodes are assumed correct.
//
// NOTE(review): same four-step, order-dependent sequence as
// make_nodes_parallel_consistent(); only the first step differs
// (it restricts the proc-id sync to new nodes).
void MeshCommunication::make_new_nodes_parallel_consistent (MeshBase & mesh)
{
  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  // When this function is called, each section of a parallelized mesh
  // should be in the following state:
  //
  // All nodes should have the exact same physical location on every
  // processor where they exist.
  //
  // Local nodes should have unique authoritative ids,
  // and new nodes should be unpartitioned.
  //
  // New ghost nodes touching local elements should be unpartitioned.
  //
  // New ghost nodes should have ids which are either already correct
  // or which are in the "unpartitioned" id space.
  //
  // Non-new nodes should have correct ids and processor ids already.

  // First, let's sync up new nodes' processor ids.

  this->make_new_node_proc_ids_parallel_consistent(mesh);

  // Second, sync up dofobject ids.
  this->make_node_ids_parallel_consistent(mesh);

  // Third, sync up dofobject unique_ids if applicable.
  this->make_node_unique_ids_parallel_consistent(mesh);

  // Finally, correct the processor ids to make DofMap happy
  MeshTools::correct_node_proc_ids(mesh);
}
1860
1861
1862
1863 // ------------------------------------------------------------
// Strip from a DistributedMesh every element and node this processor
// has no reason to keep.  "Reasons to keep" are, in order of
// collection below: explicitly requested extra ghosts, elements
// requested by the mesh's ghosting functors (relative to local and
// unpartitioned elements), the children/ancestors/subactive relatives
// needed to keep the refinement tree self-consistent, and finally the
// nodes connected to any surviving element.
void
MeshCommunication::delete_remote_elements (DistributedMesh & mesh,
                                           const std::set<Elem *> & extra_ghost_elem_ids) const
{
  // The mesh should know it's about to be parallelized
  libmesh_assert (!mesh.is_serial());

  LOG_SCOPE("delete_remote_elements()", "MeshCommunication");

#ifdef DEBUG
  // We expect maximum ids to be in sync so we can use them to size
  // vectors
  libmesh_assert(mesh.comm().verify(mesh.max_node_id()));
  libmesh_assert(mesh.comm().verify(mesh.max_elem_id()));
  const dof_id_type par_max_node_id = mesh.parallel_max_node_id();
  const dof_id_type par_max_elem_id = mesh.parallel_max_elem_id();
  libmesh_assert_equal_to (par_max_node_id, mesh.max_node_id());
  libmesh_assert_equal_to (par_max_elem_id, mesh.max_elem_id());
#endif

  // Ordered by refinement level (via CompareElemIdsByLevel) so the
  // keep-set cooperates with the coarsest-last deletion pass below.
  std::set<const Elem *, CompareElemIdsByLevel> elements_to_keep;

  // Don't delete elements that we were explicitly told not to
  for (const auto & elem : extra_ghost_elem_ids)
    {
      std::vector<const Elem *> active_family;
#ifdef LIBMESH_ENABLE_AMR
      // For a non-subactive element, keep its whole active family
      // tree; a subactive element is kept by itself.
      if (!elem->subactive())
        elem->active_family_tree(active_family);
      else
#endif
        active_family.push_back(elem);

      for (const auto & f : active_family)
        elements_to_keep.insert(f);
    }

  // See which elements we still need to keep ghosted, given that
  // we're keeping local and unpartitioned elements.
  query_ghosting_functors
    (mesh, mesh.processor_id(),
     mesh.active_pid_elements_begin(mesh.processor_id()),
     mesh.active_pid_elements_end(mesh.processor_id()),
     elements_to_keep);
  query_ghosting_functors
    (mesh, DofObject::invalid_processor_id,
     mesh.active_pid_elements_begin(DofObject::invalid_processor_id),
     mesh.active_pid_elements_end(DofObject::invalid_processor_id),
     elements_to_keep);

  // The inactive elements we need to send should have their
  // immediate children present.
  connect_children(mesh, mesh.pid_elements_begin(mesh.processor_id()),
                   mesh.pid_elements_end(mesh.processor_id()),
                   elements_to_keep);
  connect_children(mesh,
                   mesh.pid_elements_begin(DofObject::invalid_processor_id),
                   mesh.pid_elements_end(DofObject::invalid_processor_id),
                   elements_to_keep);

  // The elements we need should have their ancestors and their
  // subactive children present too.
  connect_families(elements_to_keep);

  // Don't delete nodes that our semilocal elements need
  std::set<const Node *> connected_nodes;
  reconnect_nodes(elements_to_keep, connected_nodes);

  // Delete all the elements we have no reason to save,
  // starting with the most refined so that the mesh
  // is valid at all intermediate steps
  unsigned int n_levels = MeshTools::n_levels(mesh);

  // Signed loop counter so the l >= 0 termination test works when
  // counting down from the finest level to level 0.
  for (int l = n_levels - 1; l >= 0; --l)
    for (auto & elem : as_range(mesh.level_elements_begin(l),
                                mesh.level_elements_end(l)))
      {
        libmesh_assert (elem);
        // Make sure we don't leave any invalid pointers
        const bool keep_me = elements_to_keep.count(elem);

        // Neighbors/parents of a deleted element must point at
        // remote_elem instead of at freed memory.
        if (!keep_me)
          elem->make_links_to_me_remote();

        // delete_elem doesn't currently invalidate element
        // iterators... that had better not change
        if (!keep_me)
          mesh.delete_elem(elem);
      }

  // Delete all the nodes we have no reason to save
  // (relying on the same iterator-stability guarantee as above —
  // delete_node must not invalidate the node iterators).
  for (auto & node : mesh.node_ptr_range())
    {
      libmesh_assert(node);
      if (!connected_nodes.count(node))
        mesh.delete_node(node);
    }

  // If we had a point locator, it's invalid now that some of the
  // elements it pointed to have been deleted.
  mesh.clear_point_locator();

  // Much of our boundary info may have been for now-remote parts of
  // the mesh, in which case we don't want to keep local copies.
  mesh.get_boundary_info().regenerate_id_sets();

  // We now have all remote elements and nodes deleted; our ghosting
  // functors should be ready to delete any now-redundant cached data
  // they use too.
  for (auto & gf : as_range(mesh.ghosting_functors_begin(), mesh.ghosting_functors_end()))
    gf->delete_remote_elements();

#ifdef DEBUG
  // The surviving refinement tree must still be self-consistent.
  MeshTools::libmesh_assert_valid_refinement_tree(mesh);
#endif
}
1980
1981 } // namespace libMesh
1982