/**
 * MOAB, a Mesh-Oriented datABase, is a software component for creating,
 * storing and accessing finite element mesh data.
 *
 * Copyright 2004 Sandia Corporation.  Under the terms of Contract
 * DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
 * retains certain rights in this software.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 */


#ifndef MOAB_PARALLEL_COMM_HPP
#define MOAB_PARALLEL_COMM_HPP

#include "moab/Forward.hpp"
#include "moab/Interface.hpp"
#include "moab/Range.hpp"
#include "moab/ProcConfig.hpp"
#include <map>
#include <set>
#include <vector>
#include <iostream>
#include <fstream>
#include <assert.h>
#include <stdlib.h>
#include <math.h>
#include "moab/TupleList.hpp"

namespace moab {

  class SequenceManager;
  class Error;
  template <typename KeyType, typename ValType, ValType NullVal> class RangeMap;
  typedef RangeMap<EntityHandle, EntityHandle, 0> HandleMap;
  class ParallelMergeMesh;
  class DebugOutput;
  class SharedSetData;

#define MAX_SHARING_PROCS 64

  /**
   * \brief Parallel communications in MOAB
   * \author Tim Tautges
   *
   *  This class implements methods to communicate mesh between processors
   *
   */
  class ParallelComm
  {
  public:

    friend class ParallelMergeMesh;

    // ==================================
    // \section CONSTRUCTORS/DESTRUCTORS/PCOMM MANAGEMENT
    // ==================================

    //! constructor
    ParallelComm(Interface *impl,
                 MPI_Comm comm,
                 int* pcomm_id_out = 0);

    //! constructor taking packed buffer, for testing
    ParallelComm(Interface *impl,
                 std::vector<unsigned char> &tmp_buff,
                 MPI_Comm comm,
                 int* pcomm_id_out = 0);

    //! Get ID used to reference this PCOMM instance
    int get_id() const { return pcommID; }

    //! get the indexed pcomm object from the interface
    static ParallelComm *get_pcomm(Interface *impl, const int index);

    //! Get ParallelComm instance associated with partition handle
    //! Will create ParallelComm instance if a) one does not already
    //! exist and b) a valid value for MPI_Comm is passed.
    static ParallelComm *get_pcomm( Interface* impl,
                                    EntityHandle partitioning,
                                    const MPI_Comm* comm = 0 );

    static ErrorCode get_all_pcomm( Interface* impl,
                                    std::vector<ParallelComm*>& list );

    //! destructor
    ~ParallelComm();
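
    /* Usage sketch (added for illustration, not part of the original API
     * docs; assumes MPI_Init has already been called and omits error
     * checking):
     *
     *   moab::Core mb;                                  // Core implements Interface
     *   moab::ParallelComm pcomm(&mb, MPI_COMM_WORLD);  // registers itself with mb
     *   // ... later, retrieve the same instance by its ID:
     *   moab::ParallelComm *pc =
     *       moab::ParallelComm::get_pcomm(&mb, pcomm.get_id());
     */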

    static unsigned char PROC_SHARED, PROC_OWNER;

    // ==================================
    // \section GLOBAL IDS
    // ==================================

    //! assign a global id space, for largest-dimension or all entities (and
    //! in either case for vertices too)
    //!\param owned_only If true, do not get global IDs for non-owned entities
    //!                  from remote processors.
    ErrorCode assign_global_ids(EntityHandle this_set,
                                const int dimension,
                                const int start_id = 1,
                                const bool largest_dim_only = true,
                                const bool parallel = true,
                                const bool owned_only = false);
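
    /* Usage sketch (added for illustration; 0 denotes the root set and
     * error checking is omitted):
     *
     *   // Assign 1-based global IDs to the 3D elements (and their
     *   // vertices) across the communicator:
     *   moab::ErrorCode rval = pcomm.assign_global_ids(0, 3);
     */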

    //! assign a global id space, for largest-dimension or all entities (and
    //! in either case for vertices too)
    ErrorCode assign_global_ids( Range entities[],
                                 const int dimension,
                                 const int start_id,
                                 const bool parallel,
                                 const bool owned_only);

    //! check for global ids; based only on tag handle being there or not;
    //! if it's not there, create them for the specified dimensions
    //!\param owned_only If true, do not get global IDs for non-owned entities
    //!                  from remote processors.
    ErrorCode check_global_ids(EntityHandle this_set,
                               const int dimension,
                               const int start_id = 1,
                               const bool largest_dim_only = true,
                               const bool parallel = true,
                               const bool owned_only = false);

    // ==================================
    // \section HIGH-LEVEL COMMUNICATION (send/recv/bcast/scatter ents, exchange tags)
    // ==================================

    /** \brief send entities to another processor, optionally waiting until it's done
     *
     * Send entities to another processor, with adjs, sets, and tags.
     * If store_remote_handles is true, this call receives back handles assigned to
     * entities sent to destination processor and stores them in sharedh_tag or
     * sharedhs_tag.
     * \param to_proc Destination processor
     * \param orig_ents Entities requested to send
     * \param adjs If true, send adjacencies for equiv entities (currently unsupported)
     * \param tags If true, send tag values for all tags assigned to entities
     * \param store_remote_handles If true, also recv message with handles on destination processor (currently unsupported)
     * \param final_ents Range containing all entities sent
     * \param incoming keep track of how many messages are coming to this processor (newly added)
     * \param wait_all If true, wait until all messages received/sent complete
     */
    ErrorCode send_entities(const int to_proc,
                            Range &orig_ents,
                            const bool adjs,
                            const bool tags,
                            const bool store_remote_handles,
                            const bool is_iface,
                            Range &final_ents,
                            int &incoming1,
                            int &incoming2, // newly added
                            TupleList& entprocs, // newly added
                            std::vector<MPI_Request> &recv_remoteh_reqs, // newly added
                            bool wait_all = true);

    ErrorCode send_entities(std::vector<unsigned int>& send_procs,
                            std::vector<Range*>& send_ents,
                            int& incoming1, int& incoming2,
                            const bool store_remote_handles);

    /** \brief Receive entities from another processor, optionally waiting until it's done
     *
     * Receive entities from another processor, with adjs, sets, and tags.
     * If store_remote_handles is true, this call sends back handles assigned to
     * the entities received.
     * \param from_proc Source processor
     * \param store_remote_handles If true, send message with new entity handles to source processor (currently unsupported)
     * \param final_ents Range containing all entities received
     * \param incoming keep track of how many messages are coming to this processor (newly added)
     * \param wait_all If true, wait until all messages received/sent complete
     */
    ErrorCode recv_entities(const int from_proc,
                            const bool store_remote_handles,
                            const bool is_iface,
                            Range &final_ents,
                            int& incoming1,
                            int& incoming2,
                            std::vector<std::vector<EntityHandle> > &L1hloc,
                            std::vector<std::vector<EntityHandle> > &L1hrem,
                            std::vector<std::vector<int> > &L1p,
                            std::vector<EntityHandle> &L2hloc,
                            std::vector<EntityHandle> &L2hrem,
                            std::vector<unsigned int> &L2p,
                            std::vector<MPI_Request> &recv_remoteh_reqs,
                            bool wait_all = true);

    ErrorCode recv_entities(std::set<unsigned int>& recv_procs,
                            int incoming1, int incoming2,
                            const bool store_remote_handles,
                            const bool migrate = false);

    /** \brief Receive messages from another processor in a while loop
     *
     * Receive messages from another processor.
     * \param from_proc Source processor
     * \param store_remote_handles If true, send message with new entity handles to source processor (currently unsupported)
     * \param final_ents Range containing all entities received
     * \param incoming keep track of how many messages are coming to this processor (newly added)
     */
    ErrorCode recv_messages(const int from_proc,
                            const bool store_remote_handles,
                            const bool is_iface,
                            Range &final_ents,
                            int& incoming1,
                            int& incoming2,
                            std::vector<std::vector<EntityHandle> > &L1hloc,
                            std::vector<std::vector<EntityHandle> > &L1hrem,
                            std::vector<std::vector<int> > &L1p,
                            std::vector<EntityHandle> &L2hloc,
                            std::vector<EntityHandle> &L2hrem,
                            std::vector<unsigned int> &L2p,
                            std::vector<MPI_Request> &recv_remoteh_reqs);

    ErrorCode recv_remote_handle_messages(const int from_proc,
                                          int& incoming2,
                                          std::vector<EntityHandle> &L2hloc,
                                          std::vector<EntityHandle> &L2hrem,
                                          std::vector<unsigned int> &L2p,
                                          std::vector<MPI_Request> &recv_remoteh_reqs);

    /** \brief Exchange ghost cells with neighboring procs
     * Neighboring processors are those sharing an interface
     * with this processor.  All entities of dimension ghost_dim
     * within num_layers of interface, measured going through bridge_dim,
     * are exchanged.  See MeshTopoUtil::get_bridge_adjacencies for description
     * of bridge adjacencies.  If wait_all is false and store_remote_handles
     * is true, MPI_Request objects are available in the sendReqs[2*MAX_SHARING_PROCS]
     * member array, with inactive requests marked as MPI_REQUEST_NULL.  If
     * store_remote_handles or wait_all is false, this function returns after
     * all entities have been received and processed.
     * \param ghost_dim Dimension of ghost entities to be exchanged
     * \param bridge_dim Dimension of entities used to measure layers from interface
     * \param num_layers Number of layers of ghosts requested
     * \param addl_ents Dimension of additional adjacent entities to exchange with ghosts, 0 if none
     * \param store_remote_handles If true, send message with new entity handles to source processor
     * \param wait_all If true, function does not return until all send buffers
     *       are cleared.
     */

    ErrorCode exchange_ghost_cells(int ghost_dim, int bridge_dim,
                                   int num_layers, int addl_ents,
                                   bool store_remote_handles,
                                   bool wait_all = true,
                                   EntityHandle *file_set = NULL);
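
    /* Usage sketch (added for illustration; error checking omitted).  A
     * common call after resolve_shared_ents: get one layer of 3D ghost
     * elements, bridging through vertices (dimension 0), with no
     * additional adjacent entities:
     *
     *   moab::ErrorCode rval = pcomm.exchange_ghost_cells(3, 0, 1, 0, true);
     */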

    /** \brief Static version of exchange_ghost_cells, exchanging info through
     * buffers rather than messages
     */
    static ErrorCode exchange_ghost_cells(ParallelComm **pc,
                                          unsigned int num_procs,
                                          int ghost_dim, int bridge_dim,
                                          int num_layers, int addl_ents,
                                          bool store_remote_handles,
                                          EntityHandle *file_sets = NULL);

    /** \brief Post "MPI_Irecv" before meshing
     * \param exchange_procs processor vector exchanged
     */
    ErrorCode post_irecv(std::vector<unsigned int>& exchange_procs);

    ErrorCode post_irecv(std::vector<unsigned int>& shared_procs,
                         std::set<unsigned int>& recv_procs);
    /** \brief Exchange owned mesh for input mesh entities and sets
     * This function should be called collectively over the communicator for this ParallelComm.
     * If this version is called, all shared exchanged entities should have a value for this
     * tag (or the tag should have a default value).
     * \param exchange_procs processor vector exchanged
     * \param exchange_ents exchanged entities for each processor
     * \param migrate if true, ownership of the exchanged entities is changed
     */
    ErrorCode exchange_owned_meshs(std::vector<unsigned int>& exchange_procs,
                                   std::vector<Range*>& exchange_ents,
                                   std::vector<MPI_Request>& recv_ent_reqs,
                                   std::vector<MPI_Request>& recv_remoteh_reqs,
                                   bool store_remote_handles,
                                   bool wait_all = true,
                                   bool migrate = false,
                                   int dim = 0);

    /** \brief Exchange owned mesh for input mesh entities and sets
     * This function is called twice by exchange_owned_meshs to exchange entities before sets
     * \param migrate if true, ownership of the exchanged entities is changed
     */
    ErrorCode exchange_owned_mesh(std::vector<unsigned int>& exchange_procs,
                                  std::vector<Range*>& exchange_ents,
                                  std::vector<MPI_Request>& recv_ent_reqs,
                                  std::vector<MPI_Request>& recv_remoteh_reqs,
                                  const bool recv_posted,
                                  bool store_remote_handles,
                                  bool wait_all,
                                  bool migrate = false);

    /** \brief Exchange tags for all shared and ghosted entities
     * This function should be called collectively over the communicator for this ParallelComm.
     * If this version is called, all ghosted/shared entities should have a value for this
     * tag (or the tag should have a default value).  If the entities vector is empty, all shared entities
     * participate in the exchange.  If a proc has no owned entities this function must still be called
     * since it is collective.
     * \param src_tags Vector of tag handles to be exchanged
     * \param dst_tags Tag handles to store the tags on the non-owning procs
     * \param entities Entities for which tags are exchanged
     */
    ErrorCode exchange_tags( const std::vector<Tag> &src_tags,
                             const std::vector<Tag> &dst_tags,
                             const Range &entities);

    /** \brief Exchange tags for all shared and ghosted entities
     * This function should be called collectively over the communicator for this ParallelComm.
     * If the entities vector is empty, all shared entities
     * participate in the exchange.  If a proc has no owned entities this function must still be called
     * since it is collective.
     * \param tag_name Name of tag to be exchanged
     * \param entities Entities for which tags are exchanged
     */
    ErrorCode exchange_tags( const char *tag_name,
                             const Range &entities);

    /** \brief Exchange tags for all shared and ghosted entities
     * This function should be called collectively over the communicator for this ParallelComm.
     * If the entities vector is empty, all shared entities
     * participate in the exchange.  If a proc has no owned entities this function must still be called
     * since it is collective.
     * \param tagh Handle of tag to be exchanged
     * \param entities Entities for which tags are exchanged
     */
    ErrorCode exchange_tags( Tag tagh,
                             const Range &entities);
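
    /* Usage sketch (added for illustration; "density_tag" is an assumed,
     * previously created tag; error checking omitted).  Update ghost/shared
     * copies with the owners' values:
     *
     *   moab::Range ents;   // empty range => all shared entities participate
     *   moab::ErrorCode rval = pcomm.exchange_tags(density_tag, ents);
     */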

    /** \brief Perform data reduction operation for all shared and ghosted entities
     * This function should be called collectively over the communicator for this ParallelComm.
     * If this version is called, all ghosted/shared entities should have a value for this
     * tag (or the tag should have a default value).  Operation is any MPI_Op, with result stored
     * in destination tag.
     * \param src_tags Vector of tag handles to be reduced
     * \param dst_tags Vector of tag handles in which the answer will be stored
     * \param mpi_op Operation type
     * \param entities Entities on which reduction will be made; if empty, operates on all shared
     *                 entities
     */
    ErrorCode reduce_tags( const std::vector<Tag> &src_tags,
                           const std::vector<Tag> &dst_tags,
                           const MPI_Op mpi_op,
                           const Range &entities);

    /** \brief Perform data reduction operation for all shared and ghosted entities
     * Same as std::vector variant except for one tag specified by name
     * \param tag_name Name of tag to be reduced
     * \param mpi_op Operation type
     * \param entities Entities on which reduction will be made; if empty, operates on all shared
     *                 entities
     */
    ErrorCode reduce_tags( const char *tag_name,
                           const MPI_Op mpi_op,
                           const Range &entities);

    /** \brief Perform data reduction operation for all shared and ghosted entities
     * Same as std::vector variant except for one tag specified by handle
     * \param tag_handle Handle of tag to be reduced
     * \param mpi_op Operation type
     * \param entities Entities on which reduction will be made; if empty, operates on all shared
     *                 entities
     */
    ErrorCode reduce_tags( Tag tag_handle,
                           const MPI_Op mpi_op,
                           const Range &entities);
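
    /* Usage sketch (added for illustration; "flux_tag" is an assumed tag;
     * error checking omitted).  Sum the contributions from all sharing
     * procs, storing the result back in the same tag:
     *
     *   moab::Range ents;   // empty => all shared entities
     *   moab::ErrorCode rval = pcomm.reduce_tags(flux_tag, MPI_SUM, ents);
     */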

    /** \brief Broadcast all entities resident on from_proc to other processors
     * This function assumes remote handles are *not* being stored, since (usually)
     * every processor will know about the whole mesh.
     * \param from_proc Processor having the mesh to be broadcast
     * \param entities On return, the entities sent or received in this call
     * \param adjacencies If true, adjacencies are sent for equiv entities (currently unsupported)
     * \param tags If true, all non-default-valued tags are sent for sent entities
     */
    ErrorCode broadcast_entities(const int from_proc,
                                 Range& entities,
                                 const bool adjacencies = false,
                                 const bool tags = true );

    /** \brief Scatter entities on from_proc to other processors
     * This function assumes remote handles are *not* being stored, since (usually)
     * every processor will know about the whole mesh.
     * \param from_proc Processor having the mesh to be scattered
     * \param entities On return, the entities sent or received in this call
     * \param adjacencies If true, adjacencies are sent for equiv entities (currently unsupported)
     * \param tags If true, all non-default-valued tags are sent for sent entities
     */
    ErrorCode scatter_entities(const int from_proc,
                               std::vector<Range> &entities,
                               const bool adjacencies = false,
                               const bool tags = true);


    /////////////////////////////////////////////////////////////////////////////////
    // Send and Receive routines for a sequence of entities: use case UMR
    /////////////////////////////////////////////////////////////////////////////////

    /** \brief Sends data to and receives data from a set of processors
      */
    ErrorCode send_recv_entities(std::vector<int> &send_procs,
                                 std::vector<std::vector<int> > &msgsizes,
                                 std::vector<std::vector<EntityHandle> > &senddata,
                                 std::vector<std::vector<EntityHandle> > &recvdata);

    ErrorCode update_remote_data(EntityHandle entity, std::vector<int> &procs,
                                 std::vector<EntityHandle> &handles);

    ErrorCode get_remote_handles(EntityHandle *local_vec, EntityHandle *rem_vec,
                                 int num_ents, int to_proc);

    /////////////////////////////////////////////////////////////////////////////////

    // ==================================
    // \section INITIALIZATION OF PARALLEL DATA (resolve_shared_ents, etc.)
    // ==================================

    /** \brief Resolve shared entities between processors
     *
     * Resolve shared entities between processors for entities in proc_ents,
     * by comparing global id tag values on vertices on skin of elements in
     * proc_ents.  Shared entities are assigned a tag that's either
     * PARALLEL_SHARED_PROC_TAG_NAME, which is 1 integer in length, or
     * PARALLEL_SHARED_PROCS_TAG_NAME, whose length depends on the maximum
     * number of sharing processors.  Values in these tags denote the ranks
     * of sharing processors, and the list ends with the value -1.
     *
     * If shared_dim is input as -1 or not input, a value one less than the
     * maximum dimension of entities in proc_ents is used.
     *
     * \param proc_ents Entities for which to resolve shared entities
     * \param shared_dim Maximum dimension of shared entities to look for
     */
    ErrorCode resolve_shared_ents(EntityHandle this_set,
                                  Range &proc_ents,
                                  int resolve_dim = -1,
                                  int shared_dim = -1,
                                  Range *skin_ents = NULL,
                                  const Tag* id_tag = 0);

    /** \brief Resolve shared entities between processors
     *
     * Same as resolve_shared_ents(Range&), except works for
     * all entities in instance of dimension dim.
     *
     * If shared_dim is input as -1 or not input, a value one less than the
     * maximum dimension of entities is used.
     *
     * \param dim Dimension of entities in the partition
     * \param shared_dim Maximum dimension of shared entities to look for
     */
    ErrorCode resolve_shared_ents(EntityHandle this_set,
                                  int resolve_dim = 3,
                                  int shared_dim = -1,
                                  const Tag* id_tag = 0);

    static ErrorCode resolve_shared_ents(ParallelComm **pc,
                                         const unsigned int np,
                                         EntityHandle this_set,
                                         const int to_dim);
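
    /* Usage sketch (added for illustration; error checking omitted).
     * Resolve sharing for a mesh of 3D elements loaded into the root set,
     * looking for shared entities down to dimension 2:
     *
     *   moab::ErrorCode rval = pcomm.resolve_shared_ents(0, 3, 2);
     */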

    /** Resolve shared sets.
     *
     * Generates a list of candidate sets from those (directly)
     * contained in the passed set and passes them to the other version
     * of \c resolve_shared_sets.
     *\param this_set  Set directly containing candidate sets (e.g. file set)
     *\param id_tag    Tag containing global IDs for entity sets.
     */
    ErrorCode resolve_shared_sets( EntityHandle this_set, const Tag* id_tag = 0 );

    /** Resolve shared sets.
     *
     * Use values of id_tag to match sets across processes and populate
     * sharing data for sets.
     *\param candidate_sets  Sets to consider as potentially shared.
     *\param id_tag    Tag containing global IDs for entity sets.
     */
    ErrorCode resolve_shared_sets( Range& candidate_sets, Tag id_tag );

    /** Extend shared sets with ghost entities.
     *
     * After ghosting, ghost entities do not yet have information about
     * the material set, partition set, Neumann or Dirichlet set they could
     * belong to.  This method assigns ghosted entities to those special
     * entity sets; in some cases we might even have to create those sets,
     * if they do not exist yet on the local processor.
     *
     * The special entity sets are all uniquely identified by an integer
     * tag on the set.  The shared-sets data is not used, because geometry
     * sets are not uniquely identified.
     *
     * \param file_set file set used per application
     */
    ErrorCode augment_default_sets_with_ghosts( EntityHandle file_set);
    // ==================================
    // \section GET PARALLEL DATA (shared/owned/iface entities, etc.)
    // ==================================

    /** \brief Get parallel status of an entity
     * Returns the parallel status of an entity
     *
     * \param entity The entity being queried
     * \param pstatus_val Parallel status of the entity
     */
    ErrorCode get_pstatus(EntityHandle entity,
                          unsigned char &pstatus_val);

    /** \brief Get entities with the given pstatus bit(s) set
     * Returns any entities whose pstatus tag value v satisfies (v & pstatus_val)
     *
     * \param dim Dimension of entities to be returned, or -1 if any
     * \param pstatus_val pstatus value of desired entities
     * \param pstatus_ents Entities returned from function
     */
    ErrorCode get_pstatus_entities(int dim,
                                   unsigned char pstatus_val,
                                   Range &pstatus_ents);
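
    /* Usage sketch (added for illustration; error checking omitted, and
     * PSTATUS_GHOST is assumed to come from MBParallelConventions.h).
     * Collect all ghost entities of any dimension on this proc:
     *
     *   moab::Range ghosts;
     *   moab::ErrorCode rval = pcomm.get_pstatus_entities(-1, PSTATUS_GHOST, ghosts);
     */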

    /** \brief Return the rank of the entity owner
     */
    ErrorCode get_owner(EntityHandle entity, int &owner);

    /** \brief Return the owner processor and handle of a given entity
     */
    ErrorCode get_owner_handle(EntityHandle entity,
                               int &owner,
                               EntityHandle &handle);

    /** \brief Get the shared processors/handles for an entity
     * Get the shared processors/handles for an entity.  Arrays must
     * be large enough to receive data for all sharing procs.  Does *not* include
     * this proc if only shared with one other proc.
     * \param entity Entity being queried
     * \param ps Pointer to sharing proc data
     * \param hs Pointer to shared proc handle data
     * \param pstat Reference to pstatus data returned from this function
     */
    ErrorCode get_sharing_data(const EntityHandle entity,
                               int *ps,
                               EntityHandle *hs,
                               unsigned char &pstat,
                               unsigned int &num_ps);
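
    /* Usage sketch (added for illustration; "ent" is an assumed shared
     * entity; error checking omitted).  The arrays must hold up to
     * MAX_SHARING_PROCS entries:
     *
     *   int ps[MAX_SHARING_PROCS];
     *   moab::EntityHandle hs[MAX_SHARING_PROCS];
     *   unsigned char pstat;
     *   unsigned int num_ps;
     *   moab::ErrorCode rval = pcomm.get_sharing_data(ent, ps, hs, pstat, num_ps);
     */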

    /** \brief Get the shared processors/handles for an entity
     * Same as other version but with int num_ps
     * \param entity Entity being queried
     * \param ps Pointer to sharing proc data
     * \param hs Pointer to shared proc handle data
     * \param pstat Reference to pstatus data returned from this function
     */
    ErrorCode get_sharing_data(const EntityHandle entity,
                               int *ps,
                               EntityHandle *hs,
                               unsigned char &pstat,
                               int &num_ps);

    /** \brief Get the intersection or union of all sharing processors
     * Get the intersection or union of all sharing processors.  Processor set
     * is cleared as part of this function.
     * \param entities Entity list ptr
     * \param num_entities Number of entities
     * \param procs Processors returned
     * \param op Either Interface::UNION or Interface::INTERSECT
     */
    ErrorCode get_sharing_data(const EntityHandle *entities,
                               int num_entities,
                               std::set<int> &procs,
                               int op = Interface::INTERSECT);

    /** \brief Get the intersection or union of all sharing processors
     * Same as previous variant but with range as input
     */
    ErrorCode get_sharing_data(const Range &entities,
                               std::set<int> &procs,
                               int op = Interface::INTERSECT);

    /** \brief Get shared entities of specified dimension
     * If other_proc is -1, any shared entities are returned.  If dim is -1,
     * entities of all dimensions on interface are returned.
     * \param other_proc Rank of processor for which interface entities are requested
     * \param shared_ents Entities returned from function
     * \param dim Dimension of interface entities requested
     * \param iface If true, return only entities on the interface
     * \param owned_filter If true, return only owned shared entities
     */
    ErrorCode get_shared_entities(int other_proc,
                                  Range &shared_ents,
                                  int dim = -1,
                                  const bool iface = false,
                                  const bool owned_filter = false);
    /*
    //! return partition sets; if tag_name is input, gets sets with
    //! that tag name, otherwise uses PARALLEL_PARTITION tag
    ErrorCode get_partition_sets(EntityHandle this_set,
                                 Range &part_sets,
                                 const char *tag_name = NULL);
    */
    //! get processors with which this processor shares an interface
    ErrorCode get_interface_procs(std::set<unsigned int> &iface_procs,
                                  const bool get_buffs = false);

    //! get processors with which this processor communicates
    ErrorCode get_comm_procs(std::set<unsigned int> &procs);

    // ==================================
    // \section SHARED SETS
    // ==================================

    //! Get array of process IDs sharing a set.  Returns zero
    //! and passes back NULL if set is not shared.
    ErrorCode get_entityset_procs( EntityHandle entity_set,
                                   std::vector<unsigned>& ranks ) const;

    //! Get rank of the owner of a shared set.
    //! Returns this proc if set is not shared.
    //! Optionally returns handle on owning process for shared set.
    ErrorCode get_entityset_owner( EntityHandle entity_set,
                                   unsigned& owner_rank,
                                   EntityHandle* remote_handle = 0 ) const;

    //! Given set owner and handle on owner, find local set handle
    ErrorCode get_entityset_local_handle( unsigned owning_rank,
                                          EntityHandle remote_handle,
                                          EntityHandle& local_handle ) const;

    //! Get all shared sets
    ErrorCode get_shared_sets( Range& result ) const;

    //! Get ranks of all processes that own at least one set that is
    //! shared with this process.  Will include the rank of this process
    //! if this process owns any shared set.
    ErrorCode get_entityset_owners( std::vector<unsigned>& ranks ) const;

    //! Get shared sets owned by process with specified rank.
    ErrorCode get_owned_sets( unsigned owning_rank, Range& sets_out ) const;

    // ==================================
    // \section LOW-LEVEL DATA (tags, sets on interface/partition, etc.)
    // ==================================

    //! Get proc config for this communication object
    const ProcConfig &proc_config() const {return procConfig;}

    //! Get proc config for this communication object
    ProcConfig &proc_config() {return procConfig;}

    unsigned rank() const { return proc_config().proc_rank(); }
    unsigned size() const { return proc_config().proc_size(); }
    MPI_Comm comm() const { return proc_config().proc_comm(); }

    //! return the tags used to indicate shared procs and handles
    ErrorCode get_shared_proc_tags(Tag &sharedp_tag,
                                   Tag &sharedps_tag,
                                   Tag &sharedh_tag,
                                   Tag &sharedhs_tag,
                                   Tag &pstatus_tag);

    //! return partition, interface set ranges
    Range &partition_sets() {return partitionSets;}
    const Range &partition_sets() const {return partitionSets;}
    Range &interface_sets() {return interfaceSets;}
    const Range &interface_sets() const {return interfaceSets;}

    //! return sharedp tag
    Tag sharedp_tag();

    //! return sharedps tag
    Tag sharedps_tag();

    //! return sharedh tag
    Tag sharedh_tag();

    //! return sharedhs tag
    Tag sharedhs_tag();

    //! return pstatus tag
    Tag pstatus_tag();

    //! return pcomm tag; static because might not have a pcomm before going
    //! to look for one on the interface
    static Tag pcomm_tag(Interface *impl,
                         bool create_if_missing = true);

    //! return partitions set tag
    Tag partition_tag();
    Tag part_tag() { return partition_tag(); }

    // ==================================
    // \section DEBUGGING AIDS
    // ==================================

    //! print contents of pstatus value in human-readable form
    void print_pstatus(unsigned char pstat, std::string &ostr);

    //! print contents of pstatus value in human-readable form to std::cout
    void print_pstatus(unsigned char pstat);

    // ==================================
    // \section IMESHP-RELATED FUNCTIONS
    // ==================================

    //! return all the entities in parts owned locally
    ErrorCode get_part_entities(Range &ents, int dim = -1);

    EntityHandle get_partitioning() const { return partitioningSet; }
    ErrorCode set_partitioning( EntityHandle h );
    ErrorCode get_global_part_count( int& count_out ) const;
    ErrorCode get_part_owner( int part_id, int& owner_out ) const;
    ErrorCode get_part_id( EntityHandle part, int& id_out ) const;
    ErrorCode get_part_handle( int id, EntityHandle& handle_out ) const;
    ErrorCode create_part( EntityHandle& part_out );
    ErrorCode destroy_part( EntityHandle part );
    ErrorCode collective_sync_partition();
    ErrorCode get_part_neighbor_ids( EntityHandle part,
                                     int neighbors_out[MAX_SHARING_PROCS],
                                     int& num_neighbors_out );
    ErrorCode get_interface_sets( EntityHandle part,
                                  Range& iface_sets_out,
                                  int* adj_part_id = 0 );
    ErrorCode get_owning_part( EntityHandle entity,
                               int& owning_part_id_out,
                               EntityHandle* owning_handle = 0 );
    ErrorCode get_sharing_parts( EntityHandle entity,
                                 int part_ids_out[MAX_SHARING_PROCS],
                                 int& num_part_ids_out,
                                 EntityHandle remote_handles[MAX_SHARING_PROCS] = 0);

    /** Filter the entities by pstatus tag.
     * op is one of PSTATUS_AND, PSTATUS_OR, PSTATUS_NOT; an entity is output if:
     * AND: all bits set in pstatus_val are also set on entity
     * OR: any bits set in pstatus_val also set on entity
     * NOT: any bits set in pstatus_val are not set on entity
     *
     * Results returned in input list, unless returned_ents is passed in non-null,
     * in which case results are returned in returned_ents.
     *
     * If ents is passed in empty, filter is done on shared entities in this
     * pcomm instance, i.e. contents of sharedEnts.
     *
     *\param ents       Input entities to filter
     *\param pstatus_val pstatus value to which entities are compared
     *\param op Bitwise operation performed between pstatus values
     *\param to_proc If non-negative and PSTATUS_SHARED is set on pstatus_val,
     *               only entities shared with to_proc are returned
     *\param returned_ents If non-null, results of filter are put in the
     *       pointed-to range
     */
    ErrorCode filter_pstatus( Range &ents,
                              const unsigned char pstatus_val,
                              const unsigned char op,
                              int to_proc = -1,
                              Range *returned_ents = NULL);
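
    /* Usage sketch (added for illustration; error checking omitted, and the
     * PSTATUS_* constants are assumed to come from MBParallelConventions.h).
     * Keep only the entities in "ents" that this proc owns, i.e. remove any
     * entity with PSTATUS_NOT_OWNED set:
     *
     *   moab::ErrorCode rval = pcomm.filter_pstatus(ents, PSTATUS_NOT_OWNED,
     *                                               PSTATUS_NOT);
     */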

    /** \brief Get entities on interfaces shared with another proc
     *
     * \param other_proc Other proc sharing the interface
     * \param dim Dimension of entities to return, -1 if all dims
     * \param iface_ents Returned entities
     */
    ErrorCode get_iface_entities(int other_proc,
                                 int dim,
                                 Range &iface_ents);

    Interface* get_moab() const { return mbImpl; }


    ErrorCode clean_shared_tags(std::vector<Range*>& exchange_ents);

    class Buffer {
    public:
      unsigned char *mem_ptr;
      unsigned char *buff_ptr;
      unsigned int alloc_size;

      Buffer(unsigned int sz = 0);
      Buffer(const Buffer &);
      ~Buffer();
      void reset_buffer(size_t buff_pos = 0) {reset_ptr(buff_pos); reserve(INITIAL_BUFF_SIZE);}
      void reset_ptr(size_t buff_pos = 0) {assert((!mem_ptr && !buff_pos) || (alloc_size >= buff_pos)); buff_ptr = mem_ptr + buff_pos;}
      inline void reserve(unsigned int new_size);
      void set_stored_size() {*((int*)mem_ptr) = (int)(buff_ptr - mem_ptr);}
      int get_stored_size() {return *((int*)mem_ptr);}
      int get_current_size() {return (int)(buff_ptr - mem_ptr);}

      void check_space(unsigned int addl_space);
    };
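
    /* Usage sketch (added for illustration; error checking omitted, and
     * "bytes_needed" is a hypothetical size).  Typical pack-side pattern:
     * leave room for the leading size field, write data while growing the
     * allocation as needed, then record the stored size at the front:
     *
     *   Buffer buff(INITIAL_BUFF_SIZE);
     *   buff.reset_ptr(sizeof(int));      // skip the leading size field
     *   buff.check_space(bytes_needed);   // grow the allocation if necessary
     *   // ... write data at buff.buff_ptr, advancing it as you go ...
     *   buff.set_stored_size();           // record (buff_ptr - mem_ptr) up front
     */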

    //! public 'cuz we want to unit test these externally
    ErrorCode pack_buffer(Range &orig_ents,
                          const bool adjacencies,
                          const bool tags,
                          const bool store_remote_handles,
                          const int to_proc,
                          Buffer *buff,
                          TupleList *entprocs = NULL,
                          Range *allsent = NULL);

    ErrorCode unpack_buffer(unsigned char *buff_ptr,
                            const bool store_remote_handles,
                            const int from_proc,
                            const int ind,
                            std::vector<std::vector<EntityHandle> > &L1hloc,
                            std::vector<std::vector<EntityHandle> > &L1hrem,
                            std::vector<std::vector<int> > &L1p,
                            std::vector<EntityHandle> &L2hloc,
                            std::vector<EntityHandle> &L2hrem,
                            std::vector<unsigned int> &L2p,
                            std::vector<EntityHandle> &new_ents,
                            const bool created_iface = false);

    ErrorCode pack_entities(Range &entities,
                            Buffer *buff,
                            const bool store_remote_handles,
                            const int to_proc,
                            const bool is_iface,
                            TupleList *entprocs = NULL,
                            Range *allsent = NULL);

    //! unpack entities in buff_ptr
    ErrorCode unpack_entities(unsigned char *&buff_ptr,
                              const bool store_remote_handles,
                              const int from_ind,
                              const bool is_iface,
                              std::vector<std::vector<EntityHandle> > &L1hloc,
                              std::vector<std::vector<EntityHandle> > &L1hrem,
                              std::vector<std::vector<int> > &L1p,
                              std::vector<EntityHandle> &L2hloc,
                              std::vector<EntityHandle> &L2hrem,
                              std::vector<unsigned int> &L2p,
                              std::vector<EntityHandle> &new_ents,
                              const bool created_iface = false);

    //! Call exchange_all_shared_handles, then compare the results with tag data
    //! on local shared entities.
    ErrorCode check_all_shared_handles(bool print_em = false);

    static ErrorCode check_all_shared_handles(ParallelComm **pcs,
                                              int num_pcs);

    struct SharedEntityData {
      EntityHandle local;
      EntityHandle remote;
      EntityID owner;
    };

    ErrorCode pack_shared_handles(
                                  std::vector<std::vector<SharedEntityData> > &send_data);

    // check consistency of sharedEnts against their tags and their
    // vertices' tags
    ErrorCode check_local_shared();

    // check contents of communicated shared entity data against tags
    ErrorCode check_my_shared_handles(
                                      std::vector<std::vector<SharedEntityData> > &shents,
                                      const char *prefix = NULL);

    //! set rank for this pcomm; USED FOR TESTING ONLY!
    void set_rank(unsigned int r);

    //! set size for this pcomm; USED FOR TESTING ONLY!
    void set_size(unsigned int r);

    //! get (and possibly allocate) buffers for messages to/from to_proc; returns
    //! index of to_proc in buffProcs vector; if is_new is non-NULL, sets to
    //! whether new buffer was allocated
    //! PUBLIC ONLY FOR TESTING!
    int get_buffers(int to_proc, bool *is_new = NULL);


    //! get buff processor vector
    const std::vector<unsigned int> &buff_procs() const;

    /* \brief Unpack message with remote handles
     * PUBLIC ONLY FOR TESTING!
     */
    ErrorCode unpack_remote_handles(unsigned int from_proc,
                                    unsigned char *&buff_ptr,
                                    std::vector<EntityHandle> &L2hloc,
                                    std::vector<EntityHandle> &L2hrem,
                                    std::vector<unsigned int> &L2p);

    /* \brief Pack message with remote handles
     * PUBLIC ONLY FOR TESTING!
     */
    ErrorCode pack_remote_handles(std::vector<EntityHandle> &L1hloc,
                                  std::vector<EntityHandle> &L1hrem,
                                  std::vector<int> &procs,
                                  unsigned int to_proc,
                                  Buffer *buff);

    // each entry in proc_nvecs contains a set of procs and the entities *possibly*
    // on the interface between those procs; this function makes sets for each,
    // and tags the set with the procs sharing it; interface sets are optionally
    // returned; NOTE: a subsequent step is used to verify entities on the interface
    // and remove them if they're not shared
    ErrorCode create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);

    // do the same but working straight from sharedEnts
    ErrorCode create_interface_sets(EntityHandle this_set, int resolve_dim, int shared_dim);

    ErrorCode tag_shared_verts(TupleList &shared_ents,
                               std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
                               Range &proc_verts,
                               unsigned int i_extra = 1);

    ErrorCode list_entities(const EntityHandle *ents, int num_ents);

    ErrorCode list_entities(const Range &ents);

    void set_send_request(int n_request); // set send request array

    void set_recv_request(int n_request); // set recv request array

    //! reset message buffers to their initial state
    // changed to public function (HJK)
    void reset_all_buffers();

    static const unsigned int INITIAL_BUFF_SIZE;

    //! set the verbosity level of output from this pcomm
    void set_debug_verbosity(int verb);

    //! get the verbosity level of output from this pcomm
    int get_debug_verbosity();

    /* \brief Gather tag value from entities down to a specified root proc
     * This function gathers data from a domain-decomposed mesh onto a global mesh
     * represented on the root processor.  On the root, this gather mesh is distinct from
     * the root's domain-decomposed subdomain.  Entities are matched by global id, or by
     * another tag if its handle is input.  The dimension of all entities in gather_ents should
     * be the same, since this is the dimension of entities in gather_set that are queried for
     * matching global id tags.
     * \param gather_ents (Local) entities from which to gather data
     * \param tag_handle Tag whose values are being gathered
     * \param id_tag Tag to use for matching entities (global id used by default)
     * \param gather_set On root, set containing global mesh onto which to put data
     * \param root_proc_rank Rank of the specified root processor (default rank is 0)
     */
    ErrorCode gather_data(Range &gather_ents, Tag &tag_handle,
                          Tag id_tag = 0, EntityHandle gather_set = 0, int root_proc_rank = 0);
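
    /* Usage sketch (added for illustration; "gset" and "temp_tag" are
     * assumed to exist; error checking omitted).  Gather tag values from
     * the distributed mesh onto the global mesh in "gset" on rank 0,
     * matching entities by global id:
     *
     *   moab::ErrorCode rval = pcomm.gather_data(my_ents, temp_tag, 0, gset, 0);
     */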

    /* \brief Communicate extra point positions on the boundary
     * This function is called after intersection of 2 meshes, to settle the
     * position of the intersection points on the boundary (interface).
     * The initial mesh distributed on each processor is decomposed after
     * intersection with another mesh, such that new points are created on
     * the boundary; these points should match at the interface.
     * We perform an extra step, as a precaution, to ensure the robustness
     * of the intersection algorithm; only the extra nodes on shared edges
     * actually need to be communicated, but we pass the whole extraNodesVec
     * structure by reference rather than construct another data structure.
     * The node positions on owned edges are communicated to the other
     * processors.
     *
     * \param edges total range of entities
     * \param shared_edges_owned edges for which to communicate data
     * \param extraNodesVec handles of intersection vertices on all edges;
     */
    ErrorCode settle_intersection_points(Range & edges, Range & shared_edges_owned,
        std::vector<std::vector<EntityHandle> *> & extraNodesVec, double tolerance);

    /* \brief Delete entities from the MOAB database
     * Will check the shared ents array, and clean it if necessary.
     *
     */
    ErrorCode delete_entities(Range & to_delete);

  private:

    ErrorCode reduce_void(int tag_data_type, const MPI_Op mpi_op, int num_ents, void *old_vals, void *new_vals);

    template <class T> ErrorCode reduce(const MPI_Op mpi_op, int num_ents, void *old_vals, void *new_vals);

    void print_debug_isend(int from, int to, unsigned char *buff,
                           int tag, int size);

    void print_debug_irecv(int to, int from, unsigned char *buff, int size,
                           int tag, int incoming);

    void print_debug_recd(MPI_Status status);

    void print_debug_waitany(std::vector<MPI_Request> &reqs, int tag, int proc);

    // common initialization code, called from various constructors
    void initialize();

    ErrorCode set_sharing_data(EntityHandle ent, unsigned char pstatus,
                               int old_nump, int new_nump,
                               int *ps, EntityHandle *hs);

    ErrorCode check_clean_iface(Range &allsent);

    void define_mpe();

    ErrorCode get_sent_ents(const bool is_iface,
                            const int bridge_dim, const int ghost_dim,
                            const int num_layers, const int addl_ents,
                            Range *sent_ents, Range &allsent,
                            TupleList &entprocs);

    /** \brief Set pstatus values on entities
     *
     * \param pstatus_ents Entities to be set
     * \param pstatus_val Pstatus value to be set
     * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
     *        (and created if they don't exist)
     * \param verts_too If true, vertices also set
     * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
     *        existing value is over-written
     */
    ErrorCode set_pstatus_entities(Range &pstatus_ents,
                                   unsigned char pstatus_val,
                                   bool lower_dim_ents = false,
                                   bool verts_too = true,
                                   int operation = Interface::UNION);

    /** \brief Set pstatus values on entities (vector-based function)
     *
     * \param pstatus_ents Entities to be set
     * \param pstatus_val Pstatus value to be set
     * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
     *        (and created if they don't exist)
     * \param verts_too If true, vertices also set
     * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
     *        existing value is over-written
     */
    ErrorCode set_pstatus_entities(EntityHandle *pstatus_ents,
                                   int num_ents,
                                   unsigned char pstatus_val,
                                   bool lower_dim_ents = false,
                                   bool verts_too = true,
                                   int operation = Interface::UNION);

    //! estimate size required to pack entities
    int estimate_ents_buffer_size(Range &entities,
                                  const bool store_remote_handles);

    //! estimate size required to pack sets
    int estimate_sets_buffer_size(Range &entities,
                                  const bool store_remote_handles);

    //! send the indicated buffer, possibly sending size first
    ErrorCode send_buffer(const unsigned int to_proc,
                          Buffer *send_buff,
                          const int msg_tag,
                          MPI_Request &send_req,
                          MPI_Request &ack_recv_req,
                          int *ack_buff,
                          int &this_incoming,
                          int next_mesg_tag = -1,
                          Buffer *next_recv_buff = NULL,
                          MPI_Request *next_recv_req = NULL,
                          int *next_incoming = NULL);

    //! process incoming message; if longer than the initial size, post
    //! recv for next part then send ack; if ack, send second part; else
    //! indicate that we're done and buffer is ready for processing
    ErrorCode recv_buffer(int mesg_tag_expected,
                          const MPI_Status &mpi_status,
                          Buffer *recv_buff,
                          MPI_Request &recv_2nd_req,
                          MPI_Request &ack_req,
                          int &this_incoming,
                          Buffer *send_buff,
                          MPI_Request &send_req,
                          MPI_Request &sent_ack_req,
                          bool &done,
                          Buffer *next_buff = NULL,
                          int next_tag = -1,
                          MPI_Request *next_req = NULL,
                          int *next_incoming = NULL);

    //! pack a range of entities with equal # verts per entity, along with
    //! the range on the sending proc
    ErrorCode pack_entity_seq(const int nodes_per_entity,
                              const bool store_remote_handles,
                              const int to_proc,
                              Range &these_ents,
                              std::vector<EntityHandle> &entities,
                              Buffer *buff);

    ErrorCode print_buffer(unsigned char *buff_ptr, int mesg_type, int from_proc,
                           bool sent);

    //! for all the entities in the received buffer; for each, save
    //! entities in this instance which match connectivity, or zero if none found
    ErrorCode unpack_iface_entities(unsigned char *&buff_ptr,
                                    const int from_proc,
                                    const int ind,
                                    std::vector<EntityHandle> &recd_ents);

    ErrorCode pack_sets(Range &entities,
                        Buffer *buff,
                        const bool store_handles,
                        const int to_proc);

    ErrorCode unpack_sets(unsigned char *&buff_ptr,
                          std::vector<EntityHandle> &entities,
                          const bool store_handles,
                          const int to_proc);

    ErrorCode pack_adjacencies(Range &entities,
                               Range::const_iterator &start_rit,
                               Range &whole_range,
                               unsigned char *&buff_ptr,
                               int &count,
                               const bool just_count,
                               const bool store_handles,
                               const int to_proc);

    ErrorCode unpack_adjacencies(unsigned char *&buff_ptr,
                                 Range &entities,
                                 const bool store_handles,
                                 const int from_proc);


    /* \brief Unpack message with remote handles (const pointer to buffer)
     */
    ErrorCode unpack_remote_handles(unsigned int from_proc,
                                    const unsigned char *buff_ptr,
                                    std::vector<EntityHandle> &L2hloc,
                                    std::vector<EntityHandle> &L2hrem,
                                    std::vector<unsigned int> &L2p);

    //! given connectivity and type, find an existing entity, if there is one
    ErrorCode find_existing_entity(const bool is_iface,
                                   const int owner_p,
                                   const EntityHandle owner_h,
                                   const int num_ents,
                                   const EntityHandle *connect,
                                   const int num_connect,
                                   const EntityType this_type,
                                   std::vector<EntityHandle> &L2hloc,
                                   std::vector<EntityHandle> &L2hrem,
                                   std::vector<unsigned int> &L2p,
                                   EntityHandle &new_h);

    ErrorCode build_sharedhps_list(const EntityHandle entity,
                                   const unsigned char pstatus,
                                   const int sharedp,
                                   const std::set<unsigned int> &procs,
                                   unsigned int &num_ents,
                                   int *tmp_procs,
                                   EntityHandle *tmp_handles);
1152 
    /**\brief Get list of tags for which to exchange data
     *
     * Get tags and entities for which to exchange tag data.  This
     * functionality was originally part of 'pack_tags', requested with the
     * 'all_possible_tags' parameter.
     *
     *\param all_entities  Input.  The set of entities for which data is to
     *                      be communicated.
     *\param all_tags      Output.  Populated with the handles of tags to be
     *                      sent.
     *\param tag_ranges    Output.  For each corresponding tag in all_tags, the
     *                      subset of 'all_entities' for which a tag value has
     *                      been set.
     */
    ErrorCode get_tag_send_list( const Range& all_entities,
                                 std::vector<Tag>& all_tags,
                                 std::vector<Range>& tag_ranges );
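    // Illustrative usage sketch (the 'shared_ents' range and 'process_tag'
    // helper are assumptions): the two output vectors are parallel arrays,
    // so tag_ranges[i] holds the entities with a value set for all_tags[i].
    //
    //   std::vector<Tag> tags;
    //   std::vector<Range> tag_ents;
    //   ErrorCode rval = get_tag_send_list(shared_ents, tags, tag_ents);
    //   for (size_t i = 0; i < tags.size(); ++i)
    //     process_tag(tags[i], tag_ents[i]);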

    /**\brief Serialize entity tag data
     *
     * Write the binary serialized representation of the tag data for the
     * passed entities to the passed buffer.
     *
     *\param entities      The entities for which tag values are to be
     *                     serialized; also specifies the ordering of indices
     *                     used for tag values.
     *\param src_tags      Tags from which values are read on this processor.
     *\param dst_tags      Tags under which the values are to be stored on the
     *                     receiving processor, one for each entry in
     *                     'src_tags'.
     *\param tag_ranges    For each corresponding tag in 'src_tags', the
     *                     subset of 'entities' for which a tag value has
     *                     been set.
     *\param buff          Buffer into which to write binary serialized data.
     *\param store_handles The data for each tag is preceded by a list of
     *                     EntityHandles designating the entity to which each
     *                     of the subsequent tag values corresponds.  Each
     *                     handle may be one of:
     *                     1) If store_handles == false:
     *                        an invalid handle composed of {MBMAXTYPE,idx},
     *                        where idx is the position of the entity in
     *                        'entities';
     *                     2) if store_handles == true and a valid remote
     *                        handle exists, the remote handle;
     *                     3) if store_handles == true and no valid remote
     *                        handle is defined for the entity, the same as 1).
     *\param to_proc       If 'store_handles' is true, the processor rank for
     *                     which to store the corresponding remote entity
     *                     handles.
     */
    ErrorCode pack_tags(Range &entities,
                        const std::vector<Tag> &src_tags,
                        const std::vector<Tag> &dst_tags,
                        const std::vector<Range> &tag_ranges,
                        Buffer *buff,
                        const bool store_handles,
                        const int to_proc);
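    // Pairing sketch (illustrative variable names): the parallel tag/range
    // lists produced by get_tag_send_list() above can feed pack_tags()
    // directly, with identical source and destination tags for a plain
    // exchange:
    //
    //   rval = pack_tags(ents, tags, tags, tag_ents, buff, true, to_proc);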

    /**\brief Calculate buffer size required to pack tag data
     *\param source_tag  The tag for which data will be serialized
     *\param entities    The entities for which tag values will be serialized
     *\param count_out   Output: The required buffer size, in bytes.
     */
    ErrorCode packed_tag_size( Tag source_tag,
                               const Range& entities,
                               int& count_out );

    /**\brief Serialize tag data
     *\param source_tag      The tag for which data will be serialized
     *\param destination_tag Tag in which to store unpacked tag data.  Typically
     *                       the same as source_tag.
     *\param entities        The entities for which tag values will be serialized
     *\param whole_range     Calculate entity indices as location in this range
     *\param buff            Buffer into which to write binary serialized data;
     *                       on return its position is just past the serialized
     *                       data.
     *\param store_remote_handles  The data for each tag is preceded by a list
     *                       of EntityHandles designating the entity to which
     *                       each of the subsequent tag values corresponds.
     *                       Each handle may be one of:
     *                       1) If store_remote_handles == false:
     *                          an invalid handle composed of {MBMAXTYPE,idx},
     *                          where idx is the position of the entity in
     *                          "whole_range";
     *                       2) if store_remote_handles == true and a valid
     *                          remote handle exists, the remote handle;
     *                       3) if store_remote_handles == true and no valid
     *                          remote handle is defined for the entity, the
     *                          same as 1).
     *\param to_proc         If 'store_remote_handles' is true, the processor
     *                       rank for which to store the corresponding remote
     *                       entity handles.
     */
    ErrorCode pack_tag( Tag source_tag,
                        Tag destination_tag,
                        const Range &entities,
                        const std::vector<EntityHandle> &whole_range,
                        Buffer *buff,
                        const bool store_remote_handles,
                        const int to_proc );

    ErrorCode unpack_tags(unsigned char *&buff_ptr,
                          std::vector<EntityHandle> &entities,
                          const bool store_handles,
                          const int to_proc,
                          const MPI_Op * const mpi_op = NULL);

    ErrorCode tag_shared_verts(TupleList &shared_verts,
                               Range *skin_ents,
                               std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
                               Range &proc_verts);

    ErrorCode get_proc_nvecs(int resolve_dim,
                             int shared_dim,
                             Range *skin_ents,
                             std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);

    //! after shared entities are verified, parent/child links between sets
    //! can be established
    ErrorCode create_iface_pc_links();

    //! pack a range map with keys in this_range and values a contiguous series
    //! of handles starting at actual_start
    ErrorCode pack_range_map(Range &this_range, EntityHandle actual_start,
                             HandleMap &handle_map);

    //! returns true if the set is an interface shared with to_proc
    bool is_iface_proc(EntityHandle this_set, int to_proc);

    //! for any remote_handles set to zero, remove the corresponding sent_ents
    //! from the iface_sets corresponding to from_proc
    ErrorCode update_iface_sets(Range &sent_ents,
                                std::vector<EntityHandle> &remote_handles,
                                int from_proc);

    //! for the specified bridge/ghost dimension, to_proc, and number of
    //! layers, get the entities to be ghosted, and info on additional procs
    //! needing to communicate with to_proc
    ErrorCode get_ghosted_entities(int bridge_dim,
                                   int ghost_dim,
                                   int to_proc,
                                   int num_layers,
                                   int addl_ents,
                                   Range &ghosted_ents);

    //! add vertices adjacent to entities in this list
    ErrorCode add_verts(Range &sent_ents);

    //! Every processor sends shared entity handle data to every other processor
    //! that it shares entities with.  Passed back map is all received data,
    //! indexed by processor ID.  This function is intended to be used for
    //! debugging.
    ErrorCode exchange_all_shared_handles(
                                          std::vector<std::vector<SharedEntityData> > &send_data,
                                          std::vector<std::vector<SharedEntityData> > &result);

    //! replace handles in from_vec with corresponding handles on
    //! to_proc (by checking shared[p/h]_tag and shared[p/h]s_tag);
    //! if no remote handle exists and new_ents is non-null, substitute
    //! instead CREATE_HANDLE(MBMAXTYPE, index), where index is the handle's
    //! position in new_ents
    ErrorCode get_remote_handles(const bool store_remote_handles,
                                 EntityHandle *from_vec,
                                 EntityHandle *to_vec_tmp,
                                 int num_ents, int to_proc,
                                 const std::vector<EntityHandle> &new_ents);
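    // Placeholder-handle sketch (illustrative, not a new API): when an entity
    // has no remote handle, its position in new_ents is encoded in an
    // otherwise-invalid handle, which get_local_handles() below later decodes:
    //
    //   int err;
    //   EntityHandle placeholder = CREATE_HANDLE(MBMAXTYPE, idx, err);
    //   EntityID idx_back = ID_FROM_HANDLE(placeholder); // recovers idx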

    //! same as other version, except from_range and to_range should be
    //! different here
    ErrorCode get_remote_handles(const bool store_remote_handles,
                                 const Range &from_range,
                                 Range &to_range,
                                 int to_proc,
                                 const std::vector<EntityHandle> &new_ents);

    //! same as other version, except packs range into vector
    ErrorCode get_remote_handles(const bool store_remote_handles,
                                 const Range &from_range,
                                 EntityHandle *to_vec,
                                 int to_proc,
                                 const std::vector<EntityHandle> &new_ents);

    //! go through from_vec, and for any handle with type MBMAXTYPE, replace
    //! it with the new_ents value at the index encoded in the handle's id
    ErrorCode get_local_handles(EntityHandle *from_vec,
                                int num_ents,
                                const Range &new_ents);

    //! same as above except puts results in a range
    ErrorCode get_local_handles(const Range &remote_handles,
                                Range &local_handles,
                                const std::vector<EntityHandle> &new_ents);

    //! same as above except gets new_ents from a vector
    ErrorCode get_local_handles(EntityHandle *from_vec,
                                int num_ents,
                                const std::vector<EntityHandle> &new_ents);

    ErrorCode update_remote_data(Range &local_range,
                                 Range &remote_range,
                                 int other_proc,
                                 const unsigned char add_pstat);

    ErrorCode update_remote_data(const EntityHandle new_h,
                                 const int *ps,
                                 const EntityHandle *hs,
                                 const int num_ps,
                                 const unsigned char add_pstat);

    ErrorCode update_remote_data_old(const EntityHandle new_h,
                                     const int *ps,
                                     const EntityHandle *hs,
                                     const int num_ps,
                                     const unsigned char add_pstat);

    /** \brief Set pstatus tag interface bit on entities in sets passed in
     */
    ErrorCode tag_iface_entities();

    //! add a pc to the iface instance tag PARALLEL_COMM
    int add_pcomm(ParallelComm *pc);

    //! remove a pc from the iface instance tag PARALLEL_COMM
    void remove_pcomm(ParallelComm *pc);

    //! check entities to make sure there are no zero-valued remote handles
    //! where they shouldn't be
    ErrorCode check_sent_ents(Range &allsent);

    //! assign entities to the input processor part
    ErrorCode assign_entities_part(std::vector<EntityHandle> &entities, const int proc);

    //! remove entities from the input processor part
    ErrorCode remove_entities_part(Range &entities, const int proc);

    //! MB interface associated with this ParallelComm instance
    Interface *mbImpl;

    //! Proc config object; keeps info on the parallel environment
    ProcConfig procConfig;

    //! Sequence manager, to get more efficient access to entities
    SequenceManager *sequenceManager;

    //! Error handler
    Error *errorHandler;

    //! data buffers, one per communicating processor
    std::vector<Buffer*> localOwnedBuffs, remoteOwnedBuffs;

    //! reset message buffers to their initial state
    //void reset_all_buffers();

    //! delete all buffers, freeing up any memory held by them
    void delete_all_buffers();

    //! send request objects; may be used if store_remote_handles is used
    std::vector<MPI_Request> sendReqs;

    //! receive request objects
    std::vector<MPI_Request> recvReqs, recvRemotehReqs;

    //! processor rank for each buffer index
    std::vector<unsigned int> buffProcs;

    //! the partition and interface sets for this communication instance
    Range partitionSets, interfaceSets;

    //! all local entities shared with others, whether ghost or ghosted
    std::vector<EntityHandle> sharedEnts;

    //! tags used to save sharing procs and handles
    Tag sharedpTag, sharedpsTag, sharedhTag, sharedhsTag, pstatusTag,
      ifaceSetsTag, partitionTag;

    int globalPartCount; //!< Cache of global part count

    EntityHandle partitioningSet; //!< entity set containing all parts

    std::ofstream myFile;

    int pcommID;

    int ackbuff;

    //! used to set verbosity level and to report output
    DebugOutput *myDebug;

    //! Data about shared sets
    SharedSetData* sharedSetData;

  };

  inline ParallelComm::Buffer::Buffer(const Buffer &other_buff)
  {
    // deep-copy the other buffer's storage, preserving its current position
    alloc_size = other_buff.alloc_size;
    mem_ptr = (unsigned char *)malloc(alloc_size);
    memcpy(mem_ptr, other_buff.mem_ptr, alloc_size);
    buff_ptr = mem_ptr + (other_buff.buff_ptr - other_buff.mem_ptr);
  }

  inline ParallelComm::Buffer::Buffer(unsigned int new_size)
    : mem_ptr(NULL), buff_ptr(NULL), alloc_size(0)
  {
    if (new_size) this->reserve(new_size);
  }

  inline ParallelComm::Buffer::~Buffer()
  {
    if (mem_ptr) {
      free(mem_ptr);
      mem_ptr = NULL;
    }
  }

#define DEBUG_BUFFER 0

  inline void ParallelComm::Buffer::reserve(unsigned int new_size) {

#if DEBUG_BUFFER
    // debug path: always reallocate and copy
    int tmp_pos = 0;
    if (mem_ptr) {
      tmp_pos = buff_ptr - mem_ptr;
    }
    buff_ptr = (unsigned char *)malloc(new_size);
    assert(0 <= tmp_pos && tmp_pos <= (int)alloc_size);
    if (tmp_pos) memcpy(buff_ptr, mem_ptr, tmp_pos);
    if (mem_ptr) free(mem_ptr);
    mem_ptr = buff_ptr;
    alloc_size = new_size;
    buff_ptr = mem_ptr + tmp_pos;
#else
    if (mem_ptr && alloc_size < new_size) {
      // grow in place, preserving the current read/write position
      size_t tmp_pos = buff_ptr - mem_ptr;
      mem_ptr = (unsigned char *)realloc(mem_ptr, new_size);
      alloc_size = new_size;
      buff_ptr = mem_ptr + tmp_pos;
    }
    else if (!mem_ptr) {
      mem_ptr = (unsigned char *)malloc(new_size);
      alloc_size = new_size;
      buff_ptr = mem_ptr;
    }
#endif
  }

  inline void ParallelComm::Buffer::check_space(unsigned int addl_space)
  {
    assert(buff_ptr >= mem_ptr && buff_ptr <= mem_ptr + alloc_size);
    unsigned int new_size = buff_ptr - mem_ptr + addl_space;
    if (new_size > alloc_size)
      reserve(3*new_size/2);
  }
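  // Growth-policy note (illustrative arithmetic): check_space() grows the
  // allocation to 1.5x the total required size, so repeated small appends
  // need only an amortized-constant number of reallocations.  For example,
  // with alloc_size == 100 and 20 bytes already written, check_space(200)
  // computes new_size == 220 and calls reserve(330).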

  inline void ParallelComm::reset_all_buffers()
  {
    std::vector<Buffer*>::iterator vit;
    for (vit = localOwnedBuffs.begin(); vit != localOwnedBuffs.end(); ++vit)
      (*vit)->reset_buffer();
    for (vit = remoteOwnedBuffs.begin(); vit != remoteOwnedBuffs.end(); ++vit)
      (*vit)->reset_buffer();
  }

  inline void ParallelComm::delete_all_buffers()
  {
    std::vector<Buffer*>::iterator vit;
    for (vit = localOwnedBuffs.begin(); vit != localOwnedBuffs.end(); ++vit)
      delete (*vit);
    localOwnedBuffs.clear();

    for (vit = remoteOwnedBuffs.begin(); vit != remoteOwnedBuffs.end(); ++vit)
      delete (*vit);
    remoteOwnedBuffs.clear();
  }

  inline const std::vector<unsigned int> &ParallelComm::buff_procs() const
  {
    return buffProcs;
  }

  inline ErrorCode ParallelComm::get_shared_proc_tags(Tag &sharedp,
                                                      Tag &sharedps,
                                                      Tag &sharedh,
                                                      Tag &sharedhs,
                                                      Tag &pstatus)
  {
    sharedp = sharedp_tag();
    sharedps = sharedps_tag();
    sharedh = sharedh_tag();
    sharedhs = sharedhs_tag();
    pstatus = pstatus_tag();

    return MB_SUCCESS;
  }
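  // Usage sketch: fetch all five sharing-related tag handles in one call
  // ('pcomm' is an assumption for illustration):
  //
  //   Tag sp, sps, sh, shs, pstat;
  //   ErrorCode rval = pcomm->get_shared_proc_tags(sp, sps, sh, shs, pstat);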

  inline ErrorCode ParallelComm::exchange_tags( const char *tag_name,
                                                const Range &entities)
  {
    // get the tag handle
    std::vector<Tag> tags(1);
    ErrorCode result = mbImpl->tag_get_handle(tag_name, 0, MB_TYPE_OPAQUE, tags[0], MB_TAG_ANY);
    if (MB_SUCCESS != result) return result;
    else if (!tags[0]) return MB_TAG_NOT_FOUND;

    return exchange_tags(tags, tags, entities);
  }
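  // Usage sketch (the tag name and the pre-populated range of shared
  // entities are assumptions for illustration):
  //
  //   Range shared_ents;   // filled by the application
  //   ErrorCode rval = pcomm->exchange_tags("TEMPERATURE", shared_ents);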

  inline ErrorCode ParallelComm::exchange_tags( Tag tagh,
                                                const Range &entities)
  {
    // wrap the tag handle in a vector
    std::vector<Tag> tags;
    tags.push_back(tagh);

    return exchange_tags(tags, tags, entities);
  }

  inline ErrorCode ParallelComm::reduce_tags( const char *tag_name,
                                              const MPI_Op mpi_op,
                                              const Range &entities)
  {
    // get the tag handle
    std::vector<Tag> tags(1);
    ErrorCode result = mbImpl->tag_get_handle(tag_name, 0, MB_TYPE_OPAQUE, tags[0], MB_TAG_ANY);
    if (MB_SUCCESS != result) return result;
    else if (!tags[0]) return MB_TAG_NOT_FOUND;

    return reduce_tags(tags, tags, mpi_op, entities);
  }

  inline ErrorCode ParallelComm::reduce_tags( Tag tagh,
                                              const MPI_Op mpi_op,
                                              const Range &entities)
  {
    // wrap the tag handle in a vector
    std::vector<Tag> tags;
    tags.push_back(tagh);

    return reduce_tags(tags, tags, mpi_op, entities);
  }
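  // Usage sketch: combine tag values across all sharing processors so every
  // copy of a shared entity ends up with the reduced value ("ERROR_EST" and
  // 'shared_ents' are illustrative):
  //
  //   ErrorCode rval = pcomm->reduce_tags("ERROR_EST", MPI_MAX, shared_ents);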

  inline ErrorCode ParallelComm::get_comm_procs(std::set<unsigned int> &procs)
  {
    ErrorCode result = get_interface_procs(procs);
    if (MB_SUCCESS != result) return result;

    std::copy(buffProcs.begin(), buffProcs.end(), std::inserter(procs, procs.begin()));

    return MB_SUCCESS;
  }

  inline ErrorCode ParallelComm::get_owner(EntityHandle entity,
                                           int &owner)
  {
    EntityHandle tmp_handle;
    return get_owner_handle(entity, owner, tmp_handle);
  }
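  // Usage sketch: test whether this processor owns an entity ('ent' and
  // 'pcomm' are assumptions for illustration):
  //
  //   int owner;
  //   ErrorCode rval = pcomm->get_owner(ent, owner);
  //   bool is_mine = (MB_SUCCESS == rval &&
  //                   owner == (int)pcomm->proc_config().proc_rank());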

  /** \brief Unpack message with remote handles (const pointer to buffer)
   */
  inline ErrorCode ParallelComm::unpack_remote_handles(unsigned int from_proc,
                                                       const unsigned char *buff_ptr,
                                                       std::vector<EntityHandle> &L2hloc,
                                                       std::vector<EntityHandle> &L2hrem,
                                                       std::vector<unsigned int> &L2p)
  {
    // cast away const-ness; we won't be passing back a modified ptr
    unsigned char *tmp_buff = const_cast<unsigned char*>(buff_ptr);
    return unpack_remote_handles(from_proc, tmp_buff, L2hloc, L2hrem, L2p);
  }

  inline void ParallelComm::set_rank(unsigned int r)
  {
    procConfig.proc_rank(r);
    // a rank of r implies at least r+1 processes
    if (procConfig.proc_size() <= r) procConfig.proc_size(r+1);
  }

  inline void ParallelComm::set_size(unsigned int s)
  {
    procConfig.proc_size(s);
  }

  inline ErrorCode ParallelComm::get_sharing_data(const EntityHandle *entities,
                                                  int num_entities,
                                                  std::set<int> &procs,
                                                  int op)
  {
    // copy the input handles into a Range and defer to the Range overload
    Range dum_range;
    std::copy(entities, entities + num_entities, range_inserter(dum_range));
    return get_sharing_data(dum_range, procs, op);
  }

  inline ErrorCode ParallelComm::get_sharing_data(const EntityHandle entity,
                                                  int *ps,
                                                  EntityHandle *hs,
                                                  unsigned char &pstat,
                                                  int &num_ps)
  {
    unsigned int dum_ps;
    ErrorCode result = get_sharing_data(entity, ps, hs, pstat, dum_ps);
    if (MB_SUCCESS == result)
      num_ps = dum_ps;
    return result;
  }
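  // Usage sketch for the single-entity overload ('ent' and 'pcomm' are
  // assumptions; the output arrays are bounded by MAX_SHARING_PROCS):
  //
  //   int ps[MAX_SHARING_PROCS];
  //   EntityHandle hs[MAX_SHARING_PROCS];
  //   unsigned char pstat;
  //   int num_ps;
  //   ErrorCode rval = pcomm->get_sharing_data(ent, ps, hs, pstat, num_ps);
  //   // ps[0..num_ps-1] are sharing ranks; hs[i] is the handle on ps[i]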

  inline void ParallelComm::set_send_request(int n_request)
  {
    sendReqs.resize(n_request, MPI_REQUEST_NULL);
  }

  inline void ParallelComm::set_recv_request(int n_request)
  {
    recvReqs.resize(n_request, MPI_REQUEST_NULL);
  }

} // namespace moab

#endif