1 #include "moab/Interface.hpp"
2 #include "moab/ParallelComm.hpp"
3 #include "moab/WriteUtilIface.hpp"
4 #include "moab/ReadUtilIface.hpp"
5 #include "SequenceManager.hpp"
6 #include "moab/Error.hpp"
7 #include "EntitySequence.hpp"
8 #include "MBTagConventions.hpp"
9 #include "moab/Skinner.hpp"
10 #include "MBParallelConventions.h"
11 #include "moab/Core.hpp"
12 #include "ElementSequence.hpp"
13 #include "moab/CN.hpp"
14 #include "moab/RangeMap.hpp"
15 #include "moab/MeshTopoUtil.hpp"
16 #include "TagInfo.hpp"
17 #include "DebugOutput.hpp"
18 #include "SharedSetData.hpp"
19 #include "moab/ScdInterface.hpp"
20 #include "moab/TupleList.hpp"
21 #include "moab/gs.hpp"
22 
23 #include <iostream>
24 #include <sstream>
25 #include <algorithm>
26 #include <functional>
27 #include <numeric>
28 
29 #include <math.h>
30 #include <assert.h>
31 
32 #ifdef MOAB_HAVE_MPI
33 #include "moab_mpi.h"
34 #endif
35 #ifdef MOAB_HAVE_MPE
36 #include "mpe.h"
37 int IFACE_START, IFACE_END;
38 int GHOST_START, GHOST_END;
39 int SHAREDV_START, SHAREDV_END;
40 int RESOLVE_START, RESOLVE_END;
41 int ENTITIES_START, ENTITIES_END;
42 int RHANDLES_START, RHANDLES_END;
43 int OWNED_START, OWNED_END;
44 #endif
45 
46 namespace moab {
47 
48   const unsigned int ParallelComm::INITIAL_BUFF_SIZE = 1024;
49 
50   const int MAX_BCAST_SIZE = (1 << 28);
51 
52   std::vector<ParallelComm::Buffer*> msgs;
53   unsigned int __PACK_num = 0, __UNPACK_num = 0, __PACK_count = 0, __UNPACK_count = 0;
54   std::string __PACK_string, __UNPACK_string;
55 
56 #ifdef DEBUG_PACKING_TIMES
57 #define PC(n, m) { \
58     if (__PACK_num == (unsigned int)n && __PACK_string == m) __PACK_count++; \
59     else { \
60       if (__PACK_count > 1) std::cerr << " (" << __PACK_count << "x)"; \
61       __PACK_count = 1; __PACK_string = m; __PACK_num = n; \
62       std::cerr << std::endl << "PACK: " << n << m; \
63     }}
64 #define UPC(n, m) { \
65     if (__UNPACK_num == (unsigned int)n && __UNPACK_string == m) __UNPACK_count++; \
66     else { \
67       if (__UNPACK_count > 1) std::cerr << "(" << __UNPACK_count << "x)"; \
68       __UNPACK_count = 1; __UNPACK_string = m; __UNPACK_num = n; \
69       std::cerr << std::endl << "UNPACK: " << n << m; \
70     }}
71 #else
72 #define PC(n, m)
73 #define UPC(n, m)
74 #endif
75 
76   template <typename T> static inline
77   void UNPACK(unsigned char*& buff, T* val, size_t count)
78   {
79     memcpy(val, buff, count*sizeof(T));
80     buff += count*sizeof(T);
81   }
82 
83   template <typename T> static inline
84   void PACK(unsigned char*& buff, const T* val, size_t count)
85   {
86     memcpy(buff, val, count*sizeof(T));
87     buff += count*sizeof(T);
88   }
89 
90   static inline
91   void PACK_INTS(unsigned char*& buff, const int* int_val, size_t num)
92   { PACK(buff, int_val, num); PC(num, " ints"); }
93 
94   static inline
95   void PACK_INT(unsigned char*& buff, int int_val)
96   { PACK_INTS(buff, &int_val, 1); }
97 
98   static inline
99   void PACK_DBLS(unsigned char*& buff, const double* dbl_val, size_t num)
100   { PACK(buff, dbl_val, num); PC(num, " doubles"); }
101 
102   //static inline
103   //void PACK_DBL(unsigned char*& buff, const double dbl_val)
104   //{ PACK_DBLS(buff, &dbl_val, 1); }
105 
106   static inline
107   void PACK_EH(unsigned char*& buff, const EntityHandle* eh_val, size_t num)
108   { PACK(buff, eh_val, num); PC(num, " handles"); }
109 
110   //static inline
111   //void PACK_CHAR_64(unsigned char*& buff, const char* str)
112   //{
113   //  memcpy(buff, str, 64);
114   //  buff += 64;
115   //  PC(64, " chars");
116   //}
117 
118   static inline
119   void PACK_VOID(unsigned char*& buff, const void* val, size_t num)
120   {
121     PACK(buff, reinterpret_cast<const unsigned char*>(val), num);
122     PC(num, " void");
123   }
124 
125   static inline
126   void PACK_BYTES(unsigned char*& buff, const void* val, int num)
127   { PACK_INT(buff, num); PACK_VOID(buff, val, num); }
128 
129   static inline
130   void PACK_RANGE(unsigned char*& buff, const Range& rng)
131   {
132     PACK_INT(buff, rng.psize());
133     Range::const_pair_iterator cit;
134     for (cit = rng.const_pair_begin(); cit != rng.const_pair_end(); ++cit) {
135       EntityHandle eh[2] = { cit->first, cit->second };
136       PACK_EH(buff, eh, 2);
137     }
138     PC(rng.psize(), "-subranged range");
139   }
140 
141   static inline
142   void UNPACK_INTS(unsigned char*& buff, int* int_val, size_t num)
143   { UNPACK(buff, int_val, num); UPC(num, " ints"); }
144 
145   static inline
146   void UNPACK_INT(unsigned char*& buff, int& int_val)
147   { UNPACK_INTS(buff, &int_val, 1); }
148 
149   static inline
150   void UNPACK_DBLS(unsigned char*& buff, double* dbl_val, size_t num)
151   { UNPACK(buff, dbl_val, num); UPC(num, " doubles"); }
152 
153   static inline
154   void UNPACK_DBL(unsigned char*& buff, double &dbl_val)
155   { UNPACK_DBLS(buff, &dbl_val, 1); }
156 
157   static inline
158   void UNPACK_EH(unsigned char*& buff, EntityHandle* eh_val, size_t num)
159   { UNPACK(buff, eh_val, num); UPC(num, " handles"); }
160 
161   //static inline
162   //void UNPACK_CHAR_64(unsigned char*& buff, char* char_val)
163   //{
164   //  memcpy(buff, char_val, 64);
165   //  buff += 64;
166   //  UPC(64, " chars");
167   //}
168 
169   static inline
170   void UNPACK_VOID(unsigned char*& buff, void* val, size_t num)
171   {
172     UNPACK(buff, reinterpret_cast<unsigned char*>(val), num);
173     UPC(num, " void");
174   }
175 
176   static inline
177   void UNPACK_TYPE(unsigned char*& buff, EntityType& type)
178   {
179     int int_type = MBMAXTYPE;
180     UNPACK_INT(buff, int_type);
181     type = static_cast<EntityType>(int_type);
182     assert(type >= MBVERTEX && type <= MBMAXTYPE);
183   }
184 
185   static inline
186   void UNPACK_RANGE(unsigned char*& buff, Range& rng)
187   {
188     int num_subs;
189     EntityHandle eh[2];
190     UNPACK_INT(buff, num_subs);
191     for (int i = 0; i < num_subs; i++) {
192       UPC(num_subs, "-subranged range");
193       UNPACK_EH(buff, eh, 2);
194       rng.insert(eh[0], eh[1]);
195     }
196   }
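  // Usage sketch for the PACK_*/UNPACK_* helpers above (an illustrative
  // example added for clarity, not part of the library source): each helper
  // memcpy's its payload and advances the cursor, so the unpack sequence on
  // the receiver must mirror the pack sequence on the sender exactly.
  //
  //   unsigned char raw[128];
  //   unsigned char *wr = raw, *rd = raw;   // write and read cursors
  //   PACK_INT(wr, 42);                     // writes sizeof(int) bytes, advances wr
  //   PACK_DBLS(wr, coords, 3);             // 'coords' is an assumed double[3]
  //   int n;       UNPACK_INT(rd, n);       // n == 42
  //   double c[3]; UNPACK_DBLS(rd, c, 3);   // c repeats coords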
197 
198   enum MBMessageTag {MB_MESG_ANY=MPI_ANY_TAG,
199                      MB_MESG_ENTS_ACK,
200                      MB_MESG_ENTS_SIZE,
201                      MB_MESG_ENTS_LARGE,
202                      MB_MESG_REMOTEH_ACK,
203                      MB_MESG_REMOTEH_SIZE,
204                      MB_MESG_REMOTEH_LARGE,
205                      MB_MESG_TAGS_ACK,
206                      MB_MESG_TAGS_SIZE,
207                      MB_MESG_TAGS_LARGE
208   };
209 
210   static inline size_t RANGE_SIZE(const Range& rng)
211   { return 2*sizeof(EntityHandle)*rng.psize() + sizeof(int); }
212 
213 #define PRINT_DEBUG_ISEND(A,B,C,D,E)   print_debug_isend((A),(B),(C),(D),(E))
214 #define PRINT_DEBUG_IRECV(A,B,C,D,E,F) print_debug_irecv((A),(B),(C),(D),(E),(F))
215 #define PRINT_DEBUG_RECD(A)            print_debug_recd((A))
216 #define PRINT_DEBUG_WAITANY(A,B,C)     print_debug_waitany((A),(B),(C))
217 
218   void ParallelComm::print_debug_isend(int from, int to, unsigned char *buff,
219                                        int tag, int sz)
220   {
221     myDebug->tprintf(3, "Isend, %d->%d, buffer ptr = %p, tag=%d, size=%d\n",
222                      from, to, (void*)buff, tag, sz);
223   }
224 
225   void ParallelComm::print_debug_irecv(int to, int from, unsigned char *buff, int sz,
226                                        int tag, int incoming)
227   {
228     myDebug->tprintf(3, "Irecv, %d<-%d, buffer ptr = %p, tag=%d, size=%d",
229                      to, from, (void*)buff, tag, sz);
230     if (tag < MB_MESG_REMOTEH_ACK)
231       myDebug->printf(3, ", incoming1=%d\n", incoming);
232     else if (tag < MB_MESG_TAGS_ACK)
233       myDebug->printf(3, ", incoming2=%d\n", incoming);
234     else
235       myDebug->printf(3, ", incoming=%d\n", incoming);
236   }
237 
238   void ParallelComm::print_debug_recd(MPI_Status status)
239   {
240     if (myDebug->get_verbosity() == 3) {
241       int this_count;
242       int success = MPI_Get_count(&status, MPI_UNSIGNED_CHAR, &this_count);
243       if (MPI_SUCCESS != success)
244         this_count = -1;
245       myDebug->tprintf(3, "Received from %d, count = %d, tag = %d\n",
246                        status.MPI_SOURCE, this_count, status.MPI_TAG);
247     }
248   }
249 
250   void ParallelComm::print_debug_waitany(std::vector<MPI_Request> &reqs, int tag, int proc)
251   {
252     if (myDebug->get_verbosity() == 3) {
253       myDebug->tprintf(3, "Waitany, p=%d, ", proc);
254       if (tag < MB_MESG_REMOTEH_ACK)
255         myDebug->print(3, ", recv_ent_reqs=");
256       else if (tag < MB_MESG_TAGS_ACK)
257         myDebug->print(3, ", recv_remoteh_reqs=");
258       else
259         myDebug->print(3, ", recv_tag_reqs=");
260       for (unsigned int i = 0; i < reqs.size(); i++)
261         myDebug->printf(3, " %p", (void*)(intptr_t)reqs[i]);
262       myDebug->print(3, "\n");
263     }
264   }
265 
266   /** Name of tag used to store ParallelComm index on mesh partitioning sets */
267   const char* PARTITIONING_PCOMM_TAG_NAME = "__PRTN_PCOMM";
268 
269   /** \brief Tag storing parallel communication objects
270    *
271    * This tag stores pointers to ParallelComm communication
272    * objects; one of these is allocated for each different
273    * communicator used to read a mesh. ParallelComm stores
274    * partition and interface sets corresponding to its parallel mesh.
275    * By default, a parallel read uses the first ParallelComm object
276    * on the interface instance; if instantiated with one, ReadParallel
277    * adds this object to the interface instance too.
278    *
279    * Tag type: opaque
280    * Tag size: MAX_SHARING_PROCS*sizeof(ParallelComm*)
281    */
282 #define PARALLEL_COMM_TAG_NAME "__PARALLEL_COMM"
283 
284   ParallelComm::ParallelComm(Interface *impl, MPI_Comm cm, int* id)
285     : mbImpl(impl), procConfig(cm),
286       sharedpTag(0), sharedpsTag(0),
287       sharedhTag(0), sharedhsTag(0), pstatusTag(0), ifaceSetsTag(0),
288       partitionTag(0), globalPartCount(-1), partitioningSet(0),
289       myDebug(NULL)
290   {
291     initialize();
292     sharedSetData = new SharedSetData(*impl, pcommID, procConfig.proc_rank());
293     if (id)
294       *id = pcommID;
295   }
296 
297   ParallelComm::ParallelComm(Interface *impl,
298                              std::vector<unsigned char> &/*tmp_buff*/,
299                              MPI_Comm cm,
300                              int* id)
301     : mbImpl(impl), procConfig(cm),
302       sharedpTag(0), sharedpsTag(0),
303       sharedhTag(0), sharedhsTag(0), pstatusTag(0), ifaceSetsTag(0),
304       partitionTag(0), globalPartCount(-1), partitioningSet(0),
305       myDebug(NULL)
306   {
307     initialize();
308     sharedSetData = new SharedSetData(*impl, pcommID, procConfig.proc_rank());
309     if (id)
310       *id = pcommID;
311   }
312 
313   ParallelComm::~ParallelComm()
314   {
315     remove_pcomm(this);
316     delete_all_buffers();
317     delete myDebug;
318     delete sharedSetData;
319   }
320 
321   void ParallelComm::initialize()
322   {
323     Core* core = dynamic_cast<Core*>(mbImpl);
324     sequenceManager = core->sequence_manager();
325     mbImpl->query_interface(errorHandler);
326 
327     // Initialize MPI, if necessary
328     int flag = 1;
329     int retval = MPI_Initialized(&flag);
330     if (MPI_SUCCESS != retval || !flag) {
331       int argc = 0;
332       char **argv = NULL;
333 
334       // mpi not initialized yet - initialize here
335       retval = MPI_Init(&argc, &argv);
336       assert(MPI_SUCCESS == retval);
337     }
338 
339     // Reserve space for vectors
340     buffProcs.reserve(MAX_SHARING_PROCS);
341     localOwnedBuffs.reserve(MAX_SHARING_PROCS);
342     remoteOwnedBuffs.reserve(MAX_SHARING_PROCS);
343 
344     pcommID = add_pcomm(this);
345 
346     if (!myDebug)
347     {
348       myDebug = new DebugOutput("ParallelComm", std::cerr);
349       myDebug->set_rank( procConfig.proc_rank());
350     }
351   }
352 
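  // Record this ParallelComm instance in the interface-wide tag kept on the
  // root set (entity handle 0): the tag holds a fixed array of
  // MAX_SHARING_PROCS ParallelComm pointers, and the first empty slot is
  // used. The slot index becomes this instance's pcommID; -1 is returned if
  // every slot is already taken.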
353   int ParallelComm::add_pcomm(ParallelComm *pc)
354   {
355     // Add this pcomm to instance tag
356     std::vector<ParallelComm *> pc_array(MAX_SHARING_PROCS,
357                                          (ParallelComm*)NULL);
358     Tag pc_tag = pcomm_tag(mbImpl, true);
359     assert(0 != pc_tag);
360 
361     const EntityHandle root = 0;
362     ErrorCode result = mbImpl->tag_get_data(pc_tag, &root, 1, (void*)&pc_array[0]);
363     if (MB_SUCCESS != result && MB_TAG_NOT_FOUND != result)
364       return -1;
365     int index = 0;
366     while (index < MAX_SHARING_PROCS && pc_array[index])
367       index++;
368     if (index == MAX_SHARING_PROCS) {
369       index = -1;
370       assert(false);
371     }
372     else {
373       pc_array[index] = pc;
374       mbImpl->tag_set_data(pc_tag, &root, 1, (void*)&pc_array[0]);
375     }
376     return index;
377   }
378 
379   void ParallelComm::remove_pcomm(ParallelComm *pc)
380   {
381     // Remove this pcomm from instance tag
382     std::vector<ParallelComm *> pc_array(MAX_SHARING_PROCS);
383     Tag pc_tag = pcomm_tag(mbImpl, true);
384 
385     const EntityHandle root = 0;
386     ErrorCode result = mbImpl->tag_get_data(pc_tag, &root, 1, (void*)&pc_array[0]);
387     std::vector<ParallelComm*>::iterator pc_it =
388       std::find(pc_array.begin(), pc_array.end(), pc);
389     assert(MB_SUCCESS == result &&
390            pc_it != pc_array.end());
391     // Empty if test to get around compiler warning about unused var
392     if (MB_SUCCESS == result) {}
393 
394     *pc_it = NULL;
395     mbImpl->tag_set_data(pc_tag, &root, 1, (void*)&pc_array[0]);
396   }
397 
398   //! Assign a global id space, for largest-dimension or all entities (and
399   //! in either case for vertices too)
400   ErrorCode ParallelComm::assign_global_ids(EntityHandle this_set,
401                                             const int dimension,
402                                             const int start_id,
403                                             const bool largest_dim_only,
404                                             const bool parallel,
405                                             const bool owned_only)
406   {
407     Range entities[4];
408     ErrorCode result;
409     std::vector<unsigned char> pstatus;
410     for (int dim = 0; dim <= dimension; dim++) {
411       if (dim == 0 || !largest_dim_only || dim == dimension) {
412         result = mbImpl->get_entities_by_dimension(this_set, dim, entities[dim]);MB_CHK_SET_ERR(result, "Failed to get vertices in assign_global_ids");
413       }
414 
415       // Need to filter out non-locally-owned entities!!!
416       pstatus.resize(entities[dim].size());
417       result = mbImpl->tag_get_data(pstatus_tag(), entities[dim], &pstatus[0]);MB_CHK_SET_ERR(result, "Failed to get pstatus in assign_global_ids");
418 
419       Range dum_range;
420       Range::iterator rit;
421       unsigned int i;
422       for (rit = entities[dim].begin(), i = 0; rit != entities[dim].end(); ++rit, i++)
423         if (pstatus[i] & PSTATUS_NOT_OWNED)
424           dum_range.insert(*rit);
425       entities[dim] = subtract(entities[dim], dum_range);
426     }
427 
428     return assign_global_ids(entities, dimension, start_id, parallel, owned_only);
429   }
430 
431   //! Assign a global id space, for largest-dimension or all entities (and
432   //! in either case for vertices too)
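  //! For example (illustrative numbers only): with parallel == true,
  //! start_id == 1 and per-rank counts of 4, 6 and 5 elements of the largest
  //! dimension, rank 0 assigns ids 1..4, rank 1 assigns 5..10 and rank 2
  //! assigns 11..15; vertex ids are offset the same way using the vertex
  //! counts gathered in the same MPI_Allgather.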
433   ErrorCode ParallelComm::assign_global_ids(Range entities[],
434                                             const int dimension,
435                                             const int start_id,
436                                             const bool parallel,
437                                             const bool owned_only)
438   {
439     int local_num_elements[4];
440     ErrorCode result;
441     for (int dim = 0; dim <= dimension; dim++) {
442       local_num_elements[dim] = entities[dim].size();
443     }
444 
445     // Communicate numbers
446     std::vector<int> num_elements(procConfig.proc_size() * 4);
447 #ifdef MOAB_HAVE_MPI
448     if (procConfig.proc_size() > 1 && parallel) {
449       int retval = MPI_Allgather(local_num_elements, 4, MPI_INT,
450                                  &num_elements[0], 4,
451                                  MPI_INT, procConfig.proc_comm());
452       if (0 != retval)
453         return MB_FAILURE;
454     }
455     else
456 #endif
457       for (int dim = 0; dim < 4; dim++)
458         num_elements[dim] = local_num_elements[dim];
459 
460     // This proc's ids start at start_id plus the entity counts of all lower-ranked procs
461     int total_elems[4] = {start_id, start_id, start_id, start_id};
462 
463     for (unsigned int proc = 0; proc < procConfig.proc_rank(); proc++) {
464       for (int dim = 0; dim < 4; dim++)
465         total_elems[dim] += num_elements[4*proc + dim];
466     }
467 
468     // Assign global ids now
469     Tag gid_tag;
470     int zero = 0;
471     result = mbImpl->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER,
472                                     gid_tag, MB_TAG_DENSE | MB_TAG_CREAT, &zero);
473     if (MB_SUCCESS != result) return result;
474 
475     for (int dim = 0; dim < 4; dim++) {
476       if (entities[dim].empty())
477         continue;
478       num_elements.resize(entities[dim].size());
479       int i = 0;
480       for (Range::iterator rit = entities[dim].begin(); rit != entities[dim].end(); ++rit)
481         num_elements[i++] = total_elems[dim]++;
482 
483       result = mbImpl->tag_set_data(gid_tag, entities[dim], &num_elements[0]);MB_CHK_SET_ERR(result, "Failed to set global id tag in assign_global_ids");
484     }
485 
486     if (owned_only)
487       return MB_SUCCESS;
488 
489     // Exchange tags
490     for (int dim = 1; dim < 4; dim++)
491       entities[0].merge(entities[dim]);
492 
493     return exchange_tags(gid_tag, entities[0]);
494   }
495 
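  // Return the index of the buffer pair used to talk to to_proc, appending
  // to_proc to buffProcs and allocating fresh local/remote buffers of
  // INITIAL_BUFF_SIZE bytes if this is the first exchange with that rank;
  // the optional is_new flag reports whether a new slot was created.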
496   int ParallelComm::get_buffers(int to_proc, bool *is_new)
497   {
498     int ind = -1;
499     std::vector<unsigned int>::iterator vit =
500       std::find(buffProcs.begin(), buffProcs.end(), to_proc);
501     if (vit == buffProcs.end()) {
502       assert("shouldn't need buffer to myself" && to_proc != (int)procConfig.proc_rank());
503       ind = buffProcs.size();
504       buffProcs.push_back((unsigned int)to_proc);
505       localOwnedBuffs.push_back(new Buffer(INITIAL_BUFF_SIZE));
506       remoteOwnedBuffs.push_back(new Buffer(INITIAL_BUFF_SIZE));
507       if (is_new)
508         *is_new = true;
509     }
510     else {
511       ind = vit - buffProcs.begin();
512       if (is_new)
513         *is_new = false;
514     }
515     assert(ind < MAX_SHARING_PROCS);
516     return ind;
517   }
518 
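  // broadcast_entities: rank from_proc packs 'entities' (plus adjacent
  // vertices) into one buffer, broadcasts the buffer size, then broadcasts
  // the buffer itself in chunks of at most MAX_BCAST_SIZE bytes (presumably
  // to stay within MPI's int-sized count argument); every other rank unpacks
  // the result back into 'entities'.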
519   ErrorCode ParallelComm::broadcast_entities(const int from_proc,
520                                              Range &entities,
521                                              const bool adjacencies,
522                                              const bool tags)
523   {
524 #ifndef MOAB_HAVE_MPI
525     return MB_FAILURE;
526 #else
527 
528     ErrorCode result = MB_SUCCESS;
529     int success;
530     int buff_size;
531 
532     Buffer buff(INITIAL_BUFF_SIZE);
533     buff.reset_ptr(sizeof(int));
534     if ((int)procConfig.proc_rank() == from_proc) {
535       result = add_verts(entities);MB_CHK_SET_ERR(result, "Failed to add adj vertices");
536 
537       buff.reset_ptr(sizeof(int));
538       result = pack_buffer(entities, adjacencies, tags,
539                            false, -1, &buff);MB_CHK_SET_ERR(result, "Failed to compute buffer size in broadcast_entities");
540       buff.set_stored_size();
541       buff_size = buff.buff_ptr - buff.mem_ptr;
542     }
543 
544     success = MPI_Bcast(&buff_size, 1, MPI_INT, from_proc, procConfig.proc_comm());
545     if (MPI_SUCCESS != success) {
546       MB_SET_ERR(MB_FAILURE, "MPI_Bcast of buffer size failed");
547     }
548 
549     if (!buff_size) // No data
550       return MB_SUCCESS;
551 
552     if ((int)procConfig.proc_rank() != from_proc)
553       buff.reserve(buff_size);
554 
555     size_t offset = 0;
556     while (buff_size) {
557       int sz = std::min(buff_size, MAX_BCAST_SIZE);
558       success = MPI_Bcast(buff.mem_ptr + offset, sz, MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm());
559       if (MPI_SUCCESS != success) {
560         MB_SET_ERR(MB_FAILURE, "MPI_Bcast of buffer failed");
561       }
562 
563       offset += sz;
564       buff_size -= sz;
565     }
566 
567     if ((int)procConfig.proc_rank() != from_proc) {
568       std::vector<std::vector<EntityHandle> > dum1a, dum1b;
569       std::vector<std::vector<int> > dum1p;
570       std::vector<EntityHandle> dum2, dum4;
571       std::vector<unsigned int> dum3;
572       buff.reset_ptr(sizeof(int));
573       result = unpack_buffer(buff.buff_ptr, false, from_proc, -1,
574                              dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4);MB_CHK_SET_ERR(result, "Failed to unpack buffer in broadcast_entities");
575       std::copy(dum4.begin(), dum4.end(), range_inserter(entities));
576     }
577 
578     return MB_SUCCESS;
579 #endif
580   }
581 
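  // scatter_entities: rank from_proc packs one sub-buffer per destination
  // rank into a single send buffer, broadcasts the per-rank size array, and
  // uses MPI_Scatterv to hand each rank its own packed chunk, which the
  // receiving ranks unpack into entities[my_proc].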
582   ErrorCode ParallelComm::scatter_entities(const int from_proc,
583                                            std::vector<Range> &entities,
584                                            const bool adjacencies,
585                                            const bool tags)
586   {
587 #ifndef MOAB_HAVE_MPI
588     return MB_FAILURE;
589 #else
590     ErrorCode result = MB_SUCCESS;
591     int i, success, buff_size, prev_size;
592     int nProcs = (int)procConfig.proc_size();
593     int* sendCounts = new int[nProcs];
594     int* displacements = new int[nProcs];
595     sendCounts[0] = sizeof(int);
596     displacements[0] = 0;
597     Buffer buff(INITIAL_BUFF_SIZE);
598     buff.reset_ptr(sizeof(int));
599     buff.set_stored_size();
600     unsigned int my_proc = procConfig.proc_rank();
601 
602     // Get buffer size array for each remote processor
603     if (my_proc == (unsigned int) from_proc) {
604       for (i = 1; i < nProcs; i++) {
605         prev_size = buff.buff_ptr - buff.mem_ptr;
606         buff.reset_ptr(prev_size + sizeof(int));
607         result = add_verts(entities[i]);MB_CHK_SET_ERR(result, "Failed to add verts");
608 
609         result = pack_buffer(entities[i], adjacencies, tags,
610                              false, -1, &buff);
611         if (MB_SUCCESS != result) {
612           delete[] sendCounts;
613           delete[] displacements;
614           MB_SET_ERR(result, "Failed to pack buffer in scatter_entities");
615         }
616 
617         buff_size = buff.buff_ptr - buff.mem_ptr - prev_size;
618         *((int*)(buff.mem_ptr + prev_size)) = buff_size;
619         sendCounts[i] = buff_size;
620       }
621     }
622 
623     // Broadcast buffer size array
624     success = MPI_Bcast(sendCounts, nProcs, MPI_INT, from_proc, procConfig.proc_comm());
625     if (MPI_SUCCESS != success) {
626       delete[] sendCounts;
627       delete[] displacements;
628       MB_SET_ERR(MB_FAILURE, "MPI_Bcast of buffer size failed");
629     }
630 
631     for (i = 1; i < nProcs; i++) {
632       displacements[i] = displacements[i-1] + sendCounts[i-1];
633     }
634 
635     Buffer rec_buff;
636     rec_buff.reserve(sendCounts[my_proc]);
637 
638     // Scatter actual geometry
639     success = MPI_Scatterv(buff.mem_ptr, sendCounts, displacements,
640                            MPI_UNSIGNED_CHAR, rec_buff.mem_ptr, sendCounts[my_proc],
641                            MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm());
642 
643     if (MPI_SUCCESS != success) {
644       delete[] sendCounts;
645       delete[] displacements;
646       MB_SET_ERR(MB_FAILURE, "MPI_Scatterv of buffer failed");
647     }
648 
649     // Unpack on the receiving (non-root) processors
650     if (my_proc != (unsigned int) from_proc) {
651       std::vector<std::vector<EntityHandle> > dum1a, dum1b;
652       std::vector<std::vector<int> > dum1p;
653       std::vector<EntityHandle> dum2, dum4;
654       std::vector<unsigned int> dum3;
655       rec_buff.reset_ptr(sizeof(int));
656       result = unpack_buffer(rec_buff.buff_ptr, false, from_proc, -1,
657                              dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4);
658       if (MB_SUCCESS != result) {
659         delete[] sendCounts;
660         delete[] displacements;
661         MB_SET_ERR(result, "Failed to unpack buffer in scatter_entities");
662       }
663 
664       std::copy(dum4.begin(), dum4.end(), range_inserter(entities[my_proc]));
665     }
666 
667     delete[] sendCounts;
668     delete[] displacements;
669 
670     return MB_SUCCESS;
671 #endif
672   }
673 
674   ErrorCode ParallelComm::send_entities(const int to_proc,
675                                         Range &orig_ents,
676                                         const bool adjs,
677                                         const bool tags,
678                                         const bool store_remote_handles,
679                                         const bool is_iface,
680                                         Range &/*final_ents*/,
681                                         int &incoming1,
682                                         int &incoming2,
683                                         TupleList& entprocs,
684                                         std::vector<MPI_Request> &recv_remoteh_reqs,
685                                         bool /*wait_all*/)
686   {
687 #ifndef MOAB_HAVE_MPI
688     return MB_FAILURE;
689 #else
690     // Pack entities to local buffer
691     int ind = get_buffers(to_proc);
692     localOwnedBuffs[ind]->reset_ptr(sizeof(int));
693 
694     // Add vertices
695     ErrorCode result = add_verts(orig_ents);MB_CHK_SET_ERR(result, "Failed to add verts in send_entities");
696 
697     // Filter out entities already shared with destination
698     Range tmp_range;
699     result = filter_pstatus(orig_ents, PSTATUS_SHARED, PSTATUS_AND,
700                             to_proc, &tmp_range);MB_CHK_SET_ERR(result, "Failed to filter on owner");
701     if (!tmp_range.empty()) {
702       orig_ents = subtract(orig_ents, tmp_range);
703     }
704 
705     result = pack_buffer(orig_ents, adjs, tags, store_remote_handles,
706                          to_proc, localOwnedBuffs[ind], &entprocs);MB_CHK_SET_ERR(result, "Failed to pack buffer in send_entities");
707 
708     // Send buffer
709     result = send_buffer(to_proc, localOwnedBuffs[ind], MB_MESG_ENTS_SIZE,
710                          sendReqs[2*ind], recvReqs[2*ind + 1],
711                          (int*)(remoteOwnedBuffs[ind]->mem_ptr),
712                          //&ackbuff,
713                          incoming1,
714                          MB_MESG_REMOTEH_SIZE,
715                          (!is_iface && store_remote_handles ?
716                           localOwnedBuffs[ind] : NULL),
717                          &recv_remoteh_reqs[2*ind], &incoming2);MB_CHK_SET_ERR(result, "Failed to send buffer");
718 
719     return MB_SUCCESS;
720 #endif
721   }
722 
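// send_entities (multi-destination version): filter out entities already
// shared with each destination, build a (proc, handle) TupleList sorted by
// handle so pack_buffer() can tell which other processors also receive each
// entity, then pack and Isend one buffer per destination processor.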
723 ErrorCode ParallelComm::send_entities(std::vector<unsigned int>& send_procs,
724                                       std::vector<Range*>& send_ents,
725                                       int& incoming1, int& incoming2,
726                                       const bool store_remote_handles)
727 {
728 #ifdef MOAB_HAVE_MPE
729   if (myDebug->get_verbosity() == 2) {
730     MPE_Log_event(OWNED_START, procConfig.proc_rank(), "Starting send_entities.");
731   }
732 #endif
733   myDebug->tprintf(1, "Entering send_entities\n");
734   if (myDebug->get_verbosity() == 4) {
735     msgs.clear();
736     msgs.reserve(MAX_SHARING_PROCS);
737   }
738 
739   unsigned int i;
740   int ind;
741   ErrorCode result = MB_SUCCESS;
742 
743   // Set buffProcs with communicating procs
744   unsigned int n_proc = send_procs.size();
745   for (i = 0; i < n_proc; i++) {
746     ind = get_buffers(send_procs[i]);
747     result = add_verts(*send_ents[i]);MB_CHK_SET_ERR(result, "Failed to add verts");
748 
749     // Filter out entities already shared with destination
750     Range tmp_range;
751     result = filter_pstatus(*send_ents[i], PSTATUS_SHARED, PSTATUS_AND,
752                             buffProcs[ind], &tmp_range);MB_CHK_SET_ERR(result, "Failed to filter on owner");
753     if (!tmp_range.empty()) {
754       *send_ents[i] = subtract(*send_ents[i], tmp_range);
755     }
756   }
757 
758   //===========================================
759   // Get entities to be sent to neighbors
760   // Need to get procs each entity is sent to
761   //===========================================
762   Range allsent, tmp_range;
763   int npairs = 0;
764   TupleList entprocs;
765   for (i = 0; i < n_proc; i++) {
766     int n_ents = send_ents[i]->size();
767     if (n_ents > 0) {
768       npairs += n_ents; // Get the total # of proc/handle pairs
769       allsent.merge(*send_ents[i]);
770     }
771   }
772 
773   // Allocate a TupleList of that size
774   entprocs.initialize(1, 0, 1, 0, npairs);
775   entprocs.enableWriteAccess();
776 
777   // Put the proc/handle pairs in the list
778   for (i = 0; i < n_proc; i++) {
779     for (Range::iterator rit = send_ents[i]->begin(); rit != send_ents[i]->end(); ++rit) {
780       entprocs.vi_wr[entprocs.get_n()] = send_procs[i];
781       entprocs.vul_wr[entprocs.get_n()] = *rit;
782       entprocs.inc_n();
783     }
784   }
785 
786   // Sort by handle
787   moab::TupleList::buffer sort_buffer;
788   sort_buffer.buffer_init(npairs);
789   entprocs.sort(1, &sort_buffer);
790   entprocs.disableWriteAccess();
791   sort_buffer.reset();
792 
793   myDebug->tprintf(1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
794                   (unsigned long)allsent.size());
795 
796   //===========================================
797   // Pack and send ents from this proc to others
798   //===========================================
799   for (i = 0; i < n_proc; i++) {
800     if (send_ents[i]->size() > 0) {
801       ind = get_buffers(send_procs[i]);
802       myDebug->tprintf(1, "Sent ents compactness (size) = %f (%lu)\n", send_ents[i]->compactness(),
803                        (unsigned long)send_ents[i]->size());
804       // Reserve space on front for size and for initial buff size
805       localOwnedBuffs[ind]->reset_buffer(sizeof(int));
806       result = pack_buffer(*send_ents[i], false, true,
807                            store_remote_handles, buffProcs[ind],
808                            localOwnedBuffs[ind], &entprocs, &allsent);
809 
810       if (myDebug->get_verbosity() == 4) {
811         msgs.resize(msgs.size() + 1);
812         msgs.back() = new Buffer(*localOwnedBuffs[ind]);
813       }
814 
815       // Send the buffer (size stored in front in send_buffer)
816       result = send_buffer(send_procs[i], localOwnedBuffs[ind],
817                            MB_MESG_ENTS_SIZE, sendReqs[2*ind],
818                            recvReqs[2*ind + 1],
819                            &ackbuff,
820                            incoming1,
821                            MB_MESG_REMOTEH_SIZE,
822                            (store_remote_handles ?
823                             localOwnedBuffs[ind] : NULL),
824                            &recvRemotehReqs[2*ind], &incoming2);MB_CHK_SET_ERR(result, "Failed to Isend in ghost send");
825     }
826   }
827   entprocs.reset();
828 
829 #ifdef MOAB_HAVE_MPE
830   if (myDebug->get_verbosity() == 2) {
831     MPE_Log_event(ENTITIES_END, procConfig.proc_rank(), "Ending send_entities.");
832   }
833 #endif
834 
835   return MB_SUCCESS;
836 }
837 
838 /////////////////////////////////////////////////////////////////////////////////
839 // Send and Receive routines for a sequence of entities: use case UMR
840 /////////////////////////////////////////////////////////////////////////////////
841 void print_buff(unsigned char * ch, int size)
842 {
843   for (int i=0; i<size; i++)
844     std::cout<<ch[i];
845   std::cout<<"\n";
846 }
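// send_recv_entities: symmetric exchange of raw handle data with the
// processors in send_procs. For each peer i, msgsizes[i] (ints) followed by
// senddata[i] (handles) is packed and Isend'd, while a matching Irecv
// unpacks the peer's message into recvdata[i]. The receiver sizes its unpack
// from its own msgsizes[idx]/senddata[idx] entries, so the exchange is
// assumed to be symmetric between the two sides.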
847 ErrorCode ParallelComm::send_recv_entities(std::vector<int> &send_procs, std::vector<std::vector<int> > &msgsizes, std::vector<std::vector<EntityHandle> > &senddata, std::vector<std::vector<EntityHandle> > &recvdata)
848 {
849 #ifdef USE_MPE
850   if (myDebug->get_verbosity() == 2) {
851     MPE_Log_event(OWNED_START, procConfig.proc_rank(), "Starting send_recv_entities.");
852   }
853 #endif
854   myDebug->tprintf(1, "Entering send_recv_entities\n");
855   if (myDebug->get_verbosity() == 4) {
856     msgs.clear();
857     msgs.reserve(MAX_SHARING_PROCS);
858   }
859 
860   //unsigned int i;
861   int i, ind, success;
862   ErrorCode error = MB_SUCCESS;
863 
864   //===========================================
865   // Pack and send ents from this proc to others
866   //===========================================
867 
868  // std::cout<<"resetting all buffers"<<std::endl;
869 
870   reset_all_buffers();
871   sendReqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
872   std::vector<MPI_Request> recv_ent_reqs(3*buffProcs.size(), MPI_REQUEST_NULL);
873   int ack_buff;
874   int incoming = 0;
875 
876   std::vector<unsigned int>::iterator sit;
877 
878   for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++) {
879     incoming++;
880     PRINT_DEBUG_IRECV(*sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr,
881                       INITIAL_BUFF_SIZE, MB_MESG_ENTS_SIZE, incoming);
882 
883     success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
884                         MPI_UNSIGNED_CHAR, *sit,
885                         MB_MESG_ENTS_SIZE, procConfig.proc_comm(),
886                         &recv_ent_reqs[3*ind]);
887     if (success != MPI_SUCCESS) {
888       MB_SET_ERR(MB_FAILURE, "Failed to post irecv in send_recv_entities");
889     }
890   }
891 
892 
893 
894 //  std::set<unsigned int>::iterator it;
895   for ( i=0; i< (int) send_procs.size(); i++)
896     {
897       // Get the index of the buffer used for this destination processor
898       ind = get_buffers(send_procs[i]);
899       localOwnedBuffs[ind]->reset_buffer(sizeof(int));
900 
901       int buff_size = msgsizes[i].size()*sizeof(int) + senddata[i].size()*sizeof(EntityHandle);
902       localOwnedBuffs[ind]->check_space(buff_size);
903 
904       //Pack entities
905       std::vector<int> msg;
906       msg.insert(msg.end(), msgsizes[i].begin(), msgsizes[i].end());
907       PACK_INTS(localOwnedBuffs[ind]->buff_ptr, &msg[0], msg.size());
908 
909       std::vector<EntityHandle> entities;
910       entities.insert(entities.end(), senddata[i].begin(), senddata[i].end());
911       PACK_EH(localOwnedBuffs[ind]->buff_ptr, &entities[0], entities.size());
912       localOwnedBuffs[ind]->set_stored_size();
913 
914       if (myDebug->get_verbosity() == 4) {
915           msgs.resize(msgs.size() + 1);
916           msgs.back() = new Buffer(*localOwnedBuffs[ind]);
917         }
918 
919       // Send the buffer (size stored in front in send_buffer)
920       error = send_buffer(send_procs[i], localOwnedBuffs[ind],
921                            MB_MESG_ENTS_SIZE, sendReqs[3*ind],
922           recv_ent_reqs[3*ind+2],
923           &ack_buff,
924           incoming);MB_CHK_SET_ERR(error, "Failed to Isend in send_recv_entities");
925     }
926 
927 
928   //===========================================
929   // Receive and unpack ents from received data
930   //===========================================
931 
932   while (incoming) {
933 
934     MPI_Status status;
935     int index_in_recv_requests;
936 
937     PRINT_DEBUG_WAITANY(recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank());
938     success = MPI_Waitany(3*buffProcs.size(), &recv_ent_reqs[0], &index_in_recv_requests, &status);
939     if (MPI_SUCCESS != success) {
940       MB_SET_ERR(MB_FAILURE, "Failed in waitany in send_recv_entities");
941     }
942 
943     // The processor index is the completed request index divided by 3 (three requests per processor)
944     ind = index_in_recv_requests / 3;
945 
946     PRINT_DEBUG_RECD(status);
947 
948     // OK, received something; decrement incoming counter
949     incoming--;
950 
951     bool done = false;
952 
953     error = recv_buffer(MB_MESG_ENTS_SIZE,
954                          status,
955                          remoteOwnedBuffs[ind],
956                          recv_ent_reqs[3*ind + 1], // This is for receiving the second message
957                          recv_ent_reqs[3*ind + 2], // This would be for ack, but it is not used; consider removing it
958                          incoming,
959                          localOwnedBuffs[ind],
960                          sendReqs[3*ind + 1], // Send request for sending the second message
961                          sendReqs[3*ind + 2], // This is for sending the ack
962                          done);MB_CHK_SET_ERR(error, "Failed to resize recv buffer");
963 
964     if (done) {
965       remoteOwnedBuffs[ind]->reset_ptr(sizeof(int));
966 
967       int from_proc = status.MPI_SOURCE;
968       int idx = std::find(send_procs.begin(), send_procs.end(), from_proc) - send_procs.begin();
969 
970       int msg = msgsizes[idx].size(); std::vector<int> recvmsg(msg);
971       int ndata = senddata[idx].size(); std::vector<EntityHandle> dum_vec(ndata);
972 
973       UNPACK_INTS(remoteOwnedBuffs[ind]->buff_ptr, &recvmsg[0], msg);
974       UNPACK_EH(remoteOwnedBuffs[ind]->buff_ptr, &dum_vec[0], ndata);
975 
976       recvdata[idx].insert(recvdata[idx].end(), dum_vec.begin(), dum_vec.end());
977     }
978   }
979 
980 #ifdef USE_MPE
981   if (myDebug->get_verbosity() == 2) {
982       MPE_Log_event(ENTITIES_END, procConfig.proc_rank(), "Ending send_recv_entities.");
983     }
984 #endif
985 
986 
987   return MB_SUCCESS;
988 }
989 
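// Convenience overload of update_remote_data(): when 'entity' is shared with
// more than one other processor, append this rank's own (proc, handle) pair
// and swap the lowest-ranked (owning) processor to the front of both lists;
// set PSTATUS_NOT_OWNED if some other rank owns the entity, then forward to
// the pointer-based update_remote_data().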
990 ErrorCode ParallelComm::update_remote_data(EntityHandle entity, std::vector<int> &procs, std::vector<EntityHandle> &handles)
991 {
992   ErrorCode error;
993   unsigned char pstatus = PSTATUS_INTERFACE;
994 
995   int procmin = *std::min_element(procs.begin(), procs.end());
996 
997   if ((int)rank() > procmin)
998     pstatus |= PSTATUS_NOT_OWNED;
999   else
1000     procmin = rank();
1001 
1002 
1003   //DBG
1004  // std::cout<<"entity = "<<entity<<std::endl;
1005  // for (int j=0; j<procs.size(); j++)
1006   // std::cout<<"procs["<<j<<"] = "<<procs[j]<<", handles["<<j<<"] = "<<handles[j]<<std::endl;
1007   //DBG
1008 
1009 
1010   if ((int)procs.size() > 1)
1011     {
1012       procs.push_back(rank());
1013       handles.push_back(entity);
1014 
1015       int idx = std::find(procs.begin(), procs.end(), procmin) - procs.begin();
1016 
1017       std::iter_swap(procs.begin(), procs.begin()+idx);
1018       std::iter_swap(handles.begin(), handles.begin()+idx);
1019 
1020 
1021       //DBG
1022     //  std::cout<<"entity = "<<entity<<std::endl;
1023      // for (int j=0; j<procs.size(); j++)
1024       // std::cout<<"procs["<<j<<"] = "<<procs[j]<<", handles["<<j<<"] = "<<handles[j]<<std::endl;
1025       //DBG
1026 
1027 
1028     }
1029 
1030  // if ((entity == 10388) && (rank()==1))
1031 //    std::cout<<"Here"<<std::endl;
1032 
1033   error = update_remote_data(entity, &procs[0], &handles[0], procs.size(), pstatus);MB_CHK_ERR(error);
1034 
1035   return MB_SUCCESS;
1036 }
1037 
1038 ErrorCode ParallelComm::get_remote_handles(EntityHandle *local_vec, EntityHandle *rem_vec, int num_ents, int to_proc)
1039 {
1040   ErrorCode error;
1041  std::vector<EntityHandle> newents;
1042   error = get_remote_handles(true, local_vec, rem_vec, num_ents, to_proc, newents);MB_CHK_ERR(error);
1043 
1044   return MB_SUCCESS;
1045 }
1046 
1047 
1048 
1049 //////////////////////////////////////////////////////////////////
1050 
1051   ErrorCode ParallelComm::recv_entities(const int from_proc,
1052                                         const bool store_remote_handles,
1053                                         const bool is_iface,
1054                                         Range &final_ents,
1055                                         int& incoming1,
1056                                         int& incoming2,
1057                                         std::vector<std::vector<EntityHandle> > &L1hloc,
1058                                         std::vector<std::vector<EntityHandle> > &L1hrem,
1059                                         std::vector<std::vector<int> > &L1p,
1060                                         std::vector<EntityHandle> &L2hloc,
1061                                         std::vector<EntityHandle> &L2hrem,
1062                                         std::vector<unsigned int> &L2p,
1063                                         std::vector<MPI_Request> &recv_remoteh_reqs,
1064                                         bool /*wait_all*/)
1065   {
1066 #ifndef MOAB_HAVE_MPI
1067     return MB_FAILURE;
1068 #else
1069     // Non-blocking receive for the first message (having size info)
1070     int ind1 = get_buffers(from_proc);
1071     incoming1++;
1072     PRINT_DEBUG_IRECV(procConfig.proc_rank(), from_proc,
1073                       remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE,
1074                       MB_MESG_ENTS_SIZE, incoming1);
1075     int success = MPI_Irecv(remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE,
1076                             MPI_UNSIGNED_CHAR, from_proc,
1077                             MB_MESG_ENTS_SIZE, procConfig.proc_comm(),
1078                             &recvReqs[2*ind1]);
1079     if (success != MPI_SUCCESS) {
1080       MB_SET_ERR(MB_FAILURE, "Failed to post irecv in ghost exchange");
1081     }
1082 
1083     // Receive messages in while loop
1084     return recv_messages(from_proc, store_remote_handles, is_iface, final_ents,
1085                          incoming1, incoming2, L1hloc, L1hrem, L1p, L2hloc,
1086                          L2hrem, L2p, recv_remoteh_reqs);
1087 #endif
1088   }
1089 
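  // Receive and unpack entities from the processors in recv_procs, assign
  // them to this processor's part, then send each owner the local handles
  // created here and process the remote handles returned for entities this
  // rank sent out.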
1090   ErrorCode ParallelComm::recv_entities(std::set<unsigned int>& recv_procs,
1091                                         int incoming1, int incoming2,
1092                                         const bool store_remote_handles,
1093                                         const bool migrate)
1094   {
1095     //===========================================
1096     // Receive/unpack new entities
1097     //===========================================
1098     // Number of incoming messages is the number of procs we communicate with
1099     int success, ind, i;
1100     ErrorCode result;
1101     MPI_Status status;
1102     std::vector<std::vector<EntityHandle> > recd_ents(buffProcs.size());
1103     std::vector<std::vector<EntityHandle> > L1hloc(buffProcs.size()), L1hrem(buffProcs.size());
1104     std::vector<std::vector<int> > L1p(buffProcs.size());
1105     std::vector<EntityHandle> L2hloc, L2hrem;
1106     std::vector<unsigned int> L2p;
1107     std::vector<EntityHandle> new_ents;
1108 
1109     while (incoming1) {
1110       // Wait for all recvs of ents before proceeding to sending remote handles,
1111       // b/c some procs may have sent to a 3rd proc ents owned by me;
1112       PRINT_DEBUG_WAITANY(recvReqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank());
1113 
1114       success = MPI_Waitany(2*buffProcs.size(), &recvReqs[0], &ind, &status);
1115       if (MPI_SUCCESS != success) {
1116         MB_SET_ERR(MB_FAILURE, "Failed in waitany in owned entity exchange");
1117       }
1118 
1119       PRINT_DEBUG_RECD(status);
1120 
1121       // OK, received something; decrement incoming counter
1122       incoming1--;
1123       bool done = false;
1124 
1125       // In case ind is for ack, we need index of one before it
1126       unsigned int base_ind = 2*(ind/2);
1127       result = recv_buffer(MB_MESG_ENTS_SIZE,
1128                            status,
1129                            remoteOwnedBuffs[ind/2],
1130                            recvReqs[ind], recvReqs[ind + 1],
1131                            incoming1,
1132                            localOwnedBuffs[ind/2], sendReqs[base_ind], sendReqs[base_ind + 1],
1133                            done,
1134                            (store_remote_handles ?
1135                             localOwnedBuffs[ind/2] : NULL),
1136                            MB_MESG_REMOTEH_SIZE,
1137                            &recvRemotehReqs[base_ind], &incoming2);MB_CHK_SET_ERR(result, "Failed to receive buffer");
1138 
1139       if (done) {
1140         if (myDebug->get_verbosity() == 4) {
1141           msgs.resize(msgs.size() + 1);
1142           msgs.back() = new Buffer(*remoteOwnedBuffs[ind/2]);
1143         }
1144 
1145         // Message completely received - process buffer that was sent
1146         remoteOwnedBuffs[ind/2]->reset_ptr(sizeof(int));
1147         result = unpack_buffer(remoteOwnedBuffs[ind/2]->buff_ptr,
1148                                store_remote_handles, buffProcs[ind/2], ind/2,
1149                                L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
1150                                new_ents, true);
1151         if (MB_SUCCESS != result) {
1152           std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
1153           print_buffer(remoteOwnedBuffs[ind/2]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind/2], false);
1154           return result;
1155         }
1156 
1157         if (recvReqs.size() != 2*buffProcs.size()) {
1158           // Post irecv's for remote handles from new proc
1159           recvRemotehReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
1160           for (i = recvReqs.size(); i < (int)(2*buffProcs.size()); i += 2) {
1161             localOwnedBuffs[i/2]->reset_buffer();
1162             incoming2++;
1163             PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[i/2],
1164                               localOwnedBuffs[i/2]->mem_ptr, INITIAL_BUFF_SIZE,
1165                               MB_MESG_REMOTEH_SIZE, incoming2);
1166             success = MPI_Irecv(localOwnedBuffs[i/2]->mem_ptr, INITIAL_BUFF_SIZE,
1167                                 MPI_UNSIGNED_CHAR, buffProcs[i/2],
1168                                 MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
1169                                 &recvRemotehReqs[i]);
1170             if (success != MPI_SUCCESS) {
1171               MB_SET_ERR(MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange");
1172             }
1173           }
1174           recvReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
1175           sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
1176         }
1177       }
1178     }
1179 
1180     // Assign newly created entities to the receiving processor's part (and, when migrating, remove them from the sending part)
1181     result = assign_entities_part(new_ents, procConfig.proc_rank());MB_CHK_SET_ERR(result, "Failed to assign entities to part");
1182     if (migrate) {
1183       //result = remove_entities_part(allsent, procConfig.proc_rank());MB_CHK_SET_ERR(result, "Failed to remove entities from part");
1184     }
1185 
1186     // Add requests for any new addl procs
1187     if (recvReqs.size() != 2*buffProcs.size()) {
1188       // Shouldn't get here...
1189       MB_SET_ERR(MB_FAILURE, "Requests length doesn't match proc count in entity exchange");
1190     }
1191 
1192   #ifdef MOAB_HAVE_MPE
1193     if (myDebug->get_verbosity() == 2) {
1194       MPE_Log_event(ENTITIES_END, procConfig.proc_rank(), "Ending recv entities.");
1195     }
1196   #endif
1197 
1198     //===========================================
1199     // Send local handles for new entity to owner
1200     //===========================================
1201     std::set<unsigned int>::iterator it = recv_procs.begin();
1202     std::set<unsigned int>::iterator eit = recv_procs.end();
1203     for (; it != eit; ++it) {
1204       ind = get_buffers(*it);
1205       // Reserve space on front for size and for initial buff size
1206       remoteOwnedBuffs[ind]->reset_buffer(sizeof(int));
1207 
1208       result = pack_remote_handles(L1hloc[ind], L1hrem[ind], L1p[ind],
1209                                    buffProcs[ind], remoteOwnedBuffs[ind]);MB_CHK_SET_ERR(result, "Failed to pack remote handles");
1210       remoteOwnedBuffs[ind]->set_stored_size();
1211 
1212       if (myDebug->get_verbosity() == 4) {
1213         msgs.resize(msgs.size() + 1);
1214         msgs.back() = new Buffer(*remoteOwnedBuffs[ind]);
1215       }
1216       result = send_buffer(buffProcs[ind], remoteOwnedBuffs[ind],
1217                            MB_MESG_REMOTEH_SIZE,
1218                            sendReqs[2*ind], recvRemotehReqs[2*ind + 1],
1219                            &ackbuff,
1220                            incoming2);MB_CHK_SET_ERR(result, "Failed to send remote handles");
1221     }
1222 
1223     //===========================================
1224     // Process remote handles of my ghosteds
1225     //===========================================
1226     while (incoming2) {
1227       PRINT_DEBUG_WAITANY(recvRemotehReqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank());
1228       success = MPI_Waitany(2*buffProcs.size(), &recvRemotehReqs[0], &ind, &status);
1229       if (MPI_SUCCESS != success) {
1230         MB_SET_ERR(MB_FAILURE, "Failed in waitany in owned entity exchange");
1231       }
1232 
1233       // OK, received something; decrement incoming counter
1234       incoming2--;
1235 
1236       PRINT_DEBUG_RECD(status);
1237       bool done = false;
1238       unsigned int base_ind = 2*(ind/2);
1239       result = recv_buffer(MB_MESG_REMOTEH_SIZE, status,
1240                            localOwnedBuffs[ind/2],
1241                            recvRemotehReqs[ind], recvRemotehReqs[ind + 1], incoming2,
1242                            remoteOwnedBuffs[ind/2],
1243                            sendReqs[base_ind], sendReqs[base_ind + 1],
1244                            done);MB_CHK_SET_ERR(result, "Failed to receive remote handles");
1245       if (done) {
1246         // Incoming remote handles
1247         if (myDebug->get_verbosity() == 4) {
1248           msgs.resize(msgs.size() + 1);
1249           msgs.back() = new Buffer(*localOwnedBuffs[ind/2]);
1250         }
1251 
1252         localOwnedBuffs[ind/2]->reset_ptr(sizeof(int));
1253         result = unpack_remote_handles(buffProcs[ind/2],
1254                                        localOwnedBuffs[ind/2]->buff_ptr,
1255                                        L2hloc, L2hrem, L2p);MB_CHK_SET_ERR(result, "Failed to unpack remote handles");
1256       }
1257     }
1258 
1259   #ifdef MOAB_HAVE_MPE
1260     if (myDebug->get_verbosity() == 2) {
1261       MPE_Log_event(RHANDLES_END, procConfig.proc_rank(), "Ending remote handles.");
1262       MPE_Log_event(OWNED_END, procConfig.proc_rank(),
1263                     "Ending recv entities (still doing checks).");
1264     }
1265   #endif
1266     myDebug->tprintf(1, "Exiting recv_entities.\n");
1267 
1268     return MB_SUCCESS;
1269   }
1270 
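  // Drain the entity messages already posted for from_proc: wait on that
  // processor's receive requests, unpack the entities into final_ents, and
  // immediately send back the local handles created for them.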
1271   ErrorCode ParallelComm::recv_messages(const int from_proc,
1272                                         const bool store_remote_handles,
1273                                         const bool is_iface,
1274                                         Range &final_ents,
1275                                         int& incoming1,
1276                                         int& incoming2,
1277                                         std::vector<std::vector<EntityHandle> > &L1hloc,
1278                                         std::vector<std::vector<EntityHandle> > &L1hrem,
1279                                         std::vector<std::vector<int> > &L1p,
1280                                         std::vector<EntityHandle> &L2hloc,
1281                                         std::vector<EntityHandle> &L2hrem,
1282                                         std::vector<unsigned int> &L2p,
1283                                         std::vector<MPI_Request> &recv_remoteh_reqs)
1284   {
1285 #ifndef MOAB_HAVE_MPI
1286     return MB_FAILURE;
1287 #else
1288     MPI_Status status;
1289     ErrorCode result;
1290     int ind1 = get_buffers(from_proc);
1291     int success, ind2;
1292     std::vector<EntityHandle> new_ents;
1293 
1294     // Wait and receive messages
1295     while (incoming1) {
1296       PRINT_DEBUG_WAITANY(recvReqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank());
1297       success = MPI_Waitany(2, &recvReqs[2*ind1], &ind2, &status);
1298       if (MPI_SUCCESS != success) {
1299         MB_SET_ERR(MB_FAILURE, "Failed in waitany in recv_messages");
1300       }
1301 
1302       PRINT_DEBUG_RECD(status);
1303 
1304       // OK, received something; decrement incoming counter
1305       incoming1--;
1306       bool done = false;
1307 
1308       // In case ind is for ack, we need index of one before it
1309       ind2 += 2*ind1;
1310       unsigned int base_ind = 2*(ind2/2);
1311 
1312       result = recv_buffer(MB_MESG_ENTS_SIZE, status,
1313                            remoteOwnedBuffs[ind2/2],
1314                            //recvbuff,
1315                            recvReqs[ind2], recvReqs[ind2 + 1],
1316                            incoming1, localOwnedBuffs[ind2/2],
1317                            sendReqs[base_ind], sendReqs[base_ind + 1],
1318                            done,
1319                            (!is_iface && store_remote_handles ?
1320                             localOwnedBuffs[ind2/2] : NULL),
1321                            MB_MESG_REMOTEH_SIZE,
1322                            &recv_remoteh_reqs[base_ind], &incoming2);MB_CHK_SET_ERR(result, "Failed to receive buffer");
1323 
1324       if (done) {
1325         // If it is done, unpack buffer
1326         remoteOwnedBuffs[ind2/2]->reset_ptr(sizeof(int));
1327         result = unpack_buffer(remoteOwnedBuffs[ind2/2]->buff_ptr,
1328                                store_remote_handles, from_proc, ind2/2,
1329                                L1hloc, L1hrem, L1p, L2hloc, L2hrem,
1330                                L2p, new_ents);MB_CHK_SET_ERR(result, "Failed to unpack buffer in recv_messages");
1331 
1332         std::copy(new_ents.begin(), new_ents.end(), range_inserter(final_ents));
1333 
1334         // Send local handles for new elements to owner
1335         // Reserve space on front for size and for initial buff size
1336         remoteOwnedBuffs[ind2/2]->reset_buffer(sizeof(int));
1337 
1338         result = pack_remote_handles(L1hloc[ind2/2], L1hrem[ind2/2], L1p[ind2/2],
1339                                      from_proc, remoteOwnedBuffs[ind2/2]);MB_CHK_SET_ERR(result, "Failed to pack remote handles");
1340         remoteOwnedBuffs[ind2/2]->set_stored_size();
1341 
1342         result = send_buffer(buffProcs[ind2/2], remoteOwnedBuffs[ind2/2],
1343                              MB_MESG_REMOTEH_SIZE,
1344                              sendReqs[ind2], recv_remoteh_reqs[ind2 + 1],
1345                              (int*)(localOwnedBuffs[ind2/2]->mem_ptr),
1346                              //&ackbuff,
1347                              incoming2);MB_CHK_SET_ERR(result, "Failed to send remote handles");
1348       }
1349     }
1350 
1351     return MB_SUCCESS;
1352 #endif
1353   }
1354 
1355   ErrorCode ParallelComm::recv_remote_handle_messages(const int from_proc,
1356                                                       int& incoming2,
1357                                                       std::vector<EntityHandle> &L2hloc,
1358                                                       std::vector<EntityHandle> &L2hrem,
1359                                                       std::vector<unsigned int> &L2p,
1360                                                       std::vector<MPI_Request> &recv_remoteh_reqs)
1361   {
1362 #ifndef MOAB_HAVE_MPI
1363     return MB_FAILURE;
1364 #else
1365     MPI_Status status;
1366     ErrorCode result;
1367     int ind1 = get_buffers(from_proc);
1368     int success, ind2;
1369 
1370     while (incoming2) {
1371       PRINT_DEBUG_WAITANY(recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE,
1372                           procConfig.proc_rank());
1373       success = MPI_Waitany(2, &recv_remoteh_reqs[2*ind1],
1374                             &ind2, &status);
1375       if (MPI_SUCCESS != success) {
1376         MB_SET_ERR(MB_FAILURE, "Failed in waitany in recv_remote_handle_messages");
1377       }
1378 
1379       // OK, received something; decrement incoming counter
1380       incoming2--;
1381 
1382       PRINT_DEBUG_RECD(status);
1383 
1384       bool done = false;
1385       ind2 += 2*ind1;
1386       unsigned int base_ind = 2*(ind2/2);
1387       result = recv_buffer(MB_MESG_REMOTEH_SIZE, status,
1388                            localOwnedBuffs[ind2/2],
1389                            recv_remoteh_reqs[ind2], recv_remoteh_reqs[ind2 + 1], incoming2,
1390                            remoteOwnedBuffs[ind2/2],
1391                            sendReqs[base_ind], sendReqs[base_ind + 1],
1392                            done);MB_CHK_SET_ERR(result, "Failed to receive remote handles");
1393       if (done) {
1394         // Incoming remote handles
1395         localOwnedBuffs[ind2/2]->reset_ptr(sizeof(int));
1396         result = unpack_remote_handles(buffProcs[ind2/2],
1397                                        localOwnedBuffs[ind2/2]->buff_ptr,
1398                                        L2hloc, L2hrem, L2p);MB_CHK_SET_ERR(result, "Failed to unpack remote handles");
1399       }
1400     }
1401 
1402     return MB_SUCCESS;
1403 #endif
1404   }
1405 
1406   ErrorCode ParallelComm::pack_buffer(Range &orig_ents,
1407                                       const bool /*adjacencies*/,
1408                                       const bool tags,
1409                                       const bool store_remote_handles,
1410                                       const int to_proc,
1411                                       Buffer *buff,
1412                                       TupleList *entprocs,
1413                                       Range *allsent)
1414   {
1415     // Pack the buffer with the entity ranges, adjacencies, and tags sections
1416     //
1417     // Note: new entities used in subsequent connectivity lists, sets, or tags,
1418     // are referred to as (MBMAXTYPE + index), where index is into vector
1419     // of new entities, 0-based
1420     ErrorCode result;
1421 
1422     Range set_range;
1423     std::vector<Tag> all_tags;
1424     std::vector<Range> tag_ranges;
1425 
1426     Range::const_iterator rit;
1427 
1428     // Entities
1429     result = pack_entities(orig_ents, buff,
1430                            store_remote_handles, to_proc, false,
1431                            entprocs, allsent);MB_CHK_SET_ERR(result, "Packing entities failed");
1432 
1433     // Sets
1434     result = pack_sets(orig_ents, buff,
1435                        store_remote_handles, to_proc);MB_CHK_SET_ERR(result, "Packing sets (count) failed");
1436 
1437     // Tags
1438     Range final_ents;
1439     if (tags) {
1440       result = get_tag_send_list(orig_ents, all_tags, tag_ranges);MB_CHK_SET_ERR(result, "Failed to get tagged entities");
1441       result = pack_tags(orig_ents, all_tags, all_tags, tag_ranges,
1442                          buff, store_remote_handles, to_proc);MB_CHK_SET_ERR(result, "Packing tags (count) failed");
1443     }
1444     else { // Set tag size to 0
1445       buff->check_space(sizeof(int));
1446       PACK_INT(buff->buff_ptr, 0);
1447       buff->set_stored_size();
1448     }
1449 
1450     return result;
1451   }
1452 
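  // Resulting buffer layout (a sketch derived from the three calls above; exact
  // byte counts depend on what pack_entities/pack_sets/pack_tags write):
  //
  //   [entities] shared proc/handle lists (if store_remote_handles), then blocks of
  //              same-type entities, terminated by an MBMAXTYPE marker
  //   [sets]     set count followed by per-set contents, parents and children
  //   [tags]     tag descriptions and values, or a single 0 int when tags == false
  //
  // Handles of entities that are new to the receiver are encoded as
  // CREATE_HANDLE(MBMAXTYPE, index) placeholders, where index is the 0-based
  // position in the new-entity vector (see get_remote_handles below).
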
1453   ErrorCode ParallelComm::unpack_buffer(unsigned char *buff_ptr,
1454                                         const bool store_remote_handles,
1455                                         const int from_proc,
1456                                         const int ind,
1457                                         std::vector<std::vector<EntityHandle> > &L1hloc,
1458                                         std::vector<std::vector<EntityHandle> > &L1hrem,
1459                                         std::vector<std::vector<int> > &L1p,
1460                                         std::vector<EntityHandle> &L2hloc,
1461                                         std::vector<EntityHandle> &L2hrem,
1462                                         std::vector<unsigned int> &L2p,
1463                                         std::vector<EntityHandle> &new_ents,
1464                                         const bool created_iface)
1465   {
1466     unsigned char *tmp_buff = buff_ptr;
1467     ErrorCode result;
1468     result = unpack_entities(buff_ptr, store_remote_handles,
1469                              ind, false, L1hloc, L1hrem, L1p,
1470                              L2hloc, L2hrem, L2p, new_ents,
1471                              created_iface);MB_CHK_SET_ERR(result, "Unpacking entities failed");
1472     if (myDebug->get_verbosity() == 3) {
1473       myDebug->tprintf(4, "unpack_entities buffer space: %ld bytes.\n", (long int)(buff_ptr - tmp_buff));
1474       tmp_buff = buff_ptr;
1475     }
1476     result = unpack_sets(buff_ptr, new_ents, store_remote_handles, from_proc);MB_CHK_SET_ERR(result, "Unpacking sets failed");
1477     if (myDebug->get_verbosity() == 3) {
1478       myDebug->tprintf(4, "unpack_sets buffer space: %ld bytes.\n", (long int)(buff_ptr - tmp_buff));
1479       tmp_buff = buff_ptr;
1480     }
1481     result = unpack_tags(buff_ptr, new_ents, store_remote_handles, from_proc);MB_CHK_SET_ERR(result, "Unpacking tags failed");
1482     if (myDebug->get_verbosity() == 3) {
1483       myDebug->tprintf(4, "unpack_tags buffer space: %ld bytes.\n", (long int)(buff_ptr - tmp_buff));
1484       //tmp_buff = buff_ptr;
1485     }
1486 
1487     if (myDebug->get_verbosity() == 3)
1488       myDebug->print(4, "\n");
1489 
1490     return MB_SUCCESS;
1491   }
1492 
1493   int ParallelComm::estimate_ents_buffer_size(Range &entities,
1494                                               const bool store_remote_handles)
1495   {
1496     int buff_size = 0;
1497     std::vector<EntityHandle> dum_connect_vec;
1498     const EntityHandle *connect;
1499     int num_connect;
1500 
1501     int num_verts = entities.num_of_type(MBVERTEX);
1502     // # verts + coords + handles
1503     buff_size += 2*sizeof(int) + 3*sizeof(double)*num_verts;
1504     if (store_remote_handles) buff_size += sizeof(EntityHandle)*num_verts;
1505 
1506     // Do a rough count by looking at first entity of each type
1507     for (EntityType t = MBEDGE; t < MBENTITYSET; t++) {
1508       const Range::iterator rit = entities.lower_bound(t);
1509       if (rit == entities.end() || TYPE_FROM_HANDLE(*rit) != t)
1510         continue;
1511 
1512       ErrorCode result = mbImpl->get_connectivity(*rit, connect, num_connect,
1513                                                   false, &dum_connect_vec);MB_CHK_SET_ERR_RET_VAL(result, "Failed to get connectivity to estimate buffer size", -1);
1514 
1515       // Number, type, nodes per entity
1516       buff_size += 3*sizeof(int);
1517       int num_ents = entities.num_of_type(t);
1518       // Connectivity, handle for each ent
1519       buff_size += (num_connect + 1)*sizeof(EntityHandle)*num_ents;
1520     }
1521 
1522     // Extra entity type at end, passed as int
1523     buff_size += sizeof(int);
1524 
1525     return buff_size;
1526   }
1527 
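  // Worked example of the estimate above, assuming 4-byte int, 8-byte double and
  // 8-byte EntityHandle (typical, not guaranteed): 100 vertices plus 50 hexes
  // (8 nodes each) with store_remote_handles == true gives
  //
  //   vertices:  2*4 + 3*8*100 + 8*100 = 3208 bytes
  //   hexes:     3*4 + (8 + 1)*8*50    = 3612 bytes
  //   type end:  4 bytes
  //   total:     6824 bytes
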
1528   int ParallelComm::estimate_sets_buffer_size(Range &entities,
1529                                               const bool /*store_remote_handles*/)
1530   {
1531     // Number of sets
1532     int buff_size = sizeof(int);
1533 
1534     // Add up the (estimated) contribution of each entity set
1535     Range::iterator rit = entities.lower_bound(MBENTITYSET);
1536     ErrorCode result;
1537 
1538     for (; rit != entities.end(); ++rit) {
1539       unsigned int options;
1540       result = mbImpl->get_meshset_options(*rit, options);MB_CHK_SET_ERR_RET_VAL(result, "Failed to get meshset options", -1);
1541 
1542       buff_size += sizeof(int);
1543 
1544       Range set_range;
1545       if (options & MESHSET_SET) {
1546         // Range-based set; count the subranges
1547         result = mbImpl->get_entities_by_handle(*rit, set_range);MB_CHK_SET_ERR_RET_VAL(result, "Failed to get set entities", -1);
1548 
1549         // Set range
1550         buff_size += RANGE_SIZE(set_range);
1551       }
1552       else if (options & MESHSET_ORDERED) {
1553         // Just get the number of entities in the set
1554         int num_ents;
1555         result = mbImpl->get_number_entities_by_handle(*rit, num_ents);MB_CHK_SET_ERR_RET_VAL(result, "Failed to get number entities in ordered set", -1);
1556 
1557         // Set vec
1558         buff_size += sizeof(EntityHandle) * num_ents + sizeof(int);
1559       }
1560 
1561       // Get numbers of parents/children
1562       int num_par, num_ch;
1563       result = mbImpl->num_child_meshsets(*rit, &num_ch);MB_CHK_SET_ERR_RET_VAL(result, "Failed to get num children", -1);
1564       result = mbImpl->num_parent_meshsets(*rit, &num_par);MB_CHK_SET_ERR_RET_VAL(result, "Failed to get num parents", -1);
1565 
1566       buff_size += (num_ch + num_par) * sizeof(EntityHandle) + 2*sizeof(int);
1567     }
1568 
1569     return buff_size;
1570   }
1571 
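  // Worked example, with the same size assumptions as above (4-byte int, 8-byte
  // EntityHandle): a single MESHSET_ORDERED set holding 10 entities, with 2
  // children and 1 parent, is estimated as
  //
  //   set count:                  4
  //   options:                    4
  //   handle vector + its length: 8*10 + 4        = 84
  //   parents/children + counts:  (2 + 1)*8 + 2*4 = 32
  //   total:                      124 bytes
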
1572   ErrorCode ParallelComm::pack_entities(Range &entities,
1573                                         Buffer *buff,
1574                                         const bool store_remote_handles,
1575                                         const int to_proc,
1576                                         const bool /*is_iface*/,
1577                                         TupleList *entprocs,
1578                                         Range */*allsent*/)
1579   {
1580     // Packed information:
1581     // 1. # entities = E
1582     // 2. for e in E
1583     //   a. # procs sharing e, incl. sender and receiver = P
1584     //   b. for p in P (procs sharing e)
1585     //   c. for p in P (handle for e on p) (Note1)
1586     // 3. vertex/entity info
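    //
    // Concrete illustration of 2a-2c (placeholder ranks and handles): a vertex with
    // local handle h, owned and not previously shared by this rank (rank 1), being
    // sent to ranks 0 and 3, is packed as
    //   P = 3, procs = {1, 0, 3}, handles = {h, 0, 0}
    // where a 0 handle means "handle on that proc not known yet".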
1587 
1588     // Get an estimate of the buffer size & pre-allocate buffer size
1589     int buff_size = estimate_ents_buffer_size(entities, store_remote_handles);
1590     if (buff_size < 0)
1591       MB_SET_ERR(MB_FAILURE, "Failed to estimate ents buffer size");
1592     buff->check_space(buff_size);
1593     myDebug->tprintf(3, "estimate buffer size for %d entities: %d \n", (int)entities.size(), buff_size  );
1594 
1595     unsigned int num_ents;
1596     ErrorCode result;
1597 
1598     std::vector<EntityHandle> entities_vec(entities.size());
1599     std::copy(entities.begin(), entities.end(), entities_vec.begin());
1600 
1601     // First pack procs/handles sharing this ent, not including this dest but including
1602     // others (with zero handles)
1603     if (store_remote_handles) {
1604       // Buff space is at least proc + handle for each entity; use avg of 4 other procs
1605       // to estimate buff size, but check later
1606       buff->check_space(sizeof(int) + (5*sizeof(int) + sizeof(EntityHandle))*entities.size());
1607 
1608       // 1. # entities = E
1609       PACK_INT(buff->buff_ptr, entities.size());
1610 
1611       Range::iterator rit;
1612 
1613       // Pre-fetch sharedp and pstatus
1614       std::vector<int> sharedp_vals(entities.size());
1615       result = mbImpl->tag_get_data(sharedp_tag(), entities, &sharedp_vals[0]);MB_CHK_SET_ERR(result, "Failed to get sharedp tag data");
1616       std::vector<char> pstatus_vals(entities.size());
1617       result = mbImpl->tag_get_data(pstatus_tag(), entities, &pstatus_vals[0]);MB_CHK_SET_ERR(result, "Failed to get pstatus tag data");
1618 
1619       unsigned int i;
1620       int tmp_procs[MAX_SHARING_PROCS];
1621       EntityHandle tmp_handles[MAX_SHARING_PROCS];
1622       std::set<unsigned int> dumprocs;
1623 
1624       // 2. for e in E
1625       for (rit = entities.begin(), i = 0;
1626            rit != entities.end(); ++rit, i++) {
1627         unsigned int ind = std::lower_bound(entprocs->vul_rd, entprocs->vul_rd + entprocs->get_n(), *rit) - entprocs->vul_rd;
1628         assert(ind < entprocs->get_n());
1629 
1630         while (ind < entprocs->get_n() && entprocs->vul_rd[ind] == *rit)
1631           dumprocs.insert(entprocs->vi_rd[ind++]);
1632 
1633         result = build_sharedhps_list(*rit, pstatus_vals[i], sharedp_vals[i],
1634                                       dumprocs, num_ents, tmp_procs, tmp_handles);MB_CHK_SET_ERR(result, "Failed to build sharedhps");
1635 
1636         dumprocs.clear();
1637 
1638         // Now pack them
1639         buff->check_space((num_ents + 1)*sizeof(int) +
1640                           num_ents*sizeof(EntityHandle));
1641         PACK_INT(buff->buff_ptr, num_ents);
1642         PACK_INTS(buff->buff_ptr, tmp_procs, num_ents);
1643         PACK_EH(buff->buff_ptr, tmp_handles, num_ents);
1644 
1645 #ifndef NDEBUG
1646         // Check for duplicates in proc list
1647         unsigned int dp = 0;
1648         for (; dp < MAX_SHARING_PROCS && -1 != tmp_procs[dp]; dp++)
1649           dumprocs.insert(tmp_procs[dp]);
1650         assert(dumprocs.size() == dp);
1651         dumprocs.clear();
1652 #endif
1653       }
1654     }
1655 
1656     // Pack vertices
1657     Range these_ents = entities.subset_by_type(MBVERTEX);
1658     num_ents = these_ents.size();
1659 
1660     if (num_ents) {
1661       buff_size = 2*sizeof(int) + 3*num_ents*sizeof(double);
1662       buff->check_space(buff_size);
1663 
1664       // Type, # ents
1665       PACK_INT(buff->buff_ptr, ((int) MBVERTEX));
1666       PACK_INT(buff->buff_ptr, ((int) num_ents));
1667 
1668       std::vector<double> tmp_coords(3*num_ents);
1669       result = mbImpl->get_coords(these_ents, &tmp_coords[0]);MB_CHK_SET_ERR(result, "Failed to get vertex coordinates");
1670       PACK_DBLS(buff->buff_ptr, &tmp_coords[0], 3*num_ents);
1671 
1672       myDebug->tprintf(4, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
1673                        CN::EntityTypeName(TYPE_FROM_HANDLE(*these_ents.begin())));
1674     }
1675 
1676     // Now entities; go through range, packing by type and equal # verts per element
1677     Range::iterator start_rit = entities.find(*these_ents.rbegin());
1678     ++start_rit;
1679     int last_nodes = -1;
1680     EntityType last_type = MBMAXTYPE;
1681     these_ents.clear();
1682     Range::iterator end_rit = start_rit;
1683     EntitySequence *seq;
1684     ElementSequence *eseq;
1685 
1686     while (start_rit != entities.end() || !these_ents.empty()) {
1687       // Cases:
1688       // A: !end, last_type == MBMAXTYPE, seq: save contig sequence in these_ents
1689       // B: !end, last type & nodes same, seq: save contig sequence in these_ents
1690       // C: !end, last type & nodes different: pack these_ents, then save contig sequence in these_ents
1691       // D: end: pack these_ents
1692 
1693       // Find the sequence holding current start entity, if we're not at end
1694       eseq = NULL;
1695       if (start_rit != entities.end()) {
1696         result = sequenceManager->find(*start_rit, seq);MB_CHK_SET_ERR(result, "Failed to find entity sequence");
1697         if (NULL == seq)
1698           return MB_FAILURE;
1699         eseq = dynamic_cast<ElementSequence*>(seq);
1700       }
1701 
1702       // Pack the last batch if at end or next one is different
1703       if (!these_ents.empty() &&
1704           (!eseq || eseq->type() != last_type ||
1705            last_nodes != (int) eseq->nodes_per_element())) {
1706         result = pack_entity_seq(last_nodes, store_remote_handles,
1707                                  to_proc, these_ents, entities_vec, buff);MB_CHK_SET_ERR(result, "Failed to pack entities from a sequence");
1708         these_ents.clear();
1709       }
1710 
1711       if (eseq) {
1712         // Continuation of current range, just save these entities
1713         // Get position in entities list one past end of this sequence
1714         end_rit = entities.lower_bound(start_rit, entities.end(), eseq->end_handle() + 1);
1715 
1716         // Put these entities in the range
1717         std::copy(start_rit, end_rit, range_inserter(these_ents));
1718 
1719         last_type = eseq->type();
1720         last_nodes = eseq->nodes_per_element();
1721       }
1722       else if (start_rit != entities.end() &&
1723                TYPE_FROM_HANDLE(*start_rit) == MBENTITYSET)
1724         break;
1725 
1726       start_rit = end_rit;
1727     }
1728 
1729     // Pack MBMAXTYPE to indicate end of ranges
1730     buff->check_space(sizeof(int));
1731     PACK_INT(buff->buff_ptr, ((int)MBMAXTYPE));
1732 
1733     buff->set_stored_size();
1734     return MB_SUCCESS;
1735   }
1736 
1737   ErrorCode ParallelComm::build_sharedhps_list(const EntityHandle entity,
1738                                                const unsigned char pstatus,
1739                                                const int
1740 #ifndef NDEBUG
1741                                                sharedp
1742 #endif
1743                                                ,
1744                                                const std::set<unsigned int> &procs,
1745                                                unsigned int &num_ents,
1746                                                int *tmp_procs,
1747                                                EntityHandle *tmp_handles)
1748   {
1749     num_ents = 0;
1750     unsigned char pstat;
1751     ErrorCode result = get_sharing_data(entity, tmp_procs, tmp_handles,
1752                                         pstat, num_ents);MB_CHK_SET_ERR(result, "Failed to get sharing data");
1753     assert(pstat == pstatus);
1754 
1755     // Build shared proc/handle lists
1756     // Start with multi-shared, since if it is the owner will be first
1757     if (pstatus & PSTATUS_MULTISHARED) {
1758     }
1759     else if (pstatus & PSTATUS_NOT_OWNED) {
1760       // If not multishared and not owned, other sharing proc is owner, put that
1761       // one first
1762       assert("If not owned, I should be shared too" &&
1763              pstatus & PSTATUS_SHARED &&
1764              1 == num_ents);
1765       tmp_procs[1] = procConfig.proc_rank();
1766       tmp_handles[1] = entity;
1767       num_ents = 2;
1768     }
1769     else if (pstatus & PSTATUS_SHARED) {
1770       // If not multishared and owned, I'm owner
1771       assert("shared and owned, should be only 1 sharing proc" && 1 == num_ents);
1772       tmp_procs[1] = tmp_procs[0];
1773       tmp_procs[0] = procConfig.proc_rank();
1774       tmp_handles[1] = tmp_handles[0];
1775       tmp_handles[0] = entity;
1776       num_ents = 2;
1777     }
1778     else {
1779       // Not shared yet, just add owner (me)
1780       tmp_procs[0] = procConfig.proc_rank();
1781       tmp_handles[0] = entity;
1782       num_ents = 1;
1783     }
1784 
1785 #ifndef NDEBUG
1786     int tmp_ps = num_ents;
1787 #endif
1788 
1789     // Now add others, with zero handle for now
1790     for (std::set<unsigned int>::iterator sit = procs.begin();
1791          sit != procs.end(); ++sit) {
1792 #ifndef NDEBUG
1793       if (tmp_ps && std::find(tmp_procs, tmp_procs + tmp_ps, *sit) != tmp_procs + tmp_ps) {
1794         std::cerr << "Trouble with something already in shared list on proc " << procConfig.proc_rank()
1795                   << ". Entity:" << std::endl;
1796         list_entities(&entity, 1);
1797         std::cerr << "pstatus = " << (int) pstatus << ", sharedp = " << sharedp << std::endl;
1798         std::cerr << "tmp_ps = ";
1799         for (int i = 0; i < tmp_ps; i++)
1800           std::cerr << tmp_procs[i] << " ";
1801         std::cerr << std::endl;
1802         std::cerr << "procs = ";
1803         for (std::set<unsigned int>::iterator sit2 = procs.begin(); sit2 != procs.end(); ++sit2)
1804           std::cerr << *sit2 << " ";
1805         assert(false);
1806       }
1807 #endif
1808       tmp_procs[num_ents] = *sit;
1809       tmp_handles[num_ents] = 0;
1810       num_ents++;
1811     }
1812 
1813     // Put -1 after procs and 0 after handles
1814     if (MAX_SHARING_PROCS > num_ents) {
1815       tmp_procs[num_ents] = -1;
1816       tmp_handles[num_ents] = 0;
1817     }
1818 
1819     return MB_SUCCESS;
1820   }
1821 
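  // Ordering produced above, summarized (ranks are placeholders; 'me' is
  // procConfig.proc_rank(), 'entity' the local handle):
  //
  //   multishared:        lists from get_sharing_data kept as stored (owner first)
  //   shared, not owned:  tmp_procs = {owner, me},  tmp_handles = {owner_handle, entity}
  //   shared, owned:      tmp_procs = {me, other},  tmp_handles = {entity, other_handle}
  //   not shared yet:     tmp_procs = {me},         tmp_handles = {entity}
  //
  // followed by each extra proc from 'procs' with a 0 (unknown) handle, then a
  // -1 / 0 terminator when num_ents < MAX_SHARING_PROCS.
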
1822   ErrorCode ParallelComm::pack_entity_seq(const int nodes_per_entity,
1823                                           const bool store_remote_handles,
1824                                           const int to_proc,
1825                                           Range &these_ents,
1826                                           std::vector<EntityHandle> &entities_vec,
1827                                           Buffer *buff)
1828   {
1829     int tmp_space = 3*sizeof(int) + nodes_per_entity*these_ents.size()*sizeof(EntityHandle);
1830     buff->check_space(tmp_space);
1831 
1832     // Pack the entity type
1833     PACK_INT(buff->buff_ptr, ((int)TYPE_FROM_HANDLE(*these_ents.begin())));
1834 
1835     // Pack # ents
1836     PACK_INT(buff->buff_ptr, these_ents.size());
1837 
1838     // Pack the nodes per entity
1839     PACK_INT(buff->buff_ptr, nodes_per_entity);
1840     myDebug->tprintf(3, "after some pack int  %d \n", buff->get_current_size() );
1841 
1842     // Pack the connectivity
1843     std::vector<EntityHandle> connect;
1844     ErrorCode result = MB_SUCCESS;
1845     for (Range::const_iterator rit = these_ents.begin(); rit != these_ents.end(); ++rit) {
1846       connect.clear();
1847       result = mbImpl->get_connectivity(&(*rit), 1, connect, false);MB_CHK_SET_ERR(result, "Failed to get connectivity");
1848       assert((int)connect.size() == nodes_per_entity);
1849       result = get_remote_handles(store_remote_handles, &connect[0], &connect[0],
1850                                   connect.size(), to_proc, entities_vec);MB_CHK_SET_ERR(result, "Failed in get_remote_handles");
1851       PACK_EH(buff->buff_ptr, &connect[0], connect.size());
1852     }
1853 
1854     myDebug->tprintf(3, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
1855                      CN::EntityTypeName(TYPE_FROM_HANDLE(*these_ents.begin())));
1856 
1857     return result;
1858   }
1859 
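  // Per-batch wire layout written above (one batch = contiguous entities of the
  // same type and node count):
  //
  //   int            entity type
  //   int            number of entities in the batch
  //   int            nodes per entity
  //   EntityHandle[] connectivity, nodes_per_entity handles per entity, already
  //                  run through get_remote_handles for the destination proc
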
1860   ErrorCode ParallelComm::get_remote_handles(const bool store_remote_handles,
1861                                              EntityHandle *from_vec,
1862                                              EntityHandle *to_vec_tmp,
1863                                              int num_ents, int to_proc,
1864                                              const std::vector<EntityHandle> &new_ents)
1865   {
1866     // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE RANGE-BASED VERSION, NO REUSE
1867     // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
1868     // OTHER VERSION TOO!!!
1869     if (0 == num_ents)
1870       return MB_SUCCESS;
1871 
1872     // Use a local destination ptr in case we're doing an in-place copy
1873     std::vector<EntityHandle> tmp_vector;
1874     EntityHandle *to_vec = to_vec_tmp;
1875     if (to_vec == from_vec) {
1876       tmp_vector.resize(num_ents);
1877       to_vec = &tmp_vector[0];
1878     }
1879 
1880     if (!store_remote_handles) {
1881       int err;
1882       // In this case, substitute position in new_ents list
1883       for (int i = 0; i < num_ents; i++) {
1884         int ind = std::lower_bound(new_ents.begin(), new_ents.end(), from_vec[i]) - new_ents.begin();
1885         assert(new_ents[ind] == from_vec[i]);
1886         to_vec[i] = CREATE_HANDLE(MBMAXTYPE, ind, err);
1887         assert(to_vec[i] != 0 && !err && -1 != ind);
1888       }
1889     }
1890     else {
1891       Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
1892       ErrorCode result = get_shared_proc_tags(shp_tag, shps_tag,
1893                                               shh_tag, shhs_tag, pstat_tag);MB_CHK_SET_ERR(result, "Failed to get shared proc tags");
1894 
1895       // Get single-proc destination handles and shared procs
1896       std::vector<int> sharing_procs(num_ents);
1897       result = mbImpl->tag_get_data(shh_tag, from_vec, num_ents,
1898                                     to_vec);MB_CHK_SET_ERR(result, "Failed to get shared handle tag for remote_handles");
1899       result = mbImpl->tag_get_data(shp_tag, from_vec, num_ents, &sharing_procs[0]);MB_CHK_SET_ERR(result, "Failed to get sharing proc tag in remote_handles");
1900       for (int j = 0; j < num_ents; j++) {
1901         if (to_vec[j] && sharing_procs[j] != to_proc)
1902           to_vec[j] = 0;
1903       }
1904 
1905       EntityHandle tmp_handles[MAX_SHARING_PROCS];
1906       int tmp_procs[MAX_SHARING_PROCS];
1907       int i;
1908       // Go through results, and for 0-valued ones, look for multiple shared proc
1909       for (i = 0; i < num_ents; i++) {
1910         if (!to_vec[i]) {
1911           result = mbImpl->tag_get_data(shps_tag, from_vec + i, 1, tmp_procs);
1912           if (MB_SUCCESS == result) {
1913             for (int j = 0; j < MAX_SHARING_PROCS; j++) {
1914               if (-1 == tmp_procs[j])
1915                 break;
1916               else if (tmp_procs[j] == to_proc) {
1917                 result = mbImpl->tag_get_data(shhs_tag, from_vec + i, 1, tmp_handles);MB_CHK_SET_ERR(result, "Failed to get sharedhs tag data");
1918                 to_vec[i] = tmp_handles[j];
1919                 assert(to_vec[i]);
1920                 break;
1921               }
1922             }
1923           }
1924           if (!to_vec[i]) {
1925             int j = std::lower_bound(new_ents.begin(), new_ents.end(), from_vec[i]) - new_ents.begin();
1926             if ((int)new_ents.size() == j) {
1927               std::cout << "Failed to find new entity in send list, proc "
1928                         << procConfig.proc_rank() << std::endl;
1929               for (int k = 0; k < num_ents; k++)
1930                 std::cout << k << ": " << from_vec[k] << " " << to_vec[k]
1931                           << std::endl;
1932               MB_SET_ERR(MB_FAILURE, "Failed to find new entity in send list");
1933             }
1934             int err;
1935             to_vec[i] = CREATE_HANDLE(MBMAXTYPE, j, err);
1936             if (err) {
1937               MB_SET_ERR(MB_FAILURE, "Failed to create handle in remote_handles");
1938             }
1939           }
1940         }
1941       }
1942     }
1943 
1944     // memcpy over results if from_vec and to_vec are the same
1945     if (to_vec_tmp == from_vec)
1946       memcpy(from_vec, to_vec, num_ents * sizeof(EntityHandle));
1947 
1948     return MB_SUCCESS;
1949   }
1950 
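  // A minimal sketch of the placeholder convention used above when
  // store_remote_handles is false (hypothetical receiver-side variable names;
  // the actual decoding happens in get_local_handles/unpack_entities):
  //
  //   EntityHandle packed = /* handle read from the buffer */;
  //   if (TYPE_FROM_HANDLE(packed) == MBMAXTYPE) {
  //     int idx = ID_FROM_HANDLE(packed);   // 0-based position in the new-entity list
  //     EntityHandle local = new_handles[idx];
  //   }
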
1951   ErrorCode ParallelComm::get_remote_handles(const bool store_remote_handles,
1952                                              const Range &from_range,
1953                                              EntityHandle *to_vec,
1954                                              int to_proc,
1955                                              const std::vector<EntityHandle> &new_ents)
1956   {
1957     // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE VECTOR-BASED VERSION, NO REUSE
1958     // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
1959     // OTHER VERSION TOO!!!
1960     if (from_range.empty())
1961       return MB_SUCCESS;
1962 
1963     if (!store_remote_handles) {
1964       int err;
1965       // In this case, substitute position in new_ents list
1966       Range::iterator rit;
1967       unsigned int i;
1968       for (rit = from_range.begin(), i = 0; rit != from_range.end(); ++rit, i++) {
1969         int ind = std::lower_bound(new_ents.begin(), new_ents.end(), *rit) - new_ents.begin();
1970         assert(new_ents[ind] == *rit);
1971         to_vec[i] = CREATE_HANDLE(MBMAXTYPE, ind, err);
1972         assert(to_vec[i] != 0 && !err && -1 != ind);
1973       }
1974     }
1975     else {
1976       Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
1977       ErrorCode result = get_shared_proc_tags(shp_tag, shps_tag,
1978                                               shh_tag, shhs_tag, pstat_tag);MB_CHK_SET_ERR(result, "Failed to get shared proc tags");
1979 
1980       // Get single-proc destination handles and shared procs
1981       std::vector<int> sharing_procs(from_range.size());
1982       result = mbImpl->tag_get_data(shh_tag, from_range, to_vec);MB_CHK_SET_ERR(result, "Failed to get shared handle tag for remote_handles");
1983       result = mbImpl->tag_get_data(shp_tag, from_range, &sharing_procs[0]);MB_CHK_SET_ERR(result, "Failed to get sharing proc tag in remote_handles");
1984       for (unsigned int j = 0; j < from_range.size(); j++) {
1985         if (to_vec[j] && sharing_procs[j] != to_proc)
1986           to_vec[j] = 0;
1987       }
1988 
1989       EntityHandle tmp_handles[MAX_SHARING_PROCS];
1990       int tmp_procs[MAX_SHARING_PROCS];
1991       // Go through results, and for 0-valued ones, look for multiple shared proc
1992       Range::iterator rit;
1993       unsigned int i;
1994       for (rit = from_range.begin(), i = 0; rit != from_range.end(); ++rit, i++) {
1995         if (!to_vec[i]) {
1996           result = mbImpl->tag_get_data(shhs_tag, &(*rit), 1, tmp_handles);
1997           if (MB_SUCCESS == result) {
1998             result = mbImpl->tag_get_data(shps_tag, &(*rit), 1, tmp_procs);MB_CHK_SET_ERR(result, "Failed to get sharedps tag data");
1999             for (int j = 0; j < MAX_SHARING_PROCS; j++)
2000               if (tmp_procs[j] == to_proc) {
2001                 to_vec[i] = tmp_handles[j];
2002                 break;
2003               }
2004           }
2005 
2006           if (!to_vec[i]) {
2007             int j = std::lower_bound(new_ents.begin(), new_ents.end(), *rit) - new_ents.begin();
2008             if ((int)new_ents.size() == j) {
2009               MB_SET_ERR(MB_FAILURE, "Failed to find new entity in send list");
2010             }
2011             int err;
2012             to_vec[i] = CREATE_HANDLE(MBMAXTYPE, j, err);
2013             if (err) {
2014               MB_SET_ERR(MB_FAILURE, "Failed to create handle in remote_handles");
2015             }
2016           }
2017         }
2018       }
2019     }
2020 
2021     return MB_SUCCESS;
2022   }
2023 
2024   ErrorCode ParallelComm::get_remote_handles(const bool store_remote_handles,
2025                                              const Range &from_range,
2026                                              Range &to_range,
2027                                              int to_proc,
2028                                              const std::vector<EntityHandle> &new_ents)
2029   {
2030     std::vector<EntityHandle> to_vector(from_range.size());
2031 
2032     ErrorCode result =
2033       get_remote_handles(store_remote_handles, from_range, &to_vector[0],
2034                          to_proc, new_ents);MB_CHK_SET_ERR(result, "Failed to get remote handles");
2035     std::copy(to_vector.begin(), to_vector.end(), range_inserter(to_range));
2036     return result;
2037   }
2038 
2039   ErrorCode ParallelComm::unpack_entities(unsigned char *&buff_ptr,
2040                                           const bool store_remote_handles,
2041                                           const int /*from_ind*/,
2042                                           const bool is_iface,
2043                                           std::vector<std::vector<EntityHandle> > &L1hloc,
2044                                           std::vector<std::vector<EntityHandle> > &L1hrem,
2045                                           std::vector<std::vector<int> > &L1p,
2046                                           std::vector<EntityHandle> &L2hloc,
2047                                           std::vector<EntityHandle> &L2hrem,
2048                                           std::vector<unsigned int> &L2p,
2049                                           std::vector<EntityHandle> &new_ents,
2050                                           const bool created_iface)
2051   {
2052     // General algorithm:
2053     // - unpack # entities
2054     // - save start of remote handle info, then scan forward to entity definition data
2055     // - for all vertices or entities w/ same # verts:
2056     //   . get entity type, num ents, and (if !vert) # verts
2057     //   . for each ent:
2058     //      o get # procs/handles in remote handle info
2059     //      o if # procs/handles > 2, check for already-created entity:
2060     //        x get index of owner proc (1st in proc list), resize L1 list if nec
2061     //        x look for already-arrived entity in L2 by owner handle
2062     //      o if no existing entity:
2063     //        x if iface, look for existing entity with same connect & type
2064     //        x if none found, create vertex or element
2065     //        x if !iface & multi-shared, save on L2
2066     //        x if !iface, put new entity on new_ents list
2067     //      o update proc/handle, pstatus tags, adjusting to put owner first if iface
2068     //      o if !iface, save new handle on L1 for all sharing procs
2069 
2070     // Lists of handles/procs to return to sending/other procs
2071     // L1hloc[p], L1hrem[p]: handle pairs [h, h'], where h is the local proc handle
2072     //         and h' is either the remote proc handle (if that is known) or
2073     //         the owner proc handle (otherwise);
2074     // L1p[p]: indicates whether h is remote handle (= -1) or owner (rank of owner)
2075     // L2hloc, L2hrem: local/remote handles for entities shared by > 2 procs;
2076     //         remote handles are on owning proc
2077     // L2p: owning procs for handles in L2hrem
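    //
    // Example with placeholder ranks (a sketch): receiving a ghost entity owned by
    // rank 2 and also shared with rank 5 leaves, after unpacking,
    //   L2hloc/L2hrem/L2p:   one new entry {local handle, owner's handle, 2}
    //   L1 lists for rank 2: {local handle, handle on rank 2, -1}
    //   L1 lists for rank 5: {local handle, owner's handle, 2} if the handle on
    //                        rank 5 is not yet known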
2078 
2079     ErrorCode result;
2080     bool done = false;
2081     ReadUtilIface *ru = NULL;
2082 
2083     result = mbImpl->query_interface(ru);MB_CHK_SET_ERR(result, "Failed to get ReadUtilIface");
2084 
2085     // 1. # entities = E
2086     int num_ents = 0;
2087     unsigned char *buff_save = buff_ptr;
2088     int i, j;
2089 
2090     if (store_remote_handles) {
2091       UNPACK_INT(buff_ptr, num_ents);
2092 
2093       buff_save = buff_ptr;
2094 
2095       // Save place where remote handle info starts, then scan forward to ents
2096       for (i = 0; i < num_ents; i++) {
2097         UNPACK_INT(buff_ptr, j);
2098         if (j < 0) {
2099           std::cout << "Should be non-negative # proc/handles.";
2100           return MB_FAILURE;
2101         }
2102 
2103         buff_ptr += j * (sizeof(int) + sizeof(EntityHandle));
2104       }
2105     }
2106 
2107     std::vector<EntityHandle> msg_ents;
2108 
2109     while (!done) {
2110       EntityType this_type = MBMAXTYPE;
2111       UNPACK_TYPE(buff_ptr, this_type);
2112       assert(this_type != MBENTITYSET);
2113 
2114       // MBMAXTYPE signifies end of entities data
2115       if (MBMAXTYPE == this_type)
2116         break;
2117 
2118       // Get the number of ents
2119       int num_ents2, verts_per_entity = 0;
2120       UNPACK_INT(buff_ptr, num_ents2);
2121 
2122       // Unpack the nodes per entity
2123       if (MBVERTEX != this_type && num_ents2) {
2124         UNPACK_INT(buff_ptr, verts_per_entity);
2125       }
2126 
2127       std::vector<int> ps(MAX_SHARING_PROCS, -1);
2128       std::vector<EntityHandle> hs(MAX_SHARING_PROCS, 0);
2129       for (int e = 0; e < num_ents2; e++) {
2130         // Check for existing entity, otherwise make new one
2131         EntityHandle new_h = 0;
2132         EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
2133         double coords[3];
2134         int num_ps = -1;
2135 
2136         //=======================================
2137         // Unpack all the data at once, to make sure the buffer pointers
2138         // are tracked correctly
2139         //=======================================
2140         if (store_remote_handles) {
2141           // Pointers to other procs/handles
2142           UNPACK_INT(buff_save, num_ps);
2143           if (0 >= num_ps) {
2144             std::cout << "Shouldn't ever be fewer than 1 procs here." << std::endl;
2145             return MB_FAILURE;
2146           }
2147 
2148           UNPACK_INTS(buff_save, &ps[0], num_ps);
2149           UNPACK_EH(buff_save, &hs[0], num_ps);
2150         }
2151 
2152         if (MBVERTEX == this_type) {
2153           UNPACK_DBLS(buff_ptr, coords, 3);
2154         }
2155         else {
2156           assert(verts_per_entity <= CN::MAX_NODES_PER_ELEMENT);
2157           UNPACK_EH(buff_ptr, connect, verts_per_entity);
2158 
2159           // Update connectivity to local handles
2160           result = get_local_handles(connect, verts_per_entity, msg_ents);MB_CHK_SET_ERR(result, "Failed to get local handles");
2161         }
2162 
2163         //=======================================
2164         // Now, process that data; begin by finding an identical
2165         // entity, if there is one
2166         //=======================================
2167         if (store_remote_handles) {
2168           result = find_existing_entity(is_iface, ps[0], hs[0], num_ps,
2169                                         connect, verts_per_entity,
2170                                         this_type,
2171                                         L2hloc, L2hrem, L2p,
2172                                         new_h);MB_CHK_SET_ERR(result, "Failed to get existing entity");
2173         }
2174 
2175         //=======================================
2176         // If we didn't find one, we'll have to create one
2177         //=======================================
2178         bool created_here = false;
2179         if (!new_h && !is_iface) {
2180           if (MBVERTEX == this_type) {
2181             // Create a vertex
2182             result = mbImpl->create_vertex(coords, new_h);MB_CHK_SET_ERR(result, "Failed to make new vertex");
2183           }
2184           else {
2185             // Create the element
2186             result = mbImpl->create_element(this_type, connect, verts_per_entity,
2187                                             new_h);MB_CHK_SET_ERR(result, "Failed to make new element");
2188 
2189             // Update adjacencies
2190             result = ru->update_adjacencies(new_h, 1, verts_per_entity,
2191                                             connect);MB_CHK_SET_ERR(result, "Failed to update adjacencies");
2192           }
2193 
2194           // Should have a new handle now
2195           assert(new_h);
2196 
2197           created_here = true;
2198         }
2199 
2200         //=======================================
2201         // Take care of sharing data
2202         //=======================================
2203 
2204         // Need to save entities found in order, for interpretation of
2205         // later parts of this message
2206         if (!is_iface) {
2207           assert(new_h);
2208           msg_ents.push_back(new_h);
2209         }
2210 
2211         if (created_here)
2212           new_ents.push_back(new_h);
2213 
2214         if (new_h && store_remote_handles) {
2215           unsigned char new_pstat = 0x0;
2216           if (is_iface) {
2217             new_pstat = PSTATUS_INTERFACE;
2218             // Here, lowest rank proc should be first
2219             int idx = std::min_element(&ps[0], &ps[0] + num_ps) - &ps[0];
2220             if (idx) {
2221               std::swap(ps[0], ps[idx]);
2222               std::swap(hs[0], hs[idx]);
2223             }
2224             // Set ownership based on lowest rank; can't be in update_remote_data, because
2225             // there we don't know whether it resulted from ghosting or not
2226             if ((num_ps > 1 && ps[0] != (int) rank()))
2227               new_pstat |= PSTATUS_NOT_OWNED;
2228           }
2229           else if (created_here) {
2230             if (created_iface)
2231               new_pstat = PSTATUS_NOT_OWNED;
2232             else
2233               new_pstat = PSTATUS_GHOST | PSTATUS_NOT_OWNED;
2234           }
2235 
2236           // Update sharing data and pstatus, adjusting order if iface
2237           result = update_remote_data(new_h, &ps[0], &hs[0], num_ps, new_pstat);MB_CHK_SET_ERR(result, "unpack_entities");
2238 
2239           // If a new multi-shared entity, save owner for subsequent lookup in L2 lists
2240           if (store_remote_handles && !is_iface && num_ps > 2) {
2241             L2hrem.push_back(hs[0]);
2242             L2hloc.push_back(new_h);
2243             L2p.push_back(ps[0]);
2244           }
2245 
2246           // Need to send this new handle to all sharing procs
2247           if (!is_iface) {
2248             for (j = 0; j < num_ps; j++) {
2249               if (ps[j] == (int)procConfig.proc_rank())
2250                 continue;
2251               int idx = get_buffers(ps[j]);
2252               if (idx == (int)L1hloc.size()) {
2253                 L1hloc.resize(idx + 1);
2254                 L1hrem.resize(idx + 1);
2255                 L1p.resize(idx + 1);
2256               }
2257 
2258               // Don't bother adding if it's already in the list
2259               std::vector<EntityHandle>::iterator vit =
2260                 std::find(L1hloc[idx].begin(), L1hloc[idx].end(), new_h);
2261               if (vit != L1hloc[idx].end()) {
2262                 // If it's in the list but its remote handle wasn't known, and we
2263                 // know it now, record the remote handle in the list
2264                 if (L1p[idx][vit-L1hloc[idx].begin()] != -1 && hs[j]) {
2265                   L1hrem[idx][vit-L1hloc[idx].begin()] = hs[j];
2266                   L1p[idx][vit-L1hloc[idx].begin()] = -1;
2267                 }
2268                 else
2269                   continue;
2270               }
2271               else {
2272                 if (!hs[j]) {
2273                   assert(-1 != ps[0] && num_ps > 2);
2274                   L1p[idx].push_back(ps[0]);
2275                   L1hrem[idx].push_back(hs[0]);
2276                 }
2277                 else {
2278                   assert("either this remote handle isn't in the remote list, or it's for another proc" &&
2279                          (std::find(L1hrem[idx].begin(), L1hrem[idx].end(), hs[j]) ==
2280                           L1hrem[idx].end() ||
2281                           L1p[idx][std::find(L1hrem[idx].begin(), L1hrem[idx].end(), hs[j]) -
2282                                    L1hrem[idx].begin()] != -1));
2283                   L1p[idx].push_back(-1);
2284                   L1hrem[idx].push_back(hs[j]);
2285                 }
2286                 L1hloc[idx].push_back(new_h);
2287               }
2288             }
2289           }
2290 
2291           assert("Shouldn't be here for non-shared entities" && -1 != num_ps);
2292           std::fill(&ps[0], &ps[num_ps], -1);
2293           std::fill(&hs[0], &hs[num_ps], 0);
2294         }
2295       }
2296 
2297       myDebug->tprintf(4, "Unpacked %d ents of type %s", num_ents2,
2298                        CN::EntityTypeName(this_type));
2299     }
2300 
2301     myDebug->tprintf(4, "Done unpacking entities.\n");
2302 
2303     // Need to sort here, to enable searching
2304     std::sort(new_ents.begin(), new_ents.end());
2305 
2306     return MB_SUCCESS;
2307   }
2308 
2309   ErrorCode ParallelComm::print_buffer(unsigned char *buff_ptr,
2310                                        int mesg_tag,
2311                                        int from_proc, bool sent)
2312   {
2313     std::cerr << procConfig.proc_rank();
2314     if (sent)
2315       std::cerr << " sent";
2316     else
2317       std::cerr << " received";
2318     std::cerr << " message type " << mesg_tag
2319               << " to/from proc " << from_proc << "; contents:" << std::endl;
2320 
2321     int msg_length, num_ents;
2322     unsigned char *orig_ptr = buff_ptr;
2323     UNPACK_INT(buff_ptr, msg_length);
2324     std::cerr << msg_length << " bytes..." << std::endl;
2325 
2326     if (MB_MESG_ENTS_SIZE == mesg_tag || MB_MESG_ENTS_LARGE == mesg_tag) {
2327       // 1. # entities = E
2328       int i, j, k;
2329       std::vector<int> ps;
2330       std::vector<EntityHandle> hs;
2331 
2332       UNPACK_INT(buff_ptr, num_ents);
2333       std::cerr << num_ents << " entities..." << std::endl;
2334 
2335       // Save place where remote handle info starts, then scan forward to ents
2336       for (i = 0; i < num_ents; i++) {
2337         UNPACK_INT(buff_ptr, j);
2338         if (0 > j)
2339           return MB_FAILURE;
2340         ps.resize(j);
2341         hs.resize(j);
2342         std::cerr << "Entity " << i << ", # procs = " << j << std::endl;
2343         UNPACK_INTS(buff_ptr, &ps[0], j);
2344         UNPACK_EH(buff_ptr, &hs[0], j);
2345         std::cerr << "   Procs: ";
2346         for (k = 0; k < j; k++)
2347           std::cerr << ps[k] << " ";
2348         std::cerr << std::endl;
2349         std::cerr << "   Handles: ";
2350         for (k = 0; k < j; k++)
2351           std::cerr << hs[k] << " ";
2352         std::cerr << std::endl;
2353 
2354         if (buff_ptr-orig_ptr > msg_length) {
2355           std::cerr << "End of buffer..." << std::endl;
2356           std::cerr.flush();
2357           return MB_FAILURE;
2358         }
2359       }
2360 
2361       while (true) {
2362         EntityType this_type = MBMAXTYPE;
2363         UNPACK_TYPE(buff_ptr, this_type);
2364         assert(this_type != MBENTITYSET);
2365 
2366         // MBMAXTYPE signifies end of entities data
2367         if (MBMAXTYPE == this_type)
2368           break;
2369 
2370         // Get the number of ents
2371         int num_ents2, verts_per_entity = 0;
2372         UNPACK_INT(buff_ptr, num_ents2);
2373 
2374         // Unpack the nodes per entity
2375         if (MBVERTEX != this_type && num_ents2) {
2376           UNPACK_INT(buff_ptr, verts_per_entity);
2377         }
2378 
2379         std::cerr << "Type: " << CN::EntityTypeName(this_type)
2380                   << "; num_ents = " << num_ents2;
2381         if (MBVERTEX != this_type)
2382           std::cerr << "; verts_per_ent = " << verts_per_entity;
2383         std::cerr << std::endl;
2384         if (num_ents2 < 0 || num_ents2 > msg_length) {
2385           std::cerr << "Wrong number of entities, returning." << std::endl;
2386           return MB_FAILURE;
2387         }
2388 
2389         for (int e = 0; e < num_ents2; e++) {
2390           // Check for existing entity, otherwise make new one
2391           if (MBVERTEX == this_type) {
2392             double coords[3];
2393             UNPACK_DBLS(buff_ptr, coords, 3);
2394             std::cerr << "xyz = " << coords[0] << ", " << coords[1] << ", "
2395                       << coords[2] << std::endl;
2396           }
2397           else {
2398             EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
2399             assert(verts_per_entity <= CN::MAX_NODES_PER_ELEMENT);
2400             UNPACK_EH(buff_ptr, connect, verts_per_entity);
2401 
2402             // Update connectivity to local handles
2403             std::cerr << "Connectivity: ";
2404             for (k = 0; k < verts_per_entity; k++)
2405               std::cerr << connect[k] << " ";
2406             std::cerr << std::endl;
2407           }
2408 
2409           if (buff_ptr-orig_ptr > msg_length) {
2410             std::cerr << "End of buffer..." << std::endl;
2411             std::cerr.flush();
2412             return MB_FAILURE;
2413           }
2414         }
2415       }
2416     }
2417     else if (MB_MESG_REMOTEH_SIZE == mesg_tag || MB_MESG_REMOTEH_LARGE == mesg_tag) {
2418       UNPACK_INT(buff_ptr, num_ents);
2419       std::cerr << num_ents << " entities..." << std::endl;
2420       if (0 > num_ents || num_ents > msg_length) {
2421         std::cerr << "Wrong number of entities, returning." << std::endl;
2422         return MB_FAILURE;
2423       }
2424       std::vector<EntityHandle> L1hloc(num_ents), L1hrem(num_ents);
2425       std::vector<int> L1p(num_ents);
2426       UNPACK_INTS(buff_ptr, &L1p[0], num_ents);
2427       UNPACK_EH(buff_ptr, &L1hrem[0], num_ents);
2428       UNPACK_EH(buff_ptr, &L1hloc[0], num_ents);
2429       std::cerr << num_ents << " Entity pairs; hremote/hlocal/proc: " << std::endl;
2430       for (int i = 0; i < num_ents; i++) {
2431         EntityType etype = TYPE_FROM_HANDLE(L1hloc[i]);
2432         std::cerr << CN::EntityTypeName(etype) << ID_FROM_HANDLE(L1hrem[i]) << ", "
2433                   << CN::EntityTypeName(etype) << ID_FROM_HANDLE(L1hloc[i]) << ", "
2434                   << L1p[i] << std::endl;
2435       }
2436 
2437       if (buff_ptr-orig_ptr > msg_length) {
2438         std::cerr << "End of buffer..." << std::endl;
2439         std::cerr.flush();
2440         return MB_FAILURE;
2441       }
2442     }
2443     else if (mesg_tag == MB_MESG_TAGS_SIZE || mesg_tag == MB_MESG_TAGS_LARGE) {
2444       int num_tags, dum1, data_type, tag_size;
2445       UNPACK_INT(buff_ptr, num_tags);
2446       std::cerr << "Number of tags = " << num_tags << std::endl;
2447       for (int i = 0; i < num_tags; i++) {
2448         std::cerr << "Tag " << i << ":" << std::endl;
2449         UNPACK_INT(buff_ptr, tag_size);
2450         UNPACK_INT(buff_ptr, dum1);
2451         UNPACK_INT(buff_ptr, data_type);
2452         std::cerr << "Tag size, type, data type = " << tag_size << ", "
2453                   << dum1 << ", " << data_type << std::endl;
2454         UNPACK_INT(buff_ptr, dum1);
2455         std::cerr << "Default value size = " << dum1 << std::endl;
2456         buff_ptr += dum1;
2457         UNPACK_INT(buff_ptr, dum1);
2458         std::string name((char*)buff_ptr, dum1);
2459         std::cerr << "Tag name = " << name.c_str() << std::endl;
2460         buff_ptr += dum1;
2461         UNPACK_INT(buff_ptr, num_ents);
2462         std::cerr << "Number of ents = " << num_ents << std::endl;
2463         std::vector<EntityHandle> tmp_buff(num_ents);
2464         UNPACK_EH(buff_ptr, &tmp_buff[0], num_ents);
2465         int tot_length = 0;
2466         for (int j = 0; j < num_ents; j++) {
2467           EntityType etype = TYPE_FROM_HANDLE(tmp_buff[j]);
2468           std::cerr << CN::EntityTypeName(etype) << " "
2469                     << ID_FROM_HANDLE(tmp_buff[j])
2470                     << ", tag = ";
2471           if (tag_size == MB_VARIABLE_LENGTH) {
2472             UNPACK_INT(buff_ptr, dum1);
2473             tot_length += dum1;
2474             std::cerr << "(variable, length = " << dum1 << ")" << std::endl;
2475           }
2476           else if (data_type == MB_TYPE_DOUBLE) {
2477             double dum_dbl;
2478             UNPACK_DBL(buff_ptr, dum_dbl);
2479             std::cerr << dum_dbl << std::endl;
2480           }
2481           else if (data_type == MB_TYPE_INTEGER) {
2482             int dum_int;
2483             UNPACK_INT(buff_ptr, dum_int);
2484             std::cerr << dum_int << std::endl;
2485           }
2486           else if (data_type == MB_TYPE_OPAQUE) {
2487             std::cerr << "(opaque)" << std::endl;
2488             buff_ptr += tag_size;
2489           }
2490           else if (data_type == MB_TYPE_HANDLE) {
2491             EntityHandle dum_eh;
2492             UNPACK_EH(buff_ptr, &dum_eh, 1);
2493             std::cerr <<  dum_eh << std::endl;
2494           }
2495           else if (data_type == MB_TYPE_BIT) {
2496             std::cerr << "(bit)" << std::endl;
2497             buff_ptr += tag_size;
2498           }
2499         }
2500         if (tag_size == MB_VARIABLE_LENGTH)
2501           buff_ptr += tot_length;
2502       }
2503     }
2504     else {
2505       assert(false);
2506       return MB_FAILURE;
2507     }
2508 
2509     std::cerr.flush();
2510 
2511     return MB_SUCCESS;
2512   }
2513 
2514   ErrorCode ParallelComm::list_entities(const EntityHandle *ents, int num_ents)
2515   {
2516     if (NULL == ents && 0 == num_ents) {
2517       Range shared_ents;
2518       std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(shared_ents));
2519       shared_ents.print("Shared entities:\n");
2520       return MB_SUCCESS;
2521     }
2522     else if (NULL == ents && 0 != num_ents) {
2523       return list_entities(&sharedEnts[0], sharedEnts.size());
2524     }
2525 
2526     unsigned char pstat;
2527     EntityHandle tmp_handles[MAX_SHARING_PROCS];
2528     int tmp_procs[MAX_SHARING_PROCS];
2529     unsigned int num_ps;
2530     ErrorCode result;
2531 
2532     for (int i = 0; i < num_ents; i++) {
2533       result = mbImpl->list_entities(ents + i, 1);MB_CHK_ERR(result);
2534 
2535       result = get_sharing_data(ents[i], tmp_procs, tmp_handles, pstat, num_ps);MB_CHK_SET_ERR(result, "Failed to get sharing data");
2536 
2537       std::cout << "Pstatus: ";
2538       if (!num_ps)
2539         std::cout << "local " << std::endl;
2540       else {
2541         if (pstat & PSTATUS_NOT_OWNED)
2542           std::cout << "NOT_OWNED; ";
2543         if (pstat & PSTATUS_SHARED)
2544           std::cout << "SHARED; ";
2545         if (pstat & PSTATUS_MULTISHARED)
2546           std::cout << "MULTISHARED; ";
2547         if (pstat & PSTATUS_INTERFACE)
2548           std::cout << "INTERFACE; ";
2549         if (pstat & PSTATUS_GHOST)
2550           std::cout << "GHOST; ";
2551         std::cout << std::endl;
2552         for (unsigned int j = 0; j < num_ps; j++) {
2553           std::cout << "  proc " << tmp_procs[j] << " id (handle) "
2554                     << mbImpl->id_from_handle(tmp_handles[j])
2555                     << "(" << tmp_handles[j] << ")" << std::endl;
2556         }
2557       }
2558       std::cout << std::endl;
2559     }
2560 
2561     return MB_SUCCESS;
2562   }
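  /* Illustrative usage sketch, not taken from MOAB itself: how a driver might
   * call the debugging helper above, assuming `pcomm` is an initialized
   * ParallelComm* and the call happens after shared entities have been resolved.
   *
   *   // Print the handles of all shared entities on this rank...
   *   ErrorCode rval = pcomm->list_entities(NULL, 0);
   *   // ...then print detailed sharing data (procs, remote handles, pstatus)
   *   // for each of them.
   *   if (MB_SUCCESS == rval)
   *     rval = pcomm->list_entities(NULL, 1);
   */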
2563 
2564   ErrorCode ParallelComm::list_entities(const Range &ents)
2565   {
2566     for (Range::iterator rit = ents.begin(); rit != ents.end(); ++rit)
2567       list_entities(&(*rit), 1);
2568 
2569     return MB_SUCCESS;
2570   }
2571 
2572   ErrorCode ParallelComm::update_remote_data(Range &local_range,
2573                                              Range &remote_range,
2574                                              int other_proc,
2575                                              const unsigned char add_pstat)
2576   {
2577     Range::iterator rit, rit2;
2578     ErrorCode result = MB_SUCCESS;
2579 
2580     // For each pair of local/remote handles:
2581     for (rit = local_range.begin(), rit2 = remote_range.begin();
2582          rit != local_range.end(); ++rit, ++rit2) {
2583       result = update_remote_data(*rit, &other_proc, &(*rit2), 1, add_pstat);MB_CHK_ERR(result);
2584     }
2585 
2586     return MB_SUCCESS;
2587   }
2588 
2589   ErrorCode ParallelComm::update_remote_data(const EntityHandle new_h,
2590                                              const int *ps,
2591                                              const EntityHandle *hs,
2592                                              const int num_ps,
2593                                              const unsigned char add_pstat
2594 // The following lines left in for future debugging, at least until I trust this function; tjt, 10/4/2013
2595 //                                           , int *new_ps,
2596 //                                           EntityHandle *new_hs,
2597 //                                           int &new_numps,
2598 //                                           unsigned char &new_pstat
2599                                              )
2600   {
2601     // Get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
2602     // in this function, so no need to initialize; sharing data does not include
2603     // this proc if shared with only one other
2604 
2605     // Following variables declared here to avoid compiler errors
2606     int new_numps;
2607     unsigned char new_pstat;
2608     std::vector<int> new_ps(MAX_SHARING_PROCS, -1);
2609     std::vector<EntityHandle> new_hs(MAX_SHARING_PROCS, 0);
2610 
2611     new_numps = 0;
2612     ErrorCode result = get_sharing_data(new_h, &new_ps[0], &new_hs[0], new_pstat, new_numps);MB_CHK_SET_ERR(result, "Failed to get sharing data in update_remote_data");
2613     int num_exist = new_numps;
2614 
2615     // Add new pstat info to the flag
2616     new_pstat |= add_pstat;
2617 
2618 /*
2619 #define plist(str, lst, siz)                                          \
2620     std::cout << str << "(";                                          \
2621     for (int i = 0; i < (int)siz; i++) std::cout << lst[i] << " ";    \
2622     std::cout << ") ";                                                \
2623 
2624     std::cout << "update_remote_data: rank = " << rank() << ", new_h = " << new_h << std::endl;
2625     std::string ostr;
2626     plist("ps", ps, num_ps);
2627     plist("hs", hs, num_ps);
2628     print_pstatus(add_pstat, ostr);
2629     std::cout << ", add_pstat = " << ostr.c_str() << std::endl;
2630     plist("tag_ps", new_ps, new_numps);
2631     plist("tag_hs", new_hs, new_numps);
2632     assert(new_numps <= size());
2633     print_pstatus(new_pstat, ostr);
2634     std::cout << ", tag_pstat=" << ostr.c_str() << std::endl;
2635 */
2636 
2637 #ifndef NDEBUG
2638     {
2639       // Check for duplicates in proc list
2640       std::set<unsigned int> dumprocs;
2641       unsigned int dp = 0;
2642       for (; (int) dp < num_ps && -1 != ps[dp]; dp++)
2643         dumprocs.insert(ps[dp]);
2644       assert(dp == dumprocs.size());
2645     }
2646 #endif
2647 
2648     // If only one sharer and I'm the owner, insert myself in the list;
2649     // otherwise, my data is checked at the end
2650     if (1 == new_numps && !(new_pstat & PSTATUS_NOT_OWNED)) {
2651       new_hs[1] = new_hs[0];
2652       new_ps[1] = new_ps[0];
2653       new_hs[0] = new_h;
2654       new_ps[0] = rank();
2655       new_numps = 2;
2656     }
2657 
2658     // Now put passed-in data onto lists
2659     int idx;
2660     for (int i = 0; i < num_ps; i++) {
2661       idx = std::find(&new_ps[0], &new_ps[0] + new_numps, ps[i]) - &new_ps[0];
2662       if (idx < new_numps) {
2663         if (!new_hs[idx] && hs[i])
2664           // h on list is 0 and passed-in h is non-zero, replace it
2665           new_hs[idx] = hs[i];
2666         else
2667           assert(!hs[i] || new_hs[idx] == hs[i]);
2668       }
2669       else {
2670         if (new_numps + 1 == MAX_SHARING_PROCS) {
2671           MB_SET_ERR(MB_FAILURE, "Exceeded MAX_SHARING_PROCS for " << CN::EntityTypeName(TYPE_FROM_HANDLE(new_h))
2672               << ' ' << ID_FROM_HANDLE(new_h) << " in process " << rank());
2673         }
2674         new_ps[new_numps] = ps[i];
2675         new_hs[new_numps] = hs[i];
2676         new_numps++;
2677       }
2678     }
2679 
2680     // Add myself, if it isn't there already
2681     idx = std::find(&new_ps[0], &new_ps[0] + new_numps, rank()) - &new_ps[0];
2682     if (idx == new_numps) {
2683       new_ps[new_numps] = rank();
2684       new_hs[new_numps] = new_h;
2685       new_numps++;
2686     }
2687     else if (!new_hs[idx] && new_numps > 2)
2688       new_hs[idx] = new_h;
2689 
2690     // Proc list is complete; update for shared, multishared
2691     if (new_numps > 1) {
2692       if (new_numps > 2) new_pstat |= PSTATUS_MULTISHARED;
2693       new_pstat |= PSTATUS_SHARED;
2694     }
2695 
2696 /*
2697     plist("new_ps", new_ps, new_numps);
2698     plist("new_hs", new_hs, new_numps);
2699     print_pstatus(new_pstat, ostr);
2700     std::cout << ", new_pstat=" << ostr.c_str() << std::endl;
2701     std::cout << std::endl;
2702 */
2703 
2704     result = set_sharing_data(new_h, new_pstat, num_exist, new_numps, &new_ps[0], &new_hs[0]);MB_CHK_SET_ERR(result, "Failed to set sharing data in update_remote_data");
2705 
2706     if (new_pstat & PSTATUS_SHARED)
2707       sharedEnts.push_back(new_h);
2708 
2709     return MB_SUCCESS;
2710   }
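  /* Worked example of the merge above, with hypothetical values; assume this is
   * rank 2, the entity was previously shared only with proc 5, and this rank
   * owns it:
   *
   *   existing:  new_ps = {5}        new_hs = {h5}            new_numps = 1
   *   incoming:  ps     = {7}        hs     = {h7}            num_ps    = 1
   *
   *   owner inserted at front:
   *              new_ps = {2, 5}     new_hs = {new_h, h5}     new_numps = 2
   *   incoming data merged:
   *              new_ps = {2, 5, 7}  new_hs = {new_h, h5, h7} new_numps = 3
   *
   * and new_pstat gains PSTATUS_SHARED | PSTATUS_MULTISHARED before
   * set_sharing_data() writes the result back to the sharing tags.
   */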
2711 
2712   ErrorCode ParallelComm::update_remote_data_old(const EntityHandle new_h,
2713                                                  const int *ps,
2714                                                  const EntityHandle *hs,
2715                                                  const int num_ps,
2716                                                  const unsigned char add_pstat)
2717   {
2718     EntityHandle tag_hs[MAX_SHARING_PROCS];
2719     int tag_ps[MAX_SHARING_PROCS];
2720     unsigned char pstat;
2721     // Get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
2722     // in this function, so no need to initialize
2723     unsigned int num_exist;
2724     ErrorCode result = get_sharing_data(new_h, tag_ps, tag_hs, pstat, num_exist);MB_CHK_ERR(result);
2725 
2726 #ifndef NDEBUG
2727     {
2728       // Check for duplicates in proc list
2729       std::set<unsigned int> dumprocs;
2730       unsigned int dp = 0;
2731       for (; (int) dp < num_ps && -1 != ps[dp]; dp++)
2732         dumprocs.insert(ps[dp]);
2733       assert(dp == dumprocs.size());
2734     }
2735 #endif
2736 
2737     // Add any new sharing data
2738     bool changed = false;
2739     int idx;
2740     if (!num_exist) {
2741       // Just take what caller passed
2742       memcpy(tag_ps, ps, num_ps*sizeof(int));
2743       memcpy(tag_hs, hs, num_ps*sizeof(EntityHandle));
2744       num_exist = num_ps;
2745       // If it's only one, hopefully I'm not there yet...
2746       assert("I shouldn't be the only proc there." &&
2747              (1 != num_exist || ps[0] != (int)procConfig.proc_rank()));
2748       changed = true;
2749     }
2750     else {
2751       for (int i = 0; i < num_ps; i++) {
2752         idx = std::find(tag_ps, tag_ps + num_exist, ps[i]) - tag_ps;
2753         if (idx == (int) num_exist) {
2754           if (num_exist == MAX_SHARING_PROCS) {
2755             std::cerr << "Exceeded MAX_SHARING_PROCS for "
2756                       << CN::EntityTypeName(TYPE_FROM_HANDLE(new_h))
2757                       << ' ' << ID_FROM_HANDLE(new_h)
2758                       << " in process " << proc_config().proc_rank()
2759                       << std::endl;
2760             std::cerr.flush();
2761             MPI_Abort(proc_config().proc_comm(), 66);
2762           }
2763 
2764           // If there's only 1 existing sharing proc and it's not me, we'll
2765           // end up with 3; add me at the front if I own this entity, else after the owner
2766           if (!i && num_ps == 1 && num_exist == 1 &&
2767               ps[0] != (int)procConfig.proc_rank()) {
2768             int j = 1;
2769             // If I own this entity, put me at front, otherwise after first
2770             if (!(pstat & PSTATUS_NOT_OWNED)) {
2771               tag_ps[1] = tag_ps[0];
2772               tag_hs[1] = tag_hs[0];
2773               j = 0;
2774             }
2775             tag_ps[j] = procConfig.proc_rank();
2776             tag_hs[j] = new_h;
2777             num_exist++;
2778           }
2779 
2780           tag_ps[num_exist] = ps[i];
2781           tag_hs[num_exist] = hs[i];
2782           num_exist++;
2783           changed = true;
2784         }
2785         else if (0 == tag_hs[idx]) {
2786           tag_hs[idx] = hs[i];
2787           changed = true;
2788         }
2789         else if (0 != hs[i]) {
2790           assert(hs[i] == tag_hs[idx]);
2791         }
2792       }
2793     }
2794 
2795     // Adjust for interface layer if necessary
2796     if (add_pstat & PSTATUS_INTERFACE) {
2797       idx = std::min_element(tag_ps, tag_ps + num_exist) - tag_ps;
2798       if (idx) {
2799         int tag_proc = tag_ps[idx];
2800         tag_ps[idx] = tag_ps[0];
2801         tag_ps[0] = tag_proc;
2802         EntityHandle tag_h = tag_hs[idx];
2803         tag_hs[idx] = tag_hs[0];
2804         tag_hs[0] = tag_h;
2805         changed = true;
2806         if (tag_ps[0] != (int)procConfig.proc_rank())
2807           pstat |= PSTATUS_NOT_OWNED;
2808       }
2809     }
2810 
2811     if (!changed)
2812       return MB_SUCCESS;
2813 
2814     assert("interface entities should have > 1 proc" &&
2815            (!(add_pstat & PSTATUS_INTERFACE) || num_exist > 1));
2816     assert("ghost entities should have > 1 proc" &&
2817            (!(add_pstat & PSTATUS_GHOST) || num_exist > 1));
2818 
2819     // If it's multi-shared and we created the entity in this unpack,
2820     // local handle probably isn't in handle list yet
2821     if (num_exist > 2) {
2822       idx = std::find(tag_ps, tag_ps + num_exist, procConfig.proc_rank()) - tag_ps;
2823       assert(idx < (int) num_exist);
2824       if (!tag_hs[idx])
2825         tag_hs[idx] = new_h;
2826     }
2827 
2828     int tag_p;
2829     EntityHandle tag_h;
2830 
2831     // Update pstat
2832     pstat |= add_pstat;
2833 
2834     if (num_exist > 2)
2835       pstat |= (PSTATUS_MULTISHARED | PSTATUS_SHARED);
2836     else if (num_exist > 0)
2837       pstat |= PSTATUS_SHARED;
2838 
2839 //    compare_remote_data(new_h, num_ps, hs, ps, add_pstat,
2840 //                        num_exist, tag_hs, tag_ps, pstat);
2841 
2842     // Reset single shared proc/handle if was shared and moving to multi-shared
2843     if (num_exist > 2 && !(pstat & PSTATUS_MULTISHARED) &&
2844         (pstat & PSTATUS_SHARED)) {
2845       // Must remove sharedp/h first, which really means set to default value
2846       tag_p = -1;
2847       result = mbImpl->tag_set_data(sharedp_tag(), &new_h, 1, &tag_p);MB_CHK_SET_ERR(result, "Failed to set sharedp tag data");
2848       tag_h = 0;
2849       result = mbImpl->tag_set_data(sharedh_tag(), &new_h, 1, &tag_h);MB_CHK_SET_ERR(result, "Failed to set sharedh tag data");
2850     }
2851 
2852     // Set sharing tags
2853     if (num_exist > 2) {
2854       std::fill(tag_ps + num_exist, tag_ps + MAX_SHARING_PROCS, -1);
2855       std::fill(tag_hs + num_exist, tag_hs + MAX_SHARING_PROCS, 0);
2856       result = mbImpl->tag_set_data(sharedps_tag(), &new_h, 1, tag_ps);MB_CHK_SET_ERR(result, "Failed to set sharedps tag data");
2857       result = mbImpl->tag_set_data(sharedhs_tag(), &new_h, 1, tag_hs);MB_CHK_SET_ERR(result, "Failed to set sharedhs tag data");
2858 
2859 #ifndef NDEBUG
2860       {
2861         // Check for duplicates in proc list
2862         std::set<unsigned int> dumprocs;
2863         unsigned int dp = 0;
2864         for (; dp < num_exist && -1 != tag_ps[dp]; dp++)
2865           dumprocs.insert(tag_ps[dp]);
2866         assert(dp == dumprocs.size());
2867       }
2868 #endif
2869     }
2870     else if (num_exist == 2 || num_exist == 1) {
2871       if (tag_ps[0] == (int) procConfig.proc_rank()) {
2872         assert(2 == num_exist && tag_ps[1] != (int) procConfig.proc_rank());
2873         tag_ps[0] = tag_ps[1];
2874         tag_hs[0] = tag_hs[1];
2875       }
2876       assert(tag_ps[0] != -1 && tag_hs[0] != 0);
2877       result = mbImpl->tag_set_data(sharedp_tag(), &new_h, 1, tag_ps);MB_CHK_SET_ERR(result, "Failed to set sharedp tag data");
2878       result = mbImpl->tag_set_data(sharedh_tag(), &new_h, 1, tag_hs);MB_CHK_SET_ERR(result, "Failed to set sharedh tag data");
2879     }
2880 
2881     // Now set new pstatus
2882     result = mbImpl->tag_set_data(pstatus_tag(), &new_h, 1, &pstat);MB_CHK_SET_ERR(result, "Failed to set pstatus tag data");
2883 
2884     if (pstat & PSTATUS_SHARED)
2885       sharedEnts.push_back(new_h);
2886 
2887     return MB_SUCCESS;
2888   }
2889 
2890   ErrorCode ParallelComm::get_sharing_data(const Range &entities,
2891                                            std::set<int> &procs,
2892                                            int operation)
2893   {
2894     // Get the union or intersection of sharing data for multiple entities
2895     ErrorCode result;
2896     int sp2[MAX_SHARING_PROCS];
2897     int num_ps;
2898     unsigned char pstat;
2899     std::set<int> tmp_procs;
2900     procs.clear();
2901 
2902     for (Range::const_iterator rit = entities.begin(); rit != entities.end(); ++rit) {
2903       // Get sharing procs
2904       result = get_sharing_data(*rit, sp2, NULL, pstat, num_ps);MB_CHK_SET_ERR(result, "Failed to get sharing data in get_sharing_data");
2905       if (!(pstat & PSTATUS_SHARED) && Interface::INTERSECT == operation) {
2906         procs.clear();
2907         return MB_SUCCESS;
2908       }
2909 
2910       if (rit == entities.begin()) {
2911         std::copy(sp2, sp2 + num_ps, std::inserter(procs, procs.begin()));
2912       }
2913       else {
2914         std::sort(sp2, sp2 + num_ps);
2915         tmp_procs.clear();
2916         if (Interface::UNION == operation)
2917           std::set_union(procs.begin(), procs.end(),
2918                          sp2, sp2 + num_ps, std::inserter(tmp_procs, tmp_procs.end()));
2919         else if (Interface::INTERSECT == operation)
2920           std::set_intersection(procs.begin(), procs.end(),
2921                                 sp2, sp2 + num_ps, std::inserter(tmp_procs, tmp_procs.end()));
2922         else {
2923           assert("Unknown operation." && false);
2924           return MB_FAILURE;
2925         }
2926         procs.swap(tmp_procs);
2927       }
2928       if (Interface::INTERSECT == operation && procs.empty())
2929         return MB_SUCCESS;
2930     }
2931 
2932     return MB_SUCCESS;
2933   }
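  /* Illustrative sketch, not from MOAB itself: the difference between the two
   * supported operations, assuming `pcomm` is an initialized ParallelComm* and
   * `verts` is a Range of shared vertices.
   *
   *   std::set<int> procs;
   *   // Union: every proc that shares at least one entity in verts.
   *   pcomm->get_sharing_data(verts, procs, Interface::UNION);
   *   // Intersection: only procs sharing *every* entity in verts; the set is
   *   // cleared (and the call returns) as soon as an unshared entity is seen.
   *   pcomm->get_sharing_data(verts, procs, Interface::INTERSECT);
   */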
2934 
2935   ErrorCode ParallelComm::get_sharing_data(const EntityHandle entity,
2936                                            int *ps,
2937                                            EntityHandle *hs,
2938                                            unsigned char &pstat,
2939                                            unsigned int &num_ps)
2940   {
2941     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &entity, 1, &pstat);MB_CHK_SET_ERR(result, "Failed to get pstatus tag data");
2942     if (pstat & PSTATUS_MULTISHARED) {
2943       result = mbImpl->tag_get_data(sharedps_tag(), &entity, 1, ps);MB_CHK_SET_ERR(result, "Failed to get sharedps tag data");
2944       if (hs) {
2945         result = mbImpl->tag_get_data(sharedhs_tag(), &entity, 1, hs);MB_CHK_SET_ERR(result, "Failed to get sharedhs tag data");
2946       }
2947       num_ps = std::find(ps, ps + MAX_SHARING_PROCS, -1) - ps;
2948     }
2949     else if (pstat & PSTATUS_SHARED) {
2950       result = mbImpl->tag_get_data(sharedp_tag(), &entity, 1, ps);MB_CHK_SET_ERR(result, "Failed to get sharedp tag data");
2951       if (hs) {
2952         result = mbImpl->tag_get_data(sharedh_tag(), &entity, 1, hs);MB_CHK_SET_ERR(result, "Failed to get sharedh tag data");
2953         hs[1] = 0;
2954       }
2955       // Initialize past end of data
2956       ps[1] = -1;
2957       num_ps = 1;
2958     }
2959     else {
2960       ps[0] = -1;
2961       if (hs)
2962         hs[0] = 0;
2963       num_ps = 0;
2964     }
2965 
2966     assert(MAX_SHARING_PROCS >= num_ps);
2967 
2968     return MB_SUCCESS;
2969   }
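  /* Illustrative sketch, not from MOAB itself: querying one entity's sharing
   * data the same way the packing/unpacking code does, assuming `pcomm` is an
   * initialized ParallelComm* and `ent` is a shared entity.
   *
   *   int ps[MAX_SHARING_PROCS];
   *   EntityHandle hs[MAX_SHARING_PROCS];
   *   unsigned char pstat;
   *   unsigned int num_ps;
   *   ErrorCode rval = pcomm->get_sharing_data(ent, ps, hs, pstat, num_ps);
   *   // num_ps == 0: purely local; num_ps == 1: ps[0]/hs[0] hold the single
   *   // sharing proc and remote handle; num_ps > 1: multishared, with the
   *   // lists terminated by -1 / 0 past the valid entries.
   */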
2970 
2971   ErrorCode ParallelComm::find_existing_entity(const bool is_iface,
2972                                                const int owner_p,
2973                                                const EntityHandle owner_h,
2974                                                const int num_ps,
2975                                                const EntityHandle *connect,
2976                                                const int num_connect,
2977                                                const EntityType this_type,
2978                                                std::vector<EntityHandle> &L2hloc,
2979                                                std::vector<EntityHandle> &L2hrem,
2980                                                std::vector<unsigned int> &L2p,
2981                                                EntityHandle &new_h)
2982   {
2983     new_h = 0;
2984     if (!is_iface && num_ps > 2) {
2985       for (unsigned int i = 0; i < L2hrem.size(); i++) {
2986         if (L2hrem[i] == owner_h && owner_p == (int) L2p[i]) {
2987           new_h = L2hloc[i];
2988           return MB_SUCCESS;
2989         }
2990       }
2991     }
2992 
2993     // If we got here and it's a vertex, we don't need to look further
2994     if (MBVERTEX == this_type || !connect || !num_connect) return MB_SUCCESS;
2995 
2996     Range tmp_range;
2997     ErrorCode result = mbImpl->get_adjacencies(connect, num_connect,
2998                                                CN::Dimension(this_type), false,
2999                                                tmp_range);MB_CHK_SET_ERR(result, "Failed to get existing entity");
3000     if (!tmp_range.empty()) {
3001       // Found a corresponding entity - return target
3002       new_h = *tmp_range.begin();
3003     }
3004     else {
3005       new_h = 0;
3006     }
3007 
3008     return MB_SUCCESS;
3009   }
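  /* Behavior summary, with hypothetical values: for a non-interface entity
   * shared by more than two procs, a matching (owner_h, owner_p) pair already
   * recorded in L2hrem/L2p yields the local handle stored in L2hloc; otherwise,
   * for non-vertex types, any existing entity of dimension
   * CN::Dimension(this_type) adjacent to all of `connect` is returned in new_h,
   * and new_h stays 0 if nothing is found.
   */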
3010 
3011   ErrorCode ParallelComm::get_local_handles(const Range &remote_handles,
3012                                             Range &local_handles,
3013                                             const std::vector<EntityHandle> &new_ents)
3014   {
3015     std::vector<EntityHandle> rh_vec;
3016     rh_vec.reserve(remote_handles.size());
3017     std::copy(remote_handles.begin(), remote_handles.end(), std::back_inserter(rh_vec));
3018     ErrorCode result = get_local_handles(&rh_vec[0], remote_handles.size(), new_ents);
3019     std::copy(rh_vec.begin(), rh_vec.end(), range_inserter(local_handles));
3020     return result;
3021   }
3022 
3023   ErrorCode ParallelComm::get_local_handles(EntityHandle *from_vec,
3024                                             int num_ents,
3025                                             const Range &new_ents)
3026   {
3027     std::vector<EntityHandle> tmp_ents;
3028     std::copy(new_ents.begin(), new_ents.end(), std::back_inserter(tmp_ents));
3029     return get_local_handles(from_vec, num_ents, tmp_ents);
3030   }
3031 
3032   ErrorCode ParallelComm::get_local_handles(EntityHandle *from_vec,
3033                                             int num_ents,
3034                                             const std::vector<EntityHandle> &new_ents)
3035   {
3036     for (int i = 0; i < num_ents; i++) {
3037       if (TYPE_FROM_HANDLE(from_vec[i]) == MBMAXTYPE) {
3038         assert(ID_FROM_HANDLE(from_vec[i]) < (int) new_ents.size());
3039         from_vec[i] = new_ents[ID_FROM_HANDLE(from_vec[i])];
3040       }
3041     }
3042 
3043     return MB_SUCCESS;
3044   }
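  /* Example of the convention handled above (hypothetical values): during
   * unpacking, a "remote" handle whose type field is MBMAXTYPE is really an
   * index into the list of newly created entities, so a handle encoding
   * (MBMAXTYPE, id = 3) is replaced by new_ents[3], while handles of any real
   * entity type are left untouched.
   */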
3045 
3046   /*
3047   template <typename T> void
3048   insert_in_array(T* array, size_t array_size, size_t location, T value)
3049   {
3050     assert(location + 1 < array_size);
3051     for (size_t i = array_size - 1; i > location; i--)
3052       array[i] = array[i - 1];
3053     array[location] = value;
3054   }
3055   */
3056 
3057   ErrorCode ParallelComm::pack_range_map(Range &key_range, EntityHandle val_start,
3058                                          HandleMap &handle_map)
3059   {
3060     for (Range::const_pair_iterator key_it = key_range.const_pair_begin();
3061          key_it != key_range.const_pair_end(); ++key_it) {
3062       int tmp_num = (*key_it).second - (*key_it).first + 1;
3063       handle_map.insert((*key_it).first, val_start, tmp_num);
3064       val_start += tmp_num;
3065     }
3066 
3067     return MB_SUCCESS;
3068   }
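  /* Worked example with hypothetical handles: for key_range = {10-12, 20-21}
   * and val_start = 100, the loop above inserts two contiguous runs into the
   * RangeMap,
   *
   *   10->100, 11->101, 12->102   (run of 3 starting at val_start)
   *   20->103, 21->104            (run of 2 starting at val_start + 3)
   *
   * so later remote-to-local lookups search over runs rather than individual
   * handles.
   */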
3069 
3070   ErrorCode ParallelComm::pack_sets(Range &entities,
3071                                     Buffer *buff,
3072                                     const bool store_remote_handles,
3073                                     const int to_proc)
3074   {
3075     // SETS:
3076     // . #sets
3077     // . options[#sets] (unsigned int)
3078     // . #unique-ids (0 or #sets), then unique-ids[#sets] if any are nonzero
3079     // . for each set:
3080     //   - #ents in set
3081     //   - handles[#ents]
3082     // . for each set:
3083     //   - #parents (always 0 if store_remote_handles)
3084     //   - #children (always 0 if store_remote_handles)
3085     // . handles[total #parents + #children] (only if !store_remote_handles)
3086     // . if (store_remote_handles) source set handles, packed as a range
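    // Worked size example (hypothetical; assumes a 4-byte int, an 8-byte
    // EntityHandle and store_remote_handles == false): one set with 2 members,
    // no unique id and no parents/children packs as
    //   4 (#sets) + 4 (options) + 4 (#unique-ids = 0)
    //   + 4 (#ents) + 16 (handles) + 8 (#parents, #children) = 40 bytes.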
3087 
3088     // Now the sets; assume any sets the application wants to pass are in the entities list
3089     ErrorCode result;
3090     Range all_sets = entities.subset_by_type(MBENTITYSET);
3091 
3092     int buff_size = estimate_sets_buffer_size(all_sets, store_remote_handles);
3093     if (buff_size < 0)
3094       MB_SET_ERR(MB_FAILURE, "Failed to estimate sets buffer size");
3095     buff->check_space(buff_size);
3096 
3097     // Number of sets
3098     PACK_INT(buff->buff_ptr, all_sets.size());
3099 
3100     // Options for all sets
3101     std::vector<unsigned int> options(all_sets.size());
3102     Range::iterator rit;
3103     std::vector<EntityHandle> members;
3104     int i;
3105     for (rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++) {
3106       result = mbImpl->get_meshset_options(*rit, options[i]);MB_CHK_SET_ERR(result, "Failed to get meshset options");
3107     }
3108     buff->check_space(all_sets.size()*sizeof(unsigned int));
3109     PACK_VOID(buff->buff_ptr, &options[0], all_sets.size()*sizeof(unsigned int));
3110 
3111     // Pack parallel geometry unique id
3112     if (!all_sets.empty()) {
3113       Tag uid_tag;
3114       int n_sets = all_sets.size();
3115       bool b_pack = false;
3116       std::vector<int> id_data(n_sets);
3117       result = mbImpl->tag_get_handle("PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER,
3118                                       uid_tag, MB_TAG_SPARSE | MB_TAG_CREAT);MB_CHK_SET_ERR(result, "Failed to create parallel geometry unique id tag");
3119 
3120       result = mbImpl->tag_get_data(uid_tag, all_sets, &id_data[0]);
3121       if (MB_TAG_NOT_FOUND != result) {
3122         if (MB_SUCCESS != result)
3123           MB_SET_ERR(result, "Failed to get parallel geometry unique ids");
3124         for (i = 0; i < n_sets; i++) {
3125           if (id_data[i] != 0) {
3126             b_pack = true;
3127             break;
3128           }
3129         }
3130       }
3131 
3132       if (b_pack) { // At least one set has a nonzero unique id; pack the ids
3133         buff->check_space((n_sets + 1)*sizeof(int));
3134         PACK_INT(buff->buff_ptr, n_sets);
3135         PACK_INTS(buff->buff_ptr, &id_data[0], n_sets);
3136       }
3137       else {
3138         buff->check_space(sizeof(int));
3139         PACK_INT(buff->buff_ptr, 0);
3140       }
3141     }
3142 
3143     // Vectors/ranges
3144     std::vector<EntityHandle> entities_vec(entities.size());
3145     std::copy(entities.begin(), entities.end(), entities_vec.begin());
3146     for (rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++) {
3147       members.clear();
3148       result = mbImpl->get_entities_by_handle(*rit, members);MB_CHK_SET_ERR(result, "Failed to get entities in ordered set");
3149       result = get_remote_handles(store_remote_handles, &members[0],
3150                                   &members[0], members.size(),
3151                                   to_proc, entities_vec);MB_CHK_SET_ERR(result, "Failed in get_remote_handles");
3152       buff->check_space(members.size()*sizeof(EntityHandle) + sizeof(int));
3153       PACK_INT(buff->buff_ptr, members.size());
3154       PACK_EH(buff->buff_ptr, &members[0], members.size());
3155     }
3156 
3157     // Pack parent/child sets
3158     if (!store_remote_handles) { // Parent/child links are only packed when not storing remote handles
3159       // Pack numbers of parents/children
3160       unsigned int tot_pch = 0;
3161       int num_pch;
3162       buff->check_space(2*all_sets.size()*sizeof(int));
3163       for (rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++) {
3164         // Pack parents
3165         result = mbImpl->num_parent_meshsets(*rit, &num_pch);MB_CHK_SET_ERR(result, "Failed to get num parents");
3166         PACK_INT(buff->buff_ptr, num_pch);
3167         tot_pch += num_pch;
3168         result = mbImpl->num_child_meshsets(*rit, &num_pch);MB_CHK_SET_ERR(result, "Failed to get num children");
3169         PACK_INT(buff->buff_ptr, num_pch);
3170         tot_pch += num_pch;
3171       }
3172 
3173       // Now pack actual parents/children
3174       members.clear();
3175       members.reserve(tot_pch);
3176       std::vector<EntityHandle> tmp_pch;
3177       for (rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++) {
3178         result = mbImpl->get_parent_meshsets(*rit, tmp_pch);MB_CHK_SET_ERR(result, "Failed to get parents");
3179         std::copy(tmp_pch.begin(), tmp_pch.end(), std::back_inserter(members));
3180         tmp_pch.clear();
3181         result = mbImpl->get_child_meshsets(*rit, tmp_pch);MB_CHK_SET_ERR(result, "Failed to get children");
3182         std::copy(tmp_pch.begin(), tmp_pch.end(), std::back_inserter(members));
3183         tmp_pch.clear();
3184       }
3185       assert(members.size() == tot_pch);
3186       if (!members.empty()) {
3187         result = get_remote_handles(store_remote_handles,
3188                                     &members[0], &members[0],
3189                                     members.size(), to_proc,
3190                                     entities_vec);MB_CHK_SET_ERR(result, "Failed to get remote handles for set parent/child sets");
3191 #ifndef NDEBUG
3192         // Check that all handles are either sets or maxtype
3193         for (unsigned int __j = 0; __j < members.size(); __j++)
3194           assert((TYPE_FROM_HANDLE(members[__j]) == MBMAXTYPE &&
3195                   ID_FROM_HANDLE(members[__j]) < (int)entities.size()) ||
3196                  TYPE_FROM_HANDLE(members[__j]) == MBENTITYSET);
3197 #endif
3198         buff->check_space(members.size()*sizeof(EntityHandle));
3199         PACK_EH(buff->buff_ptr, &members[0], members.size());
3200       }
3201     }
3202     else {
3203       buff->check_space(2*all_sets.size()*sizeof(int));
3204       for (rit = all_sets.begin(); rit != all_sets.end(); ++rit) {
3205         PACK_INT(buff->buff_ptr, 0);
3206         PACK_INT(buff->buff_ptr, 0);
3207       }
3208     }
3209 
3210     // Pack the handles
3211     if (store_remote_handles && !all_sets.empty()) {
3212       buff_size = RANGE_SIZE(all_sets);
3213       buff->check_space(buff_size);
3214       PACK_RANGE(buff->buff_ptr, all_sets);
3215     }
3216 
3217     myDebug->tprintf(4, "Done packing sets.\n");
3218 
3219     buff->set_stored_size();
3220 
3221     return MB_SUCCESS;
3222   }
3223 
3224   ErrorCode ParallelComm::unpack_sets(unsigned char *&buff_ptr,
3225                                       std::vector<EntityHandle> &entities,
3226                                       const bool store_remote_handles,
3227                                       const int from_proc)
3228   {
3229     // Now the sets; assume any sets the application wants to pass are in the entities list
3230     ErrorCode result;
3231 
3232     bool no_sets = (entities.empty() || (mbImpl->type_from_handle(*entities.rbegin()) == MBENTITYSET));
3233 
3234     Range new_sets;
3235     int num_sets;
3236     UNPACK_INT(buff_ptr, num_sets);
3237 
3238     if (!num_sets)
3239       return MB_SUCCESS;
3240 
3241     int i;
3242     Range::const_iterator rit;
3243     std::vector<EntityHandle> members;
3244     int num_ents;
3245     std::vector<unsigned int> options_vec(num_sets);
3246     // Option value
3247     if (num_sets)
3248       UNPACK_VOID(buff_ptr, &options_vec[0], num_sets*sizeof(unsigned int));
3249 
3250     // Unpack parallel geometry unique id
3251     int n_uid;
3252     UNPACK_INT(buff_ptr, n_uid);
3253     if (n_uid > 0 && n_uid != num_sets) {
3254       std::cerr << "The number of parallel geometry unique ids should match"
3255                 << " the number of packed sets." << std::endl;
3256     }
3257 
3258     if (n_uid > 0) { // If parallel geometry unique id is packed
3259       std::vector<int> uids(n_uid);
3260       UNPACK_INTS(buff_ptr, &uids[0], n_uid);
3261 
3262       Tag uid_tag;
3263       result = mbImpl->tag_get_handle("PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER,
3264                                       uid_tag, MB_TAG_SPARSE | MB_TAG_CREAT);MB_CHK_SET_ERR(result, "Failed to create parallel geometry unique id tag");
3265 
3266       // Find existing sets
3267       for (i = 0; i < n_uid; i++) {
3268         EntityHandle set_handle;
3269         Range temp_sets;
3270         void* tag_vals[] = { &uids[i] };
3271         if (uids[i] > 0) {
3272           result = mbImpl->get_entities_by_type_and_tag(0, MBENTITYSET,
3273                                                         &uid_tag, tag_vals,
3274                                                         1, temp_sets);
3275         }
3276         if (!temp_sets.empty()) { // Existing set
3277           set_handle = *temp_sets.begin();
3278         }
3279         else { // Create a new set
3280           result = mbImpl->create_meshset(options_vec[i], set_handle);MB_CHK_SET_ERR(result, "Failed to create set in unpack");
3281           result = mbImpl->tag_set_data(uid_tag, &set_handle, 1, &uids[i]);MB_CHK_SET_ERR(result, "Failed to set parallel geometry unique ids");
3282         }
3283         new_sets.insert(set_handle);
3284       }
3285     }
3286     else {
3287       // Create sets
3288       for (i = 0; i < num_sets; i++) {
3289         EntityHandle set_handle;
3290         result = mbImpl->create_meshset(options_vec[i], set_handle);MB_CHK_SET_ERR(result, "Failed to create set in unpack");
3291 
3292         // Make sure new set handles are monotonically increasing
3293         assert(new_sets.empty() || set_handle > *new_sets.rbegin());
3294         new_sets.insert(set_handle);
3295       }
3296     }
3297 
3298     std::copy(new_sets.begin(), new_sets.end(), std::back_inserter(entities));
3299     // Only need to sort if we came in with no sets on the end
3300     if (!no_sets)
3301       std::sort(entities.begin(), entities.end());
3302 
3303     for (rit = new_sets.begin(), i = 0; rit != new_sets.end(); ++rit, i++) {
3304       // Unpack entities as vector, with length
3305       UNPACK_INT(buff_ptr, num_ents);
3306       members.resize(num_ents);
3307       if (num_ents)
3308         UNPACK_EH(buff_ptr, &members[0], num_ents);
3309       result = get_local_handles(&members[0], num_ents, entities);MB_CHK_SET_ERR(result, "Failed to get local handles for ordered set contents");
3310       result = mbImpl->add_entities(*rit, &members[0], num_ents);MB_CHK_SET_ERR(result, "Failed to add ents to ordered set in unpack");
3311     }
3312 
3313     std::vector<int> num_pch(2*new_sets.size());
3314     std::vector<int>::iterator vit;
3315     int tot_pch = 0;
3316     for (vit = num_pch.begin(); vit != num_pch.end(); ++vit) {
3317       UNPACK_INT(buff_ptr, *vit);
3318       tot_pch += *vit;
3319     }
3320 
3321     members.resize(tot_pch);
3322     UNPACK_EH(buff_ptr, &members[0], tot_pch);
3323     result = get_local_handles(&members[0], tot_pch, entities);MB_CHK_SET_ERR(result, "Failed to get local handle for parent/child sets");
3324 
3325     int num = 0;
3326     EntityHandle *mem_ptr = &members[0];
3327     for (rit = new_sets.begin(); rit != new_sets.end(); ++rit) {
3328       // Unpack parents/children
3329       int num_par = num_pch[num++], num_child = num_pch[num++];
3330       if (num_par + num_child) {
3331         for (i = 0; i < num_par; i++) {
3332           assert(0 != mem_ptr[i]);
3333           result = mbImpl->add_parent_meshset(*rit, mem_ptr[i]);MB_CHK_SET_ERR(result, "Failed to add parent to set in unpack");
3334         }
3335         mem_ptr += num_par;
3336         for (i = 0; i < num_child; i++) {
3337           assert(0 != mem_ptr[i]);
3338           result = mbImpl->add_child_meshset(*rit, mem_ptr[i]);MB_CHK_SET_ERR(result, "Failed to add child to set in unpack");
3339         }
3340         mem_ptr += num_child;
3341       }
3342     }
3343 
3344     // Unpack source handles
3345     Range dum_range;
3346     if (store_remote_handles && !new_sets.empty()) {
3347       UNPACK_RANGE(buff_ptr, dum_range);
3348       result = update_remote_data(new_sets, dum_range, from_proc, 0);MB_CHK_SET_ERR(result, "Failed to set sharing data for sets");
3349     }
3350 
3351     myDebug->tprintf(4, "Done unpacking sets.\n");
3352 
3353     return MB_SUCCESS;
3354   }
3355 
3356   ErrorCode ParallelComm::pack_adjacencies(Range& /*entities*/,
3357                                            Range::const_iterator& /*start_rit*/,
3358                                            Range& /*whole_range*/,
3359                                            unsigned char*& /*buff_ptr*/,
3360                                            int& /*count*/,
3361                                            const bool /*just_count*/,
3362                                            const bool /*store_handles*/,
3363                                            const int /*to_proc*/)
3364   {
3365     return MB_FAILURE;
3366   }
3367 
3368   ErrorCode ParallelComm::unpack_adjacencies(unsigned char*& /*buff_ptr*/,
3369                                              Range& /*entities*/,
3370                                              const bool /*store_handles*/,
3371                                              const int /*from_proc*/)
3372   {
3373     return MB_FAILURE;
3374   }
3375 
3376   ErrorCode ParallelComm::pack_tags(Range &entities,
3377                                     const std::vector<Tag> &src_tags,
3378                                     const std::vector<Tag> &dst_tags,
3379                                     const std::vector<Range> &tag_ranges,
3380                                     Buffer *buff,
3381                                     const bool store_remote_handles,
3382                                     const int to_proc)
3383   {
3384     ErrorCode result;
3385     std::vector<Tag>::const_iterator tag_it, dst_it;
3386     std::vector<Range>::const_iterator rit;
3387     int count = 0;
3388 
3389     for (tag_it = src_tags.begin(), rit = tag_ranges.begin();
3390          tag_it != src_tags.end(); ++tag_it, ++rit) {
3391       result = packed_tag_size(*tag_it, *rit, count);
3392       if (MB_SUCCESS != result)
3393         return result;
3394     }
3395 
3396     // Number of tags
3397     count += sizeof(int);
3398 
3399     buff->check_space(count);
3400 
3401     PACK_INT(buff->buff_ptr, src_tags.size());
3402 
3403     std::vector<EntityHandle> entities_vec(entities.size());
3404     std::copy(entities.begin(), entities.end(), entities_vec.begin());
3405 
3406     for (tag_it = src_tags.begin(), dst_it = dst_tags.begin(), rit = tag_ranges.begin();
3407          tag_it != src_tags.end(); ++tag_it, ++dst_it, ++rit) {
3408       result = pack_tag(*tag_it, *dst_it, *rit, entities_vec, buff,
3409                         store_remote_handles, to_proc);
3410       if (MB_SUCCESS != result)
3411         return result;
3412     }
3413 
3414     myDebug->tprintf(4, "Done packing tags.\n");
3415 
3416     buff->set_stored_size();
3417 
3418     return MB_SUCCESS;
3419   }
3420 
3421   ErrorCode ParallelComm::packed_tag_size(Tag tag,
3422                                           const Range &tagged_entities,
3423                                           int &count)
3424   {
3425     // For dense tags, compute size assuming all entities have that tag
3426     // For sparse tags, get number of entities w/ that tag to compute size
3427 
3428     std::vector<int> var_len_sizes;
3429     std::vector<const void*> var_len_values;
3430 
3431     // Default value
3432     count += sizeof(int);
3433     if (NULL != tag->get_default_value())
3434       count += tag->get_default_value_size();
3435 
3436     // Size, type, data type
3437     count += 3*sizeof(int);
3438 
3439     // Name
3440     count += sizeof(int);
3441     count += tag->get_name().size();
3442 
3443     // Range of tag
3444     count += sizeof(int) + tagged_entities.size() * sizeof(EntityHandle);
3445 
3446     if (tag->get_size() == MB_VARIABLE_LENGTH) {
3447       const int num_ent = tagged_entities.size();
3448       // Send a tag size for each entity
3449       count += num_ent * sizeof(int);
3450       // Send tag data for each entity
3451       var_len_sizes.resize(num_ent);
3452       var_len_values.resize(num_ent);
3453       ErrorCode result = tag->get_data(sequenceManager,
3454                                        errorHandler,
3455                                        tagged_entities,
3456                                        &var_len_values[0],
3457                                        &var_len_sizes[0]);MB_CHK_SET_ERR(result, "Failed to get lengths of variable-length tag values");
3458       count += std::accumulate(var_len_sizes.begin(), var_len_sizes.end(), 0);
3459     }
3460     else {
3461       // Tag data values for range or vector
3462       count += tagged_entities.size() * tag->get_size();
3463     }
3464 
3465     return MB_SUCCESS;
3466   }
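  /* Worked example with hypothetical numbers: a fixed-size double tag named
   * "TEMP" (4-character name, 8-byte value, no default value) on 100 entities,
   * with 8-byte handles, contributes
   *
   *   4 (default-value size) + 3*4 (size/type/data type) + 4 + 4 (name length,
   *   name) + 4 + 100*8 (handle count, handles) + 100*8 (values) = 1628 bytes
   *
   * to the buffer-size estimate computed above.
   */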
3467 
3468   ErrorCode ParallelComm::pack_tag(Tag src_tag,
3469                                    Tag dst_tag,
3470                                    const Range &tagged_entities,
3471                                    const std::vector<EntityHandle> &whole_vec,
3472                                    Buffer *buff,
3473                                    const bool store_remote_handles,
3474                                    const int to_proc)
3475   {
3476     ErrorCode result;
3477     std::vector<int> var_len_sizes;
3478     std::vector<const void*> var_len_values;
3479 
3480     if (src_tag != dst_tag) {
3481       if (dst_tag->get_size() != src_tag->get_size())
3482         return MB_TYPE_OUT_OF_RANGE;
3483       if (dst_tag->get_data_type() != src_tag->get_data_type() &&
3484           dst_tag->get_data_type() != MB_TYPE_OPAQUE &&
3485           src_tag->get_data_type() != MB_TYPE_OPAQUE)
3486         return MB_TYPE_OUT_OF_RANGE;
3487     }
3488 
3489     // Size, type, data type
3490     buff->check_space(3*sizeof(int));
3491     PACK_INT(buff->buff_ptr, src_tag->get_size());
3492     TagType this_type;
3493     result = mbImpl->tag_get_type(dst_tag, this_type);
3494     PACK_INT(buff->buff_ptr, (int)this_type);
3495     DataType data_type = src_tag->get_data_type();
3496     PACK_INT(buff->buff_ptr, (int)data_type);
3497     int type_size = TagInfo::size_from_data_type(data_type);
3498 
3499     // Default value
3500     if (NULL == src_tag->get_default_value()) {
3501       buff->check_space(sizeof(int));
3502       PACK_INT(buff->buff_ptr, 0);
3503     }
3504     else {
3505       buff->check_space(src_tag->get_default_value_size());
3506       PACK_BYTES(buff->buff_ptr, src_tag->get_default_value(), src_tag->get_default_value_size());
3507     }
3508 
3509     // Name
3510     buff->check_space(src_tag->get_name().size());
3511     PACK_BYTES(buff->buff_ptr, dst_tag->get_name().c_str(), dst_tag->get_name().size());
3512 
3513     myDebug->tprintf(4, "Packing tag \"%s\"", src_tag->get_name().c_str());
3514     if (src_tag != dst_tag)
3515       myDebug->tprintf(4, " (as tag \"%s\")", dst_tag->get_name().c_str());
3516     myDebug->tprintf(4, "\n");
3517 
3518     // Pack entities
3519     buff->check_space(tagged_entities.size()*sizeof(EntityHandle) + sizeof(int));
3520     PACK_INT(buff->buff_ptr, tagged_entities.size());
3521     std::vector<EntityHandle> dum_tagged_entities(tagged_entities.size());
3522     result = get_remote_handles(store_remote_handles,
3523                                 tagged_entities, &dum_tagged_entities[0], to_proc,
3524                                 whole_vec);
3525     if (MB_SUCCESS != result) {
3526       if (myDebug->get_verbosity() == 3) {
3527         std::cerr << "Failed to get remote handles for tagged entities:" << std::endl;
3528         tagged_entities.print("  ");
3529       }
3530       MB_SET_ERR(result, "Failed to get remote handles for tagged entities");
3531     }
3532 
3533     PACK_EH(buff->buff_ptr, &dum_tagged_entities[0], dum_tagged_entities.size());
3534 
3535     const size_t num_ent = tagged_entities.size();
3536     if (src_tag->get_size() == MB_VARIABLE_LENGTH) {
3537       var_len_sizes.resize(num_ent, 0);
3538       var_len_values.resize(num_ent, 0);
3539       result = mbImpl->tag_get_by_ptr(src_tag, tagged_entities, &var_len_values[0],
3540                                       &var_len_sizes[0]);MB_CHK_SET_ERR(result, "Failed to get variable-length tag data in pack_tags");
3541       buff->check_space(num_ent * sizeof(int));
3542       PACK_INTS(buff->buff_ptr, &var_len_sizes[0], num_ent);
3543       for (unsigned int i = 0; i < num_ent; i++) {
3544         buff->check_space(type_size*var_len_sizes[i]);
3545         PACK_VOID(buff->buff_ptr, var_len_values[i], type_size*var_len_sizes[i]);
3546       }
3547     }
3548     else {
3549       buff->check_space(num_ent * src_tag->get_size());
3550       // Should be OK to read directly into buffer, since tags are untyped and
3551       // handled by memcpy
3552       result = mbImpl->tag_get_data(src_tag, tagged_entities, buff->buff_ptr);MB_CHK_SET_ERR(result, "Failed to get tag data in pack_tags");
3553       buff->buff_ptr += num_ent * src_tag->get_size();
3554       PC(num_ent*src_tag->get_size(), " void");
3555     }
3556 
3557     return MB_SUCCESS;
3558   }
3559 
3560   ErrorCode ParallelComm::get_tag_send_list(const Range& whole_range,
3561                                             std::vector<Tag>& all_tags,
3562                                             std::vector<Range>& tag_ranges)
3563   {
3564     std::vector<Tag> tmp_tags;
3565     ErrorCode result = mbImpl->tag_get_tags(tmp_tags);MB_CHK_SET_ERR(result, "Failed to get tags in pack_tags");
3566 
3567     std::vector<Tag>::iterator tag_it;
3568     for (tag_it = tmp_tags.begin(); tag_it != tmp_tags.end(); ++tag_it) {
3569       std::string tag_name;
3570       result = mbImpl->tag_get_name(*tag_it, tag_name);
3571       if (tag_name.c_str()[0] == '_' && tag_name.c_str()[1] == '_')
3572         continue;
3573 
3574       Range tmp_range;
3575       result = (*tag_it)->get_tagged_entities(sequenceManager, tmp_range);MB_CHK_SET_ERR(result, "Failed to get entities for tag in pack_tags");
3576       tmp_range = intersect(tmp_range, whole_range);
3577 
3578       if (tmp_range.empty())
3579         continue;
3580 
3581       // OK, we'll be sending this tag
3582       all_tags.push_back(*tag_it);
3583       tag_ranges.push_back(Range());
3584       tag_ranges.back().swap(tmp_range);
3585     }
3586 
3587     return MB_SUCCESS;
3588   }
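  /* Example of the filtering above, with hypothetical tag names: given tags
   * {"TEMP", "__PARALLEL_SHARED_PROC", "MATERIAL_SET"} where only "TEMP" is set
   * on entities inside whole_range, the send list becomes all_tags = {TEMP} and
   * tag_ranges = {tagged entities intersected with whole_range}; tags whose
   * names begin with "__" (MOAB-internal tags) are never sent.
   */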
3589 
3590   ErrorCode ParallelComm::unpack_tags(unsigned char *&buff_ptr,
3591                                       std::vector<EntityHandle> &entities,
3592                                       const bool /*store_remote_handles*/,
3593                                       const int /*from_proc*/,
3594                                       const MPI_Op * const mpi_op)
3595   {
3596     // Tags
3597     // Get all the tags
3598     // For dense tags, compute size assuming all entities have that tag
3599     // For sparse tags, get number of entities w/ that tag to compute size
3600 
3601     ErrorCode result;
3602 
3603     int num_tags;
3604     UNPACK_INT(buff_ptr, num_tags);
3605     std::vector<const void*> var_len_vals;
3606     std::vector<unsigned char> dum_vals;
3607     std::vector<EntityHandle> dum_ehvals;
3608 
3609     for (int i = 0; i < num_tags; i++) {
3610       // Tag handle
3611       Tag tag_handle;
3612 
3613       // Size, data type
3614       int tag_size, tag_data_type, tag_type;
3615       UNPACK_INT(buff_ptr, tag_size);
3616       UNPACK_INT(buff_ptr, tag_type);
3617       UNPACK_INT(buff_ptr, tag_data_type);
3618 
3619       // Default value
3620       int def_val_size;
3621       UNPACK_INT(buff_ptr, def_val_size);
3622       void *def_val_ptr = NULL;
3623       if (def_val_size) {
3624         def_val_ptr = buff_ptr;
3625         buff_ptr += def_val_size;
3626         UPC(tag_size, " void");
3627       }
3628 
3629       // Name
3630       int name_len;
3631       UNPACK_INT(buff_ptr, name_len);
3632       std::string tag_name(reinterpret_cast<char*>(buff_ptr), name_len);
3633       buff_ptr += name_len;
3634       UPC(64, " chars");
3635 
3636       myDebug->tprintf(4, "Unpacking tag %s\n", tag_name.c_str());
3637 
3638       // Create the tag
3639       if (tag_size == MB_VARIABLE_LENGTH)
3640         result = mbImpl->tag_get_handle(tag_name.c_str(), def_val_size, (DataType)tag_data_type,
3641                                         tag_handle, MB_TAG_VARLEN | MB_TAG_CREAT | MB_TAG_BYTES | tag_type,
3642                                         def_val_ptr);
3643       else
3644         result = mbImpl->tag_get_handle(tag_name.c_str(), tag_size, (DataType) tag_data_type,
3645                                         tag_handle, MB_TAG_CREAT | MB_TAG_BYTES | tag_type,
3646                                         def_val_ptr);
3647       if (MB_SUCCESS != result) return result;
3648 
3649       // Get handles and convert to local handles
3650       int num_ents;
3651       UNPACK_INT(buff_ptr, num_ents);
3652       std::vector<EntityHandle> dum_ents(num_ents);
3653       UNPACK_EH(buff_ptr, &dum_ents[0], num_ents);
3654 
3655       // In this case handles are indices into new entity range; need to convert
3656       // to local handles
3657       result = get_local_handles(&dum_ents[0], num_ents, entities);MB_CHK_SET_ERR(result, "Unable to convert to local handles");
3658 
3659       // If it's a handle type, also convert tag vals in-place in buffer
3660       if (MB_TYPE_HANDLE == tag_data_type) {
3661         dum_ehvals.resize(num_ents);
3662         UNPACK_EH(buff_ptr, &dum_ehvals[0], num_ents);
3663         result = get_local_handles(&dum_ehvals[0], num_ents, entities);MB_CHK_SET_ERR(result, "Failed to get local handles for tag vals");
3664       }
3665 
3666       DataType data_type;
3667       mbImpl->tag_get_data_type(tag_handle, data_type);
3668       int type_size = TagInfo::size_from_data_type(data_type);
3669 
3670       if (!dum_ents.empty()) {
3671         if (tag_size == MB_VARIABLE_LENGTH) {
3672           // Be careful of alignment here. If the integers are aligned
3673           // in the buffer, we can use them directly. Otherwise we must
3674           // copy them.
3675           std::vector<int> var_lengths(num_ents);
3676           UNPACK_INTS(buff_ptr, &var_lengths[0], num_ents);
3677           UPC(sizeof(int) * num_ents, " void");
3678 
3679           // Get pointers into buffer for each tag value
3680           var_len_vals.resize(num_ents);
3681           for (std::vector<EntityHandle>::size_type j = 0;
3682                j < (std::vector<EntityHandle>::size_type) num_ents; j++) {
3683             var_len_vals[j] = buff_ptr;
3684             buff_ptr += var_lengths[j]*type_size;
3685             UPC(var_lengths[j], " void");
3686           }
3687           result = mbImpl->tag_set_by_ptr(tag_handle, &dum_ents[0], num_ents,
3688                                           &var_len_vals[0], &var_lengths[0]);MB_CHK_SET_ERR(result, "Failed to set tag data when unpacking variable-length tag");
3689         }
3690         else {
3691           // Get existing values of dst tag
3692           dum_vals.resize(tag_size*num_ents);
3693           if (mpi_op) {
3694             int tag_length;
3695             result = mbImpl->tag_get_length(tag_handle, tag_length);MB_CHK_SET_ERR(result, "Failed to get tag length");
3696             result = mbImpl->tag_get_data(tag_handle, &dum_ents[0], num_ents, &dum_vals[0]);MB_CHK_SET_ERR(result, "Failed to get existing value of dst tag on entities");
3697             result = reduce_void(tag_data_type, *mpi_op, tag_length*num_ents, &dum_vals[0], buff_ptr);MB_CHK_SET_ERR(result, "Failed to perform mpi op on dst tags");
3698           }
3699           result = mbImpl->tag_set_data(tag_handle, &dum_ents[0],
3700                                         num_ents, buff_ptr);MB_CHK_SET_ERR(result, "Failed to set range-based tag data when unpacking tag");
3701           buff_ptr += num_ents * tag_size;
3702           UPC(num_ents * tag_size, " void");
3703         }
3704       }
3705     }
3706 
3707     myDebug->tprintf(4, "Done unpacking tags.\n");
3708 
3709     return MB_SUCCESS;
3710   }
3711 
3712   template<class T> T LAND(const T &arg1, const T &arg2) {return arg1 && arg2;}
3713   template<class T> T LOR(const T& arg1, const T& arg2) {return arg1 || arg2;}
3714   template<class T> T LXOR(const T& arg1, const T& arg2) {return ((arg1 && !arg2) || (!arg1 && arg2));}
3715   template<class T> T MAX(const T& arg1, const T& arg2) {return (arg1 > arg2 ? arg1 : arg2);}
3716   template<class T> T MIN(const T& arg1, const T& arg2) {return (arg1 < arg2 ? arg1 : arg2);}
3717   template<class T> T ADD(const T &arg1, const T &arg2) {return arg1 + arg2;}
3718   template<class T> T MULT(const T &arg1, const T &arg2) {return arg1 * arg2;}
3719 
3720   template <class T>
3721   ErrorCode ParallelComm::reduce(const MPI_Op mpi_op, int num_ents, void *old_vals, void *new_vals)
3722   {
3723     T *old_tmp = reinterpret_cast<T*>(old_vals);
3724     //T *new_tmp = reinterpret_cast<T*>(new_vals);
3725     // new_vals may not be properly aligned for T; copy into an aligned buffer, since optimizing compilers assume alignment
3726 
3727     std::vector<T> new_values;
3728     new_values.resize(num_ents);
3729     memcpy( &new_values[0], new_vals, num_ents * sizeof(T));
3730     T *new_tmp = &new_values[0];
3731 
3732     if (mpi_op == MPI_SUM)
3733       std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, ADD<T>);
3734     else if (mpi_op == MPI_PROD)
3735       std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MULT<T>);
3736     else if (mpi_op == MPI_MAX)
3737       std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MAX<T>);
3738     else if (mpi_op == MPI_MIN)
3739       std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MIN<T>);
3740     else if (mpi_op == MPI_LAND)
3741       std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LAND<T>);
3742     else if (mpi_op == MPI_LOR)
3743       std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LOR<T>);
3744     else if (mpi_op == MPI_LXOR)
3745       std::transform(old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LXOR<T>);
3746     else if (mpi_op == MPI_BAND || mpi_op == MPI_BOR || mpi_op == MPI_BXOR) {
3747       std::cerr << "Bitwise operations not allowed in tag reductions." << std::endl;
3748       return MB_FAILURE;
3749     }
3750     else if (mpi_op != MPI_OP_NULL) {
3751       std::cerr << "Unknown MPI operation type." << std::endl;
3752       return MB_TYPE_OUT_OF_RANGE;
3753     }
3754 
3755     // Copy the result back into the caller's new_vals buffer
3756     memcpy( new_vals, new_tmp, num_ents * sizeof(T));
3757     std::vector<T>().swap(new_values); // release the memory allocated for the temporary vector
3758 
3759     return MB_SUCCESS;
3760   }
3761 
reduce_void(int tag_data_type,const MPI_Op mpi_op,int num_ents,void * old_vals,void * new_vals)3762   ErrorCode ParallelComm::reduce_void(int tag_data_type, const MPI_Op mpi_op, int num_ents, void *old_vals, void *new_vals)
3763   {
3764     ErrorCode result;
3765     switch (tag_data_type) {
3766       case MB_TYPE_INTEGER:
3767           result = reduce<int>(mpi_op, num_ents, old_vals, new_vals);
3768           break;
3769       case MB_TYPE_DOUBLE:
3770           result = reduce<double>(mpi_op, num_ents, old_vals, new_vals);
3771           break;
3772       case MB_TYPE_BIT:
3773           result = reduce<unsigned char>(mpi_op, num_ents, old_vals, new_vals);
3774           break;
3775       default:
3776           result = MB_SUCCESS;
3777           break;
3778     }
3779 
3780     return result;
3781   }
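
  // A minimal illustrative sketch (hypothetical helper, unused by the library):
  // shows the element-wise combine that reduce<T>() above performs for MPI_MAX.
  // The "old" values are combined into the "new" buffer in place, which is the
  // same effect reduce<double>(MPI_MAX, 3, owned, incoming) would have on new_vals.
  static inline void example_reduce_max_sketch()
  {
    double owned[3]    = {1.0, 5.0, 2.0};  // existing tag values on this proc
    double incoming[3] = {4.0, 3.0, 2.5};  // values unpacked from a message buffer
    // After this call, incoming holds {4.0, 5.0, 2.5}
    std::transform(owned, owned + 3, incoming, incoming, MAX<double>);
  }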
3782 
resolve_shared_ents(EntityHandle this_set,int resolve_dim,int shared_dim,const Tag * id_tag)3783   ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
3784                                               int resolve_dim,
3785                                               int shared_dim,
3786                                               const Tag* id_tag)
3787   {
3788     ErrorCode result;
3789     Range proc_ents;
3790 
3791     // Check for structured mesh, and do it differently if it is
3792     ScdInterface *scdi;
3793     result = mbImpl->query_interface(scdi);
3794     if (scdi) {
3795       result = scdi->tag_shared_vertices(this, this_set);
3796       if (MB_SUCCESS == result) {
3797         myDebug->tprintf(1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size());
3798         return result;
3799       }
3800     }
3801 
3802     if (0 == this_set) {
3803         // Get the entities in the partition sets
3804       for (Range::iterator rit = partitionSets.begin(); rit != partitionSets.end(); ++rit) {
3805         Range tmp_ents;
3806         result = mbImpl->get_entities_by_handle(*rit, tmp_ents, true);
3807         if (MB_SUCCESS != result) return result;
3808         proc_ents.merge(tmp_ents);
3809       }
3810     }
3811     else {
3812       result = mbImpl->get_entities_by_handle(this_set, proc_ents, true);
3813       if (MB_SUCCESS != result) return result;
3814     }
3815 
3816     // Resolve dim is maximal dim of entities in proc_ents
3817     if (-1 == resolve_dim) {
3818       if (proc_ents.empty())
3819         return MB_ENTITY_NOT_FOUND;
3820 
3821       resolve_dim = mbImpl->dimension_from_handle(*proc_ents.rbegin());
3822     }
3823 
3824     // proc_ents should all be of same dimension
3825     if (resolve_dim > shared_dim &&
3826         mbImpl->dimension_from_handle(*proc_ents.rbegin()) !=
3827         mbImpl->dimension_from_handle(*proc_ents.begin())) {
3828       Range::iterator lower = proc_ents.lower_bound(CN::TypeDimensionMap[0].first),
3829         upper = proc_ents.upper_bound(CN::TypeDimensionMap[resolve_dim - 1].second);
3830       proc_ents.erase(lower, upper);
3831     }
3832 
3833     // Must call this even if we have no entities of our own, so that the
3834     // collective communication inside still matches up across processors
3835     return resolve_shared_ents(this_set, proc_ents, resolve_dim, shared_dim, NULL, id_tag);
3836   }
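
  // Illustrative sketch (hypothetical helper, unused by the library): typical
  // driver-side use of the overload above after a parallel read. Passing 0 for
  // this_set makes the routine gather entities from the stored partition sets;
  // here a 3D element-based partition is resolved with shared_dim = 2, and NULL
  // selects the default GLOBAL_ID tag for matching vertices across processors.
  static inline ErrorCode example_resolve_shared_ents_usage(ParallelComm* pcomm)
  {
    // Resolve sharing for a 3D element-based partition
    return pcomm->resolve_shared_ents(0, 3, 2, NULL);
  }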
3837 
resolve_shared_ents(EntityHandle this_set,Range & proc_ents,int resolve_dim,int shared_dim,Range * skin_ents,const Tag * id_tag)3838   ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
3839                                               Range &proc_ents,
3840                                               int resolve_dim,
3841                                               int shared_dim,
3842                                               Range *skin_ents,
3843                                               const Tag* id_tag)
3844   {
3845 #ifdef MOAB_HAVE_MPE
3846     if (myDebug->get_verbosity() == 2) {
3847       define_mpe();
3848       MPE_Log_event(RESOLVE_START, procConfig.proc_rank(), "Entering resolve_shared_ents.");
3849     }
3850 #endif
3851 
3852     ErrorCode result;
3853     myDebug->tprintf(1, "Resolving shared entities.\n");
3854 
3855     if (resolve_dim < shared_dim) {
3856       MB_SET_ERR(MB_FAILURE, "MOAB does not support vertex-based partitions, only element-based ones");
3857     }
3858 
3859     if (-1 == shared_dim) {
3860       if (!proc_ents.empty())
3861         shared_dim = mbImpl->dimension_from_handle(*proc_ents.begin()) - 1;
3862       else if (resolve_dim == 3)
3863         shared_dim = 2;
3864     }
3865 
3866     if (shared_dim < 0 || resolve_dim < 0) {
3867       MB_SET_ERR(MB_FAILURE, "Unable to guess shared_dim or resolve_dim");
3868     }
3869 
3870     // Get the skin entities by dimension
3871     Range tmp_skin_ents[4];
3872 
3873     // Get the entities to be skinned
3874     // Find the skin
3875     int skin_dim = resolve_dim - 1;
3876     if (!skin_ents) {
3877       skin_ents = tmp_skin_ents;
3878       skin_ents[resolve_dim] = proc_ents;
3879       Skinner skinner(mbImpl);
3880       result = skinner.find_skin(this_set, skin_ents[skin_dim + 1], false, skin_ents[skin_dim],
3881                                  NULL, true, true, true);MB_CHK_SET_ERR(result, "Failed to find skin");
3882       myDebug->tprintf(1, "Found skin:   skin_dim: %d resolve_dim: %d , now resolving.\n", skin_dim, resolve_dim);
3883       myDebug->tprintf(3, "skin_ents[0].size(): %d skin_ents[1].size(): %d  \n", (int)skin_ents[0].size(),(int)skin_ents[1].size());
3884       // Get entities adjacent to skin ents from shared_dim down to zero
3885       for (int this_dim = skin_dim - 1; this_dim >= 0; this_dim--) {
3886         result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
3887                                          true, skin_ents[this_dim],
3888                                          Interface::UNION);MB_CHK_SET_ERR(result, "Failed to get skin adjacencies");
3889 
3890         if (this_set && skin_dim==2 && this_dim==1)
3891           {
3892             result = mbImpl->add_entities(this_set, skin_ents[this_dim]);MB_CHK_ERR(result);
3893           }
3894       }
3895     }
3896     else if (skin_ents[resolve_dim].empty())
3897       skin_ents[resolve_dim] = proc_ents;
3898 
3899     // Global id tag
3900     Tag gid_tag;
3901     if (id_tag)
3902       gid_tag = *id_tag;
3903     else {
3904       bool tag_created = false;
3905       int def_val = -1;
3906       result = mbImpl->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER,
3907                                       gid_tag, MB_TAG_DENSE | MB_TAG_CREAT,
3908                                       &def_val, &tag_created);
3909       if (MB_ALREADY_ALLOCATED != result && MB_SUCCESS != result) {
3910         MB_SET_ERR(result, "Failed to create/get gid tag handle");
3911       }
3912       else if (tag_created) {
3913         // Just created it, so we need global ids
3914         result = assign_global_ids(this_set, skin_dim + 1, true, true, true);MB_CHK_SET_ERR(result, "Failed to assign global ids");
3915       }
3916     }
3917 
3918     DataType tag_type;
3919     result = mbImpl->tag_get_data_type(gid_tag, tag_type);MB_CHK_SET_ERR(result, "Failed to get tag data type");
3920     int bytes_per_tag;
3921     result = mbImpl->tag_get_bytes(gid_tag, bytes_per_tag);MB_CHK_SET_ERR(result, "Failed to get number of bytes per tag");
3922     // The id tag is either a handle-sized/opaque tag of sizeof(long) bytes or a
3923     // 4-byte integer tag (e.g. GLOBAL_ID); either way the ids are widened to long for gs
3924 
3925     // Get gids for skin ents in a vector, to pass to gs
3926     std::vector<long> lgid_data(skin_ents[0].size());
3927     // A handle/opaque tag whose size matches sizeof(long) is read directly;
3928     // a 4-byte integer tag is read as int and then copied into the long vector
3929     if (sizeof(long) == bytes_per_tag && ((MB_TYPE_HANDLE == tag_type) || (MB_TYPE_OPAQUE == tag_type))) { // It is a special id tag
3930       result = mbImpl->tag_get_data(gid_tag, skin_ents[0], &lgid_data[0]);MB_CHK_SET_ERR(result, "Couldn't get gid tag for skin vertices");
3931     }
3932     else if (4 == bytes_per_tag) { // Must be GLOBAL_ID tag or 32 bits ...
3933       std::vector<int> gid_data(lgid_data.size());
3934       result = mbImpl->tag_get_data(gid_tag, skin_ents[0], &gid_data[0]);MB_CHK_SET_ERR(result, "Failed to get gid tag for skin vertices");
3935       std::copy(gid_data.begin(), gid_data.end(), lgid_data.begin());
3936     }
3937     else {
3938       // Not supported flag
3939       MB_SET_ERR(MB_FAILURE, "Unsupported id tag");
3940     }
3941 
3942     // Put handles in vector for passing to gs setup
3943     std::vector<Ulong> handle_vec; // Assumes EntityHandle converts losslessly to/from Ulong
3944     std::copy(skin_ents[0].begin(), skin_ents[0].end(),
3945               std::back_inserter(handle_vec));
3946 
3947 #ifdef MOAB_HAVE_MPE
3948     if (myDebug->get_verbosity() == 2) {
3949       MPE_Log_event(SHAREDV_START, procConfig.proc_rank(), "Creating crystal router.");
3950     }
3951 #endif
3952 
3953     // Get a crystal router
3954     gs_data::crystal_data *cd = procConfig.crystal_router();
3955 
3956     /*
3957     // Get total number of entities; will overshoot highest global id, but
3958     // that's OK
3959     int num_total[2] = {0, 0}, num_local[2] = {0, 0};
3960     result = mbImpl->get_number_entities_by_dimension(this_set, 0, num_local);
3961     if (MB_SUCCESS != result)return result;
3962     int failure = MPI_Allreduce(num_local, num_total, 1,
3963     MPI_INTEGER, MPI_SUM, procConfig.proc_comm());
3964     if (failure) {
3965       MB_SET_ERR(MB_FAILURE, "Allreduce for total number of shared ents failed");
3966     }
3967     */
3968     // Call gather-scatter to get shared ids & procs
3969     gs_data *gsd = new gs_data();
3970    // assert(sizeof(ulong_) == sizeof(EntityHandle));
3971     result = gsd->initialize(skin_ents[0].size(), &lgid_data[0],
3972                               &handle_vec[0], 2, 1, 1, cd);MB_CHK_SET_ERR(result, "Failed to create gs data");
3973 
3974     // Get shared proc tags
3975     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
3976     result = get_shared_proc_tags(shp_tag, shps_tag,
3977                                   shh_tag, shhs_tag, pstat_tag);MB_CHK_SET_ERR(result, "Failed to get shared proc tags");
3978 
3979     // Load shared verts into a tuple, then sort by index
3980     TupleList shared_verts;
3981     shared_verts.initialize(2, 0, 1, 0,
3982                             skin_ents[0].size()*(MAX_SHARING_PROCS + 1));
3983     shared_verts.enableWriteAccess();
3984 
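    // Each shared_verts tuple holds two ints and one ulong: (local index of the
    // skin vertex in skin_ents[0], rank of a sharing proc, remote handle/label
    // on that proc), as reported by the gs exchange above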
3985     unsigned int i = 0, j = 0;
3986     for (unsigned int p = 0; p < gsd->nlinfo->_np; p++)
3987       for (unsigned int np = 0; np < gsd->nlinfo->_nshared[p]; np++) {
3988         shared_verts.vi_wr[i++] = gsd->nlinfo->_sh_ind[j];
3989         shared_verts.vi_wr[i++] = gsd->nlinfo->_target[p];
3990         shared_verts.vul_wr[j] = gsd->nlinfo->_ulabels[j];
3991         j++;
3992         shared_verts.inc_n();
3993       }
3994 
3995     myDebug->tprintf(3, " shared verts size %d \n", (int)shared_verts.get_n());
3996 
3997 
3998     int max_size = skin_ents[0].size()*(MAX_SHARING_PROCS + 1);
3999     moab::TupleList::buffer sort_buffer;
4000     sort_buffer.buffer_init(max_size);
4001     shared_verts.sort(0, &sort_buffer);
4002     sort_buffer.reset();
4003 
4004     // Set sharing procs and handles tags on skin ents
4005     int maxp = -1;
4006     std::vector<int> sharing_procs(MAX_SHARING_PROCS);
4007     std::fill(sharing_procs.begin(), sharing_procs.end(), maxp);
4008     j = 0;
4009     i = 0;
4010 
4011     // Get ents shared by 1 or n procs
4012     std::map<std::vector<int>, std::vector<EntityHandle> > proc_nvecs;
4013     Range proc_verts;
4014     result = mbImpl->get_adjacencies(proc_ents, 0, false, proc_verts,
4015                                      Interface::UNION);MB_CHK_SET_ERR(result, "Failed to get proc_verts");
4016 
4017     myDebug->print( 3, " resolve shared ents:  proc verts ", proc_verts );
4018     result = tag_shared_verts(shared_verts, skin_ents,
4019                               proc_nvecs, proc_verts);MB_CHK_SET_ERR(result, "Failed to tag shared verts");
4020 
4021 #ifdef MOAB_HAVE_MPE
4022     if (myDebug->get_verbosity() == 2) {
4023       MPE_Log_event(SHAREDV_END, procConfig.proc_rank(), "Finished tag_shared_verts.");
4024     }
4025 #endif
4026 
4027     // Get entities shared by 1 or n procs
4028     result = get_proc_nvecs(resolve_dim, shared_dim, skin_ents, proc_nvecs);MB_CHK_SET_ERR(result, "Failed to tag shared entities");
4029 
4030 
4031     shared_verts.reset();
4032 
4033     if (myDebug->get_verbosity() > 0) {
4034       for (std::map<std::vector<int>, std::vector<EntityHandle> >::const_iterator mit = proc_nvecs.begin();
4035            mit != proc_nvecs.end(); ++mit) {
4036         myDebug->tprintf(1, "Iface: ");
4037         for (std::vector<int>::const_iterator vit = (mit->first).begin();
4038              vit != (mit->first).end(); ++vit) myDebug->printf(1, " %d", *vit);
4039         myDebug->print(1, "\n");
4040       }
4041     }
4042 
4043     // Create the sets for each interface; store them as tags on
4044     // the interface instance
4045     Range iface_sets;
4046     result = create_interface_sets(proc_nvecs);MB_CHK_SET_ERR(result, "Failed to create interface sets");
4047 
4048     // Establish comm procs and buffers for them
4049     std::set<unsigned int> procs;
4050     result = get_interface_procs(procs, true);MB_CHK_SET_ERR(result, "Failed to get interface procs");
4051 
4052 #ifndef NDEBUG
4053     result = check_all_shared_handles(true);MB_CHK_SET_ERR(result, "Shared handle check failed after interface vertex exchange");
4054 #endif
4055 
4056     // Resolve shared entity remote handles; implemented in ghost cell exchange
4057     // code because it's so similar
4058     result = exchange_ghost_cells(-1, -1, 0, 0, true, true);MB_CHK_SET_ERR(result, "Failed to resolve shared entity remote handles");
4059 
4060     // Now build parent/child links for interface sets
4061     result = create_iface_pc_links();MB_CHK_SET_ERR(result, "Failed to create interface parent/child links");
4062 
4063     gsd->reset();
4064     delete gsd;
4065 
4066 #ifdef MOAB_HAVE_MPE
4067     if (myDebug->get_verbosity() == 2) {
4068       MPE_Log_event(RESOLVE_END, procConfig.proc_rank(), "Exiting resolve_shared_ents.");
4069     }
4070 #endif
4071 
4072     //std::ostringstream ent_str;
4073     //ent_str << "mesh." << procConfig.proc_rank() << ".h5m";
4074     //mbImpl->write_mesh(ent_str.str().c_str());
4075 
4076     // Done
4077     return result;
4078   }
4079 
define_mpe()4080   void ParallelComm::define_mpe()
4081   {
4082 #ifdef MOAB_HAVE_MPE
4083     if (myDebug->get_verbosity() == 2) {
4084       // Define mpe states used for logging
4085       int success;
4086       MPE_Log_get_state_eventIDs(&IFACE_START, &IFACE_END);
4087       MPE_Log_get_state_eventIDs(&GHOST_START, &GHOST_END);
4088       MPE_Log_get_state_eventIDs(&SHAREDV_START, &SHAREDV_END);
4089       MPE_Log_get_state_eventIDs(&RESOLVE_START, &RESOLVE_END);
4090       MPE_Log_get_state_eventIDs(&ENTITIES_START, &ENTITIES_END);
4091       MPE_Log_get_state_eventIDs(&RHANDLES_START, &RHANDLES_END);
4092       MPE_Log_get_state_eventIDs(&OWNED_START, &OWNED_END);
4093       success = MPE_Describe_state(IFACE_START, IFACE_END, "Resolve interface ents", "green");
4094       assert(MPE_LOG_OK == success);
4095       success = MPE_Describe_state(GHOST_START, GHOST_END, "Exchange ghost ents", "red");
4096       assert(MPE_LOG_OK == success);
4097       success = MPE_Describe_state(SHAREDV_START, SHAREDV_END, "Resolve interface vertices", "blue");
4098       assert(MPE_LOG_OK == success);
4099       success = MPE_Describe_state(RESOLVE_START, RESOLVE_END, "Resolve shared ents", "purple");
4100       assert(MPE_LOG_OK == success);
4101       success = MPE_Describe_state(ENTITIES_START, ENTITIES_END, "Exchange shared ents", "yellow");
4102       assert(MPE_LOG_OK == success);
4103       success = MPE_Describe_state(RHANDLES_START, RHANDLES_END, "Remote handles", "cyan");
4104       assert(MPE_LOG_OK == success);
4105       success = MPE_Describe_state(OWNED_START, OWNED_END, "Exchange owned ents", "black");
4106       assert(MPE_LOG_OK == success);
4107     }
4108 #endif
4109   }
4110 
resolve_shared_ents(ParallelComm ** pc,const unsigned int np,EntityHandle this_set,const int part_dim)4111   ErrorCode ParallelComm::resolve_shared_ents(ParallelComm **pc,
4112                                               const unsigned int np,
4113                                               EntityHandle this_set,
4114                                               const int part_dim)
4115   {
4116     std::vector<Range> verts(np);
4117     int tot_verts = 0;
4118     unsigned int p, i, j, v;
4119     ErrorCode rval;
4120     for (p = 0; p < np; p++) {
4121       Skinner skinner(pc[p]->get_moab());
4122       Range part_ents, skin_ents;
4123       rval = pc[p]->get_moab()->get_entities_by_dimension(this_set, part_dim, part_ents);
4124       if (MB_SUCCESS != rval) return rval;
4125       rval = skinner.find_skin(this_set, part_ents, false, skin_ents, 0, true, true, true);
4126       if (MB_SUCCESS != rval) return rval;
4127       rval = pc[p]->get_moab()->get_adjacencies(skin_ents, 0, true, verts[p],
4128                                                 Interface::UNION);
4129       if (MB_SUCCESS != rval) return rval;
4130       tot_verts += verts[p].size();
4131     }
4132 
4133     TupleList shared_ents;
4134     shared_ents.initialize(2, 0, 1, 0, tot_verts);
4135     shared_ents.enableWriteAccess();
4136 
4137     i = 0; j = 0;
4138     std::vector<int> gids;
4139     Range::iterator rit;
4140     Tag gid_tag;
4141     int dum_default = 0;
4142     for (p = 0; p < np; p++) {
4143       rval = pc[p]->get_moab()->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER,
4144                                                gid_tag, MB_TAG_DENSE | MB_TAG_CREAT,
4145                                                &dum_default);
4146       if (MB_SUCCESS != rval) return rval;
4147       gids.resize(verts[p].size());
4148       rval = pc[p]->get_moab()->tag_get_data(gid_tag, verts[p], &gids[0]);
4149       if (MB_SUCCESS != rval) return rval;
4150 
4151       for (v = 0, rit = verts[p].begin(); v < gids.size(); v++, ++rit) {
4152         shared_ents.vi_wr[i++] = gids[v];
4153         shared_ents.vi_wr[i++] = p;
4154         shared_ents.vul_wr[j] = *rit;
4155         j++;
4156         shared_ents.inc_n();
4157       }
4158     }
4159 
4160     moab::TupleList::buffer sort_buffer;
4161     sort_buffer.buffer_init(tot_verts);
4162     shared_ents.sort(0, &sort_buffer);
4163     sort_buffer.reset();
4164 
4165     j = 0; i = 0;
4166     std::vector<EntityHandle> handles;
4167     std::vector<int> procs;
4168 
4169     while (i < shared_ents.get_n()) {
4170       handles.clear();
4171       procs.clear();
4172 
4173       // Count & accumulate sharing procs
4174       int this_gid = shared_ents.vi_rd[j];
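      // Note: i counts whole tuples (and indexes vul_rd, one handle per tuple),
      // while j walks the flat int array, two entries per tuple (gid, proc)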
4175       while (i < shared_ents.get_n() && shared_ents.vi_rd[j] == this_gid) {
4176         j++;
4177         procs.push_back(shared_ents.vi_rd[j++]);
4178         handles.push_back(shared_ents.vul_rd[i++]);
4179       }
4180       if (1 == procs.size())
4181         continue;
4182 
4183       for (v = 0; v < procs.size(); v++) {
4184         rval = pc[procs[v]]->update_remote_data(handles[v],
4185                                                 &procs[0], &handles[0], procs.size(),
4186                                                 (procs[0] == (int)pc[procs[v]]->rank() ? PSTATUS_INTERFACE : (PSTATUS_NOT_OWNED|PSTATUS_INTERFACE)));
4187         if (MB_SUCCESS != rval) return rval;
4188       }
4189     }
4190 
4191     std::set<unsigned int> psets;
4192     for (p = 0; p < np; p++) {
4193       rval = pc[p]->create_interface_sets(this_set, part_dim, part_dim - 1);
4194       if (MB_SUCCESS != rval) return rval;
4195       // Establish comm procs and buffers for them
4196       psets.clear();
4197       rval = pc[p]->get_interface_procs(psets, true);
4198       if (MB_SUCCESS != rval) return rval;
4199     }
4200 
4201     shared_ents.reset();
4202 
4203     return MB_SUCCESS;
4204   }
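
  // Illustrative sketch (hypothetical helper, unused by the library): the
  // TupleList pattern used by the routines above, reduced to its essentials.
  // Tuples of (key int, payload int, one ulong) are written, sorted on the key
  // so that equal keys become contiguous, and then released.
  static inline void example_tuplelist_sort_sketch()
  {
    TupleList tl;
    tl.initialize(2, 0, 1, 0, 4);   // 2 ints, 0 longs, 1 ulong, 0 reals, capacity 4
    tl.enableWriteAccess();
    int ints[] = {7, 1, 3, 0, 7, 2};        // (key, payload) pairs
    Ulong handles[] = {100, 101, 102};
    for (int t = 0; t < 3; t++) {
      tl.vi_wr[2*t] = ints[2*t];
      tl.vi_wr[2*t + 1] = ints[2*t + 1];
      tl.vul_wr[t] = handles[t];
      tl.inc_n();
    }
    moab::TupleList::buffer sort_buffer;
    sort_buffer.buffer_init(4);
    tl.sort(0, &sort_buffer);       // sort on the first int field (the key)
    sort_buffer.reset();
    // After the sort the keys are grouped: 3, 7, 7
    tl.reset();
  }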
4205 
tag_iface_entities()4206   ErrorCode ParallelComm::tag_iface_entities()
4207   {
4208     ErrorCode result = MB_SUCCESS;
4209     Range iface_ents, tmp_ents, rmv_ents;
4210     std::vector<unsigned char> pstat;
4211     unsigned char set_pstat;
4212     Range::iterator rit2;
4213     unsigned int i;
4214 
4215     for (Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit) {
4216       iface_ents.clear();
4217 
4218       result = mbImpl->get_entities_by_handle(*rit, iface_ents);MB_CHK_SET_ERR(result, "Failed to get interface set contents");
4219       pstat.resize(iface_ents.size());
4220       result = mbImpl->tag_get_data(pstatus_tag(), iface_ents, &pstat[0]);MB_CHK_SET_ERR(result, "Failed to get pstatus values for interface set entities");
4221       result = mbImpl->tag_get_data(pstatus_tag(), &(*rit), 1, &set_pstat);MB_CHK_SET_ERR(result, "Failed to get pstatus values for interface set");
4222       rmv_ents.clear();
4223       for (rit2 = iface_ents.begin(), i = 0; rit2 != iface_ents.end(); ++rit2, i++) {
4224         if (!(pstat[i] & PSTATUS_INTERFACE)) {
4225           rmv_ents.insert(*rit2);
4226           pstat[i] = 0x0;
4227         }
4228       }
4229       result = mbImpl->remove_entities(*rit, rmv_ents);MB_CHK_SET_ERR(result, "Failed to remove entities from interface set");
4230 
4231       if (!(set_pstat & PSTATUS_NOT_OWNED))
4232         continue;
4233       // If we're here, we need to set the notowned status on (remaining) set contents
4234 
4235       // Remove rmv_ents from the contents list
4236       iface_ents = subtract(iface_ents, rmv_ents);
4237       // Compress the pstat vector (removing 0x0's)
4238       pstat.erase(std::remove(pstat.begin(), pstat.end(), (unsigned char)0x0),
4239                   pstat.end());
4240       // Fold the not_owned bit into remaining values
4241       unsigned int sz = iface_ents.size();
4242       for (i = 0; i < sz; i++)
4243         pstat[i] |= PSTATUS_NOT_OWNED;
4244 
4245       // Set the tag on the entities
4246       result = mbImpl->tag_set_data(pstatus_tag(), iface_ents, &pstat[0]);MB_CHK_SET_ERR(result, "Failed to set pstatus values for interface set entities");
4247     }
4248 
4249     return MB_SUCCESS;
4250   }
4251 
set_pstatus_entities(Range & pstatus_ents,unsigned char pstatus_val,bool lower_dim_ents,bool verts_too,int operation)4252   ErrorCode ParallelComm::set_pstatus_entities(Range &pstatus_ents,
4253                                                unsigned char pstatus_val,
4254                                                bool lower_dim_ents,
4255                                                bool verts_too,
4256                                                int operation)
4257   {
4258     std::vector<unsigned char> pstatus_vals(pstatus_ents.size());
4259     Range all_ents, *range_ptr = &pstatus_ents;
4260     ErrorCode result;
4261     if (lower_dim_ents || verts_too) {
4262       all_ents = pstatus_ents;
4263       range_ptr = &all_ents;
4264       int start_dim = (lower_dim_ents ? mbImpl->dimension_from_handle(*pstatus_ents.rbegin()) - 1 : 0);
4265       for (; start_dim >= 0; start_dim--) {
4266         result = mbImpl->get_adjacencies(all_ents, start_dim, true, all_ents,
4267                                          Interface::UNION);MB_CHK_SET_ERR(result, "Failed to get adjacencies for pstatus entities");
4268       }
4269     }
    // The range may have grown to include adjacent lower-dimensional entities,
    // so make sure the value buffer covers all of it
    pstatus_vals.resize(range_ptr->size());
4270     if (Interface::UNION == operation) {
4271       result = mbImpl->tag_get_data(pstatus_tag(), *range_ptr, &pstatus_vals[0]);MB_CHK_SET_ERR(result, "Failed to get pstatus tag data");
4272       for (unsigned int i = 0; i < pstatus_vals.size(); i++)
4273         pstatus_vals[i] |= pstatus_val;
4274     }
4275     else {
4276       for (unsigned int i = 0; i < pstatus_vals.size(); i++)
4277         pstatus_vals[i] = pstatus_val;
4278     }
4279     result = mbImpl->tag_set_data(pstatus_tag(), *range_ptr, &pstatus_vals[0]);MB_CHK_SET_ERR(result, "Failed to set pstatus tag data");
4280 
4281     return MB_SUCCESS;
4282   }
4283 
set_pstatus_entities(EntityHandle * pstatus_ents,int num_ents,unsigned char pstatus_val,bool lower_dim_ents,bool verts_too,int operation)4284   ErrorCode ParallelComm::set_pstatus_entities(EntityHandle *pstatus_ents,
4285                                                int num_ents,
4286                                                unsigned char pstatus_val,
4287                                                bool lower_dim_ents,
4288                                                bool verts_too,
4289                                                int operation)
4290   {
4291     std::vector<unsigned char> pstatus_vals(num_ents);
4292     ErrorCode result;
4293     if (lower_dim_ents || verts_too) {
4294       // In this case, call the range-based version
4295       Range tmp_range;
4296       std::copy(pstatus_ents, pstatus_ents + num_ents, range_inserter(tmp_range));
4297       return set_pstatus_entities(tmp_range, pstatus_val, lower_dim_ents,
4298                                   verts_too, operation);
4299     }
4300 
4301     if (Interface::UNION == operation) {
4302       result = mbImpl->tag_get_data(pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0]);MB_CHK_SET_ERR(result, "Failed to get pstatus tag data");
4303       for (unsigned int i = 0; i < (unsigned int) num_ents; i++)
4304         pstatus_vals[i] |= pstatus_val;
4305     }
4306     else {
4307       for (unsigned int i = 0; i < (unsigned int) num_ents; i++)
4308         pstatus_vals[i] = pstatus_val;
4309     }
4310     result = mbImpl->tag_set_data(pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0]);MB_CHK_SET_ERR(result, "Failed to set pstatus tag data");
4311 
4312     return MB_SUCCESS;
4313   }
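
  // Illustrative sketch (hypothetical helper, unused by the library): how the
  // 'operation' argument of set_pstatus_entities() affects the pstatus byte.
  // With Interface::UNION the requested bits are OR'd into the existing value;
  // otherwise the value is simply overwritten.
  static inline unsigned char example_pstatus_combine(unsigned char current,
                                                      unsigned char requested,
                                                      int operation)
  {
    // e.g. current = PSTATUS_SHARED, requested = PSTATUS_NOT_OWNED:
    // UNION yields (PSTATUS_SHARED | PSTATUS_NOT_OWNED); overwrite yields PSTATUS_NOT_OWNED
    return (Interface::UNION == operation) ? (unsigned char)(current | requested)
                                           : requested;
  }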
4314 
choose_owner_idx(const std::vector<unsigned> & proc_list)4315   static size_t choose_owner_idx(const std::vector<unsigned>& proc_list)
4316   {
4317     // Try to assign owners pseudo-randomly so ownership is spread across procs.
4318     // (Note: seeding rand_r() with the same value, the XOR of the sharing procs, on all procs is essential.)
4319     unsigned val = 0;
4320     for (size_t i = 0; i < proc_list.size(); i++)
4321       val ^= proc_list[i];
4322     return rand_r(&val) % proc_list.size();
4323   }
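
  // Illustrative sketch (hypothetical helper, unused by the library): because
  // choose_owner_idx() seeds rand_r() from the XOR of the sharing-proc list,
  // every rank holding the same list computes the identical owner index, so all
  // ranks agree on the owner without any communication.
  static inline size_t example_owner_choice()
  {
    std::vector<unsigned> procs;
    procs.push_back(2); procs.push_back(5); procs.push_back(9);
    // Any rank with this same sorted list gets the same answer
    return choose_owner_idx(procs);
  }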
4324 
4325   struct set_tuple
4326   {
4327     unsigned idx;
4328     unsigned proc;
4329     EntityHandle handle;
operator <moab::set_tuple4330     inline bool operator<(set_tuple other) const
4331     { return (idx == other.idx) ? (proc < other.proc) : (idx < other.idx); }
4332   };
4333 
resolve_shared_sets(EntityHandle file,const Tag * idtag)4334   ErrorCode ParallelComm::resolve_shared_sets(EntityHandle file, const Tag* idtag)
4335   {
4336     // Find all sets with any of the following tags:
4337     const char* const shared_set_tag_names[] = {GEOM_DIMENSION_TAG_NAME,
4338                                                 MATERIAL_SET_TAG_NAME,
4339                                                 DIRICHLET_SET_TAG_NAME,
4340                                                 NEUMANN_SET_TAG_NAME,
4341                                                 PARALLEL_PARTITION_TAG_NAME};
4342     int num_tags = sizeof(shared_set_tag_names) / sizeof(shared_set_tag_names[0]);
4343     Range candidate_sets;
4344     ErrorCode result;
4345 
4346     // If we're not given an ID tag to use to globally identify sets,
4347     // then fall back to using known tag values
4348     if (!idtag) {
4349       Tag gid, tag;
4350       result = mbImpl->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid);
4351       if (MB_SUCCESS == result)
4352         result = mbImpl->tag_get_handle(GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, tag);
4353       if (MB_SUCCESS == result) {
4354         for (int d = 0; d < 4; d++) {
4355           candidate_sets.clear();
4356           const void* vals[] = { &d };
4357           result = mbImpl->get_entities_by_type_and_tag(file, MBENTITYSET, &tag, vals, 1, candidate_sets);
4358           if (MB_SUCCESS == result)
4359             resolve_shared_sets(candidate_sets, gid);
4360         }
4361       }
4362 
4363       for (int i = 1; i < num_tags; i++) {
4364         result = mbImpl->tag_get_handle(shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tag);
4365         if (MB_SUCCESS == result) {
4366           candidate_sets.clear();
4367           result = mbImpl->get_entities_by_type_and_tag(file, MBENTITYSET, &tag, 0, 1, candidate_sets);
4368           if (MB_SUCCESS == result)
4369             resolve_shared_sets(candidate_sets, tag);
4370         }
4371       }
4372 
4373       return MB_SUCCESS;
4374     }
4375 
4376     for (int i = 0; i < num_tags; i++) {
4377       Tag tag;
4378       result = mbImpl->tag_get_handle(shared_set_tag_names[i], 1, MB_TYPE_INTEGER,
4379                                       tag, MB_TAG_ANY);
4380       if (MB_SUCCESS != result)
4381         continue;
4382 
4383       mbImpl->get_entities_by_type_and_tag(file, MBENTITYSET, &tag, 0, 1, candidate_sets, Interface::UNION);
4384     }
4385 
4386     // Find any additional sets that contain shared entities
4387     Range::iterator hint = candidate_sets.begin();
4388     Range all_sets;
4389     mbImpl->get_entities_by_type(file, MBENTITYSET, all_sets);
4390     all_sets = subtract(all_sets, candidate_sets);
4391     Range::iterator it = all_sets.begin();
4392     while (it != all_sets.end()) {
4393       Range contents;
4394       mbImpl->get_entities_by_handle(*it, contents);
4395       contents.erase(contents.lower_bound(MBENTITYSET), contents.end());
4396       filter_pstatus(contents, PSTATUS_SHARED, PSTATUS_OR);
4397       if (contents.empty()) {
4398         ++it;
4399       }
4400       else {
4401         hint = candidate_sets.insert(hint, *it);
4402         it = all_sets.erase(it);
4403       }
4404     }
4405 
4406     // Find any additional sets that contain or are parents of potential shared sets
4407     Range prev_list = candidate_sets;
4408     while (!prev_list.empty()) {
4409       it = all_sets.begin();
4410       Range new_list;
4411       hint = new_list.begin();
4412       while (it != all_sets.end()) {
4413         Range contents;
4414         mbImpl->get_entities_by_type(*it, MBENTITYSET, contents);
4415         if (!intersect(prev_list,contents).empty()) {
4416           hint = new_list.insert(hint, *it);
4417           it = all_sets.erase(it);
4418         }
4419         else {
4420             contents.clear(); // reuse the range for the child-set query below
4421           mbImpl->get_child_meshsets(*it, contents);
4422           if (!intersect(prev_list,contents).empty()) {
4423             hint = new_list.insert(hint, *it);
4424             it = all_sets.erase(it);
4425           }
4426           else {
4427             ++it;
4428           }
4429         }
4430       }
4431 
4432       candidate_sets.merge(new_list);
4433       prev_list.swap(new_list);
4434     }
4435 
4436     return resolve_shared_sets(candidate_sets, *idtag);
4437   }
4438 
4439 #ifndef NDEBUG
is_sorted_unique(std::vector<unsigned> & v)4440   bool is_sorted_unique(std::vector<unsigned>& v)
4441   {
4442     for (size_t i = 1; i < v.size(); i++)
4443       if (v[i - 1] >= v[i])
4444         return false;
4445     return true;
4446   }
4447 #endif
4448 
resolve_shared_sets(Range & sets,Tag idtag)4449   ErrorCode ParallelComm::resolve_shared_sets(Range& sets, Tag idtag)
4450   {
4451     ErrorCode result;
4452     const unsigned rk = proc_config().proc_rank();
4453     MPI_Comm cm = proc_config().proc_comm();
4454 
4455     // Build sharing list for all sets
4456 
4457     // Get ids for sets in a vector, to pass to gs
4458     std::vector<long> larray; // Allocate sufficient space for longs
4459     std::vector<Ulong> handles;
4460     Range tmp_sets;
4461     // The id tag can be 4 bytes (int) or sizeof(long) bytes (handle/opaque).
4462     // Convert the values to long either way, the same as is done when
4463     // resolving shared vertices. This also works on 32-bit builds, where
4464     // long is only 4 bytes, because the handle-sized case is tested against
4465     // sizeof(long) rather than a hard-coded 8
4466     DataType tag_type;
4467     result = mbImpl->tag_get_data_type(idtag, tag_type);MB_CHK_SET_ERR(result, "Failed getting tag data type");
4468     int bytes_per_tag;
4469     result = mbImpl->tag_get_bytes(idtag, bytes_per_tag);MB_CHK_SET_ERR(result, "Failed getting number of bytes per tag");
4470     // On typical 64-bit platforms sizeof(long) differs from sizeof(int);
4471     // on 32-bit platforms both are 4 bytes, which the size tests below account for
4472 
4473     for (Range::iterator rit = sets.begin(); rit != sets.end(); ++rit) {
4474       if (sizeof(long) == bytes_per_tag && ((MB_TYPE_HANDLE == tag_type) || (MB_TYPE_OPAQUE == tag_type))) { // It is a special id tag
4475         long dum;
4476         result = mbImpl->tag_get_data(idtag, &(*rit), 1, &dum);
4477         if (MB_SUCCESS == result) {
4478           larray.push_back(dum);
4479           handles.push_back(*rit);
4480           tmp_sets.insert(tmp_sets.end(), *rit);
4481         }
4482       }
4483       else if (4 == bytes_per_tag) { // Must be GLOBAL_ID tag or MATERIAL_ID, etc
4484         int dum;
4485         result = mbImpl->tag_get_data(idtag, &(*rit), 1, &dum);
4486         if (MB_SUCCESS == result) {
4487           larray.push_back(dum);
4488           handles.push_back(*rit);
4489           tmp_sets.insert(tmp_sets.end(), *rit);
4490         }
4491       }
4492     }
4493 
4494     const size_t nsets = handles.size();
4495 
4496     // Get handle array for sets
4497     // Note: this does not hold on 64-bit Windows, where EntityHandle is 64 bits but long is only 32:
4498     // assert(sizeof(EntityHandle) <= sizeof(unsigned long));
4499 
4500     // Do communication of data
4501     gs_data::crystal_data *cd = procConfig.crystal_router();
4502     gs_data *gsd = new gs_data();
4503     result = gsd->initialize(nsets, &larray[0], &handles[0], 2, 1, 1, cd);MB_CHK_SET_ERR(result, "Failed to create gs data");
4504 
4505     // Convert from global IDs grouped by process rank to list
4506     // of <idx, rank> pairs so that we can sort primarily
4507     // by idx and secondarily by rank (we want lists of procs for each
4508     // idx, not lists of indices for each proc).
4509     size_t ntuple = 0;
4510     for (unsigned p = 0; p < gsd->nlinfo->_np; p++)
4511       ntuple += gsd->nlinfo->_nshared[p];
4512     std::vector< set_tuple > tuples;
4513     tuples.reserve(ntuple);
4514     size_t j = 0;
4515     for (unsigned p = 0; p < gsd->nlinfo->_np; p++) {
4516       for (unsigned np = 0; np < gsd->nlinfo->_nshared[p]; np++) {
4517         set_tuple t;
4518         t.idx = gsd->nlinfo->_sh_ind[j];
4519         t.proc = gsd->nlinfo->_target[p];
4520         t.handle = gsd->nlinfo->_ulabels[j];
4521         tuples.push_back(t);
4522         j++;
4523       }
4524     }
4525     std::sort(tuples.begin(), tuples.end());
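    // After this sort, tuples sharing the same local set index are contiguous
    // and ordered by proc (see set_tuple::operator<), which is what the
    // grouping loop below relies on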
4526 
4527     // Release crystal router stuff
4528     gsd->reset();
4529     delete gsd;
4530 
4531     // Storing sharing data for each set
4532     size_t ti = 0;
4533     unsigned idx = 0;
4534     std::vector<unsigned> procs;
4535     Range::iterator si = tmp_sets.begin();
4536     while (si != tmp_sets.end() && ti < tuples.size()) {
4537       assert(idx <= tuples[ti].idx);
4538       if (idx < tuples[ti].idx)
4539         si += (tuples[ti].idx - idx);
4540       idx = tuples[ti].idx;
4541 
4542       procs.clear();
4543       size_t ti_init = ti;
4544       while (ti < tuples.size() && tuples[ti].idx == idx) {
4545         procs.push_back(tuples[ti].proc);
4546         ++ti;
4547       }
4548       assert(is_sorted_unique(procs));
4549 
4550       result = sharedSetData->set_sharing_procs(*si, procs);
4551       if (MB_SUCCESS != result) {
4552         std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
4553         std::cerr.flush();
4554         MPI_Abort(cm, 1);
4555       }
4556 
4557       // Add this proc to list of sharing procs in correct position
4558       // so that all procs select owner based on same list
4559       std::vector<unsigned>::iterator it = std::lower_bound(procs.begin(), procs.end(), rk);
4560       assert(it == procs.end() || *it > rk);
4561       procs.insert(it, rk);
4562       size_t owner_idx = choose_owner_idx(procs);
4563       EntityHandle owner_handle;
4564       if (procs[owner_idx] == rk)
4565         owner_handle = *si;
4566       else if (procs[owner_idx] > rk)
4567         owner_handle = tuples[ti_init + owner_idx - 1].handle;
4568       else
4569         owner_handle = tuples[ti_init + owner_idx].handle;
4570       result = sharedSetData->set_owner(*si, procs[owner_idx], owner_handle);
4571       if (MB_SUCCESS != result) {
4572         std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
4573         std::cerr.flush();
4574         MPI_Abort(cm, 1);
4575       }
4576 
4577       ++si;
4578       ++idx;
4579     }
4580 
4581     return MB_SUCCESS;
4582   }
4583     // populate sets with ghost entities, if necessary
augment_default_sets_with_ghosts(EntityHandle file_set)4584   ErrorCode ParallelComm::augment_default_sets_with_ghosts(EntityHandle file_set) {
4585     // gather all default sets we are interested in, material, neumann, etc
4586     // we will skip geometry sets, because they are not uniquely identified with their tag value
4587     // maybe we will add another tag, like category
4588 
4589     if (procConfig.proc_size() < 2)
4590       return MB_SUCCESS; // nothing to do in a serial run
4591     const char* const shared_set_tag_names[] =
4592         { MATERIAL_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
4593             PARALLEL_PARTITION_TAG_NAME };
4594 
4595     int num_tags = sizeof(shared_set_tag_names) / sizeof(shared_set_tag_names[0]);
4596 
4597     Range * rangeSets = new Range[num_tags];
4598     Tag * tags = new Tag[num_tags + 1]; // one extra for global id tag, which is an int, so far
4599 
4600     int my_rank = rank();
4601     int ** tagVals = new int*[num_tags];
4602     for (int i = 0; i < num_tags; i++)
4603       tagVals[i] = NULL;
4604     ErrorCode rval;
4605 
4606     // for each tag, we keep a local map, from the value to the actual set with that value
4607     // we assume that the tag values are unique, for a given set, otherwise we
4608     // do not know to which set to add the entity
4609 
4610     typedef std::map<int, EntityHandle> MVal;
4611     typedef std::map<int, EntityHandle>::iterator itMVal;
4612     MVal * localMaps = new MVal[num_tags];
4613 
4614     for (int i = 0; i < num_tags; i++) {
4615 
4616       rval = mbImpl->tag_get_handle(shared_set_tag_names[i], 1, MB_TYPE_INTEGER,
4617           tags[i], MB_TAG_ANY);
4618       if (MB_SUCCESS != rval)
4619         continue;
4620       rval = mbImpl->get_entities_by_type_and_tag(file_set, MBENTITYSET,
4621           &(tags[i]), 0, 1, rangeSets[i], Interface::UNION);
4622       MB_CHK_SET_ERR(rval, "can't get sets with a tag");
4623 
4624       if (rangeSets[i].size() > 0) {
4625         tagVals[i] = new int[rangeSets[i].size()];
4626         // fill up with the tag values
4627         rval = mbImpl->tag_get_data(tags[i], rangeSets[i], tagVals[i]);
4628         MB_CHK_SET_ERR(rval, "can't get set tag values");
4629         // now for inverse mapping:
4630         for (int j = 0; j < (int) rangeSets[i].size(); j++) {
4631           localMaps[i][tagVals[i][j]] = rangeSets[i][j];
4632         }
4633       }
4634     }
4635     // get the global id tag too
4636     rval = mbImpl->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER,
4637         tags[num_tags], MB_TAG_ANY);
4638     MB_CHK_SET_ERR(rval, "can't get global id tag");
4639 
4640     TupleList remoteEnts;
4641     // processor to send to, type of tag (0 - material, 1 - dirichlet,
4642     //                                    2 - neumann, 3 - partition,
4643     //                                    num_tags - global id),
4644     //                       tag value,
4645     //                       remote handle
4646     int initialSize = (int)sharedEnts.size(); // estimate that on average, each shared ent
4647     // will be sent to one processor, for one tag
4648     // we will actually send only entities that are owned locally, and from those
4649     // only those that do have a special tag (material, neumann, etc)
4650     // if we exceed the capacity, we resize the tuple
4651     remoteEnts.initialize(3, 0, 1, 0, initialSize);
4652     remoteEnts.enableWriteAccess();
4653 
4654     // now, for each owned entity, get the remote handle(s) and Proc(s), and verify if it
4655     // belongs to one of the sets; if yes, create a tuple and append it
4656 
4657     std::set<EntityHandle> own_and_sha;
4658     int ir = 0, jr = 0;
4659     for (std::vector<EntityHandle>::iterator vit = sharedEnts.begin();
4660         vit != sharedEnts.end(); ++vit)
4661     {
4662       // ghosted eh
4663       EntityHandle geh = *vit;
4664       if (own_and_sha.find(geh)!=own_and_sha.end())// already encountered
4665         continue;
4666       int procs[MAX_SHARING_PROCS];
4667       EntityHandle handles[MAX_SHARING_PROCS];
4668       int nprocs;
4669       unsigned char pstat;
4670       rval = get_sharing_data(geh, procs, handles, pstat, nprocs);
4671       MB_CHK_SET_ERR(rval, "Failed to get sharing data");
4672       if (pstat & PSTATUS_NOT_OWNED)
4673         continue; // we will send info only for entities that we own
4674       own_and_sha.insert(geh);
4675       for (int i = 0; i < num_tags; i++) {
4676         for (int j = 0; j < (int) rangeSets[i].size(); j++) {
4677           EntityHandle specialSet = rangeSets[i][j]; // this set has tag i, value tagVals[i][j];
4678           if (mbImpl->contains_entities(specialSet, &geh, 1)) {
4679             // this ghosted entity is in a special set, so form the tuple
4680             // to send to the processors that do not own this
4681             for (int k = 0; k < nprocs; k++) {
4682               if (procs[k] != my_rank) {
4683                 if (remoteEnts.get_n()>=remoteEnts.get_max()-1)
4684                 {
4685                   // resize, so we do not overflow
4686                   int oldSize = remoteEnts.get_max();
4687                   // increase the capacity by 50%
4688                   remoteEnts.resize(oldSize+oldSize/2+1);
4689                 }
4690                 remoteEnts.vi_wr[ir++] = procs[k]; // send to proc
4691                 remoteEnts.vi_wr[ir++] = i; // for the tags [i] (0-3)
4692                 remoteEnts.vi_wr[ir++] = tagVals[i][j]; // actual value of the tag
4693                 remoteEnts.vul_wr[jr++] = handles[k];
4694                 remoteEnts.inc_n();
4695               }
4696             }
4697           }
4698         }
4699       }
4700       // if the local entity has a global id, send it too, so we avoid
4701       // another "exchange_tags" for global id
4702       int gid;
4703       rval = mbImpl->tag_get_data(tags[num_tags], &geh, 1, &gid);
4704       MB_CHK_SET_ERR(rval, "Failed to get global id");
4705       if (gid != 0) {
4706         for (int k = 0; k < nprocs; k++) {
4707           if (procs[k] != my_rank) {
4708             if (remoteEnts.get_n()>=remoteEnts.get_max()-1)
4709             {
4710               // resize, so we do not overflow
4711               int oldSize = remoteEnts.get_max();
4712               // increase the capacity by 50%
4713               remoteEnts.resize(oldSize+oldSize/2+1);
4714             }
4715             remoteEnts.vi_wr[ir++] = procs[k]; // send to proc
4716             remoteEnts.vi_wr[ir++] = num_tags; // for the tags [j] (4)
4717             remoteEnts.vi_wr[ir++] = gid; // actual value of the tag
4718             remoteEnts.vul_wr[jr++] = handles[k];
4719             remoteEnts.inc_n();
4720           }
4721         }
4722       }
4723     }
4724 
4725   #ifndef NDEBUG
4726     if (my_rank == 1 && 1 == get_debug_verbosity())
4727       remoteEnts.print(" on rank 1, before augment routing");
4728     MPI_Barrier(procConfig.proc_comm());
4729     int sentEnts = remoteEnts.get_n();
4730     assert((sentEnts == jr) && (3 * sentEnts == ir));
4731   #endif
4732     // exchange the info now, and send to
4733     gs_data::crystal_data *cd = this->procConfig.crystal_router();
4734     // All communication happens here; no other mpi calls
4735     // Also, this is a collective call
4736     rval = cd->gs_transfer(1, remoteEnts, 0);
4737     MB_CHK_SET_ERR(rval, "Error in tuple transfer");
4738   #ifndef NDEBUG
4739     if (my_rank == 0 && 1 == get_debug_verbosity())
4740       remoteEnts.print(" on rank 0, after augment routing");
4741     MPI_Barrier(procConfig.proc_comm());
4742   #endif
4743 
4744     // now process the data received from other processors
4745     int received = remoteEnts.get_n();
4746     for (int i = 0; i < received; i++) {
4747       //int from = ents_to_delete.vi_rd[i];
4748       EntityHandle geh = (EntityHandle) remoteEnts.vul_rd[i];
4749       int from_proc = remoteEnts.vi_rd[3 * i];
4750       if (my_rank == from_proc)
4751         std::cout << " unexpected receive from my rank " << my_rank
4752             << " during augmenting with ghosts\n ";
4753       int tag_type = remoteEnts.vi_rd[3 * i + 1];
4754       assert((0 <= tag_type) && (tag_type <= num_tags));
4755       int value = remoteEnts.vi_rd[3 * i + 2];
4756       if (tag_type == num_tags) {
4757         // it is global id
4758         rval = mbImpl->tag_set_data(tags[num_tags], &geh, 1, &value);
4759         MB_CHK_SET_ERR(rval, "Error in setting gid tag");
4760       } else {
4761         // now, based on value and tag type, see if we have that value in the map
4762         MVal & lmap = localMaps[tag_type];
4763         itMVal itm = lmap.find(value);
4764         if (itm == lmap.end()) {
4765           // the value was not found yet in the local map, so we have to create the set
4766           EntityHandle newSet;
4767           rval = mbImpl->create_meshset(MESHSET_SET, newSet);
4768           MB_CHK_SET_ERR(rval, "can't create new set");
4769           lmap[value] = newSet;
4770           // set the tag value
4771           rval = mbImpl->tag_set_data(tags[tag_type], &newSet, 1, &value);
4772           MB_CHK_SET_ERR(rval, "can't set tag for new set");
4773 
4774           // We also need to add the newly created set to the file set, if it is not null
4775           if (file_set) {
4776             rval = mbImpl->add_entities(file_set, &newSet, 1);
4777             MB_CHK_SET_ERR(rval, "can't add new set to the file set");
4778           }
4779         }
4780         // add the entity to the set pointed to by the map
4781         rval = mbImpl->add_entities(lmap[value], &geh, 1);
4782         MB_CHK_SET_ERR(rval, "can't add ghost ent to the set");
4783       }
4784     }
4785 
4786     for (int i = 0; i < num_tags; i++)
4787       delete[] tagVals[i];
4788     delete[] tagVals;
4789     delete[] rangeSets;
4790     delete [] tags;
4791     delete[] localMaps;
4792     return MB_SUCCESS;
4793   }
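
  // Illustrative sketch (hypothetical helper, unused by the library): the
  // inverse map used above (tag value -> local set handle), reduced to a plain
  // lookup. An incoming (value, entity) pair is routed to the matching set, or
  // the caller is told a new set must be created first, mirroring the receive loop.
  static inline bool example_lookup_set_for_value(const std::map<int, EntityHandle>& value_to_set,
                                                  int incoming_value,
                                                  EntityHandle& set_out)
  {
    std::map<int, EntityHandle>::const_iterator it = value_to_set.find(incoming_value);
    if (it == value_to_set.end())
      return false;   // caller must create a set, tag it with incoming_value, and record it in the map
    set_out = it->second;
    return true;
  }
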
create_interface_sets(EntityHandle this_set,int resolve_dim,int shared_dim)4794   ErrorCode ParallelComm::create_interface_sets(EntityHandle this_set, int resolve_dim, int shared_dim)
4795   {
4796     std::map<std::vector<int>, std::vector<EntityHandle> > proc_nvecs;
4797 
4798     // Build up the list of shared entities
4799     int procs[MAX_SHARING_PROCS];
4800     EntityHandle handles[MAX_SHARING_PROCS];
4801     ErrorCode result;
4802     int nprocs;
4803     unsigned char pstat;
4804     for (std::vector<EntityHandle>::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit) {
4805       if (shared_dim != -1 && mbImpl->dimension_from_handle(*vit) > shared_dim)
4806         continue;
4807       result = get_sharing_data(*vit, procs, handles, pstat, nprocs);MB_CHK_SET_ERR(result, "Failed to get sharing data");
4808       std::sort(procs, procs + nprocs);
4809       std::vector<int> tmp_procs(procs, procs + nprocs);
4810       assert(tmp_procs.size() != 2);
4811       proc_nvecs[tmp_procs].push_back(*vit);
4812     }
4813 
4814     Skinner skinner(mbImpl);
4815     Range skin_ents[4];
4816     result = mbImpl->get_entities_by_dimension(this_set, resolve_dim, skin_ents[resolve_dim]);MB_CHK_SET_ERR(result, "Failed to get skin entities by dimension");
4817     result = skinner.find_skin(this_set, skin_ents[resolve_dim], false,
4818                                skin_ents[resolve_dim - 1], 0, true, true, true);MB_CHK_SET_ERR(result, "Failed to find skin");
4819     if (shared_dim > 1) {
4820       result = mbImpl->get_adjacencies(skin_ents[resolve_dim - 1], resolve_dim - 2, true,
4821                                        skin_ents[resolve_dim - 2], Interface::UNION);MB_CHK_SET_ERR(result, "Failed to get skin adjacencies");
4822     }
4823 
4824     result = get_proc_nvecs(resolve_dim, shared_dim, skin_ents, proc_nvecs);MB_CHK_SET_ERR(result, "Failed to build sharing proc map");
4825 
4826     return create_interface_sets(proc_nvecs);
4827   }
4828 
create_interface_sets(std::map<std::vector<int>,std::vector<EntityHandle>> & proc_nvecs)4829   ErrorCode ParallelComm::create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs)
4830   {
4831     if (proc_nvecs.empty())
4832       return MB_SUCCESS;
4833 
4834     int proc_ids[MAX_SHARING_PROCS];
4835     EntityHandle proc_handles[MAX_SHARING_PROCS];
4836     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
4837     ErrorCode result = get_shared_proc_tags(shp_tag, shps_tag,
4838                                             shh_tag, shhs_tag,
4839                                             pstat_tag);MB_CHK_SET_ERR(result, "Failed to get shared proc tags in create_interface_sets");
4840     Range::iterator rit;
4841 
4842     // Create interface sets, tag them, and tag their contents with iface set tag
4843     std::vector<unsigned char> pstatus;
4844     for (std::map<std::vector<int>,std::vector<EntityHandle> >::iterator vit = proc_nvecs.begin();
4845          vit != proc_nvecs.end(); ++vit) {
4846       // Create the set
4847       EntityHandle new_set;
4848       result = mbImpl->create_meshset(MESHSET_SET, new_set);MB_CHK_SET_ERR(result, "Failed to create interface set");
4849       interfaceSets.insert(new_set);
4850 
4851       // Add entities
4852       assert(!vit->second.empty());
4853       result = mbImpl->add_entities(new_set, &(vit->second)[0], (vit->second).size());MB_CHK_SET_ERR(result, "Failed to add entities to interface set");
4854       // Tag set with the proc rank(s)
4855       if (vit->first.size() == 1) {
4856         assert((vit->first)[0] != (int)procConfig.proc_rank());
4857         result = mbImpl->tag_set_data(shp_tag, &new_set, 1,
4858                                       &(vit->first)[0]);MB_CHK_SET_ERR(result, "Failed to tag interface set with procs");
4859         proc_handles[0] = 0;
4860         result = mbImpl->tag_set_data(shh_tag, &new_set, 1,
4861                                       proc_handles);MB_CHK_SET_ERR(result, "Failed to tag interface set with procs");
4862       }
4863       else {
4864         // Pad tag data out to MAX_SHARING_PROCS with -1
4865         if (vit->first.size() > MAX_SHARING_PROCS) {
4866           std::cerr << "Exceeded MAX_SHARING_PROCS for "
4867                     << CN::EntityTypeName(TYPE_FROM_HANDLE(new_set))
4868                     << ' ' << ID_FROM_HANDLE(new_set)
4869                     << " on process " << proc_config().proc_rank()
4870                     << std::endl;
4871           std::cerr.flush();
4872           MPI_Abort(proc_config().proc_comm(), 66);
4873         }
4874         //assert(vit->first.size() <= MAX_SHARING_PROCS);
4875         std::copy(vit->first.begin(), vit->first.end(), proc_ids);
4876         std::fill(proc_ids + vit->first.size(), proc_ids + MAX_SHARING_PROCS, -1);
4877         result = mbImpl->tag_set_data(shps_tag, &new_set, 1, proc_ids);MB_CHK_SET_ERR(result, "Failed to tag interface set with procs");
4878         unsigned int ind = std::find(proc_ids, proc_ids + vit->first.size(), procConfig.proc_rank())
4879           - proc_ids;
4880         assert(ind < vit->first.size());
4881         std::fill(proc_handles, proc_handles + MAX_SHARING_PROCS, 0);
4882         proc_handles[ind] = new_set;
4883         result = mbImpl->tag_set_data(shhs_tag, &new_set, 1, proc_handles);MB_CHK_SET_ERR(result, "Failed to tag interface set with procs");
4884       }
4885 
4886       // Get the owning proc, then set the pstatus tag on iface set
4887       int min_proc = (vit->first)[0];
4888       unsigned char pval = (PSTATUS_SHARED | PSTATUS_INTERFACE);
4889       if (min_proc < (int) procConfig.proc_rank())
4890         pval |= PSTATUS_NOT_OWNED;
4891       if (vit->first.size() > 1)
4892         pval |= PSTATUS_MULTISHARED;
4893       result = mbImpl->tag_set_data(pstat_tag, &new_set, 1, &pval);MB_CHK_SET_ERR(result, "Failed to tag interface set with pstatus");
4894 
4895       // Tag the vertices with the same thing
4896       pstatus.clear();
4897       std::vector<EntityHandle> verts;
4898       for (std::vector<EntityHandle>::iterator v2it = (vit->second).begin(); v2it != (vit->second).end(); ++v2it)
4899         if (mbImpl->type_from_handle(*v2it) == MBVERTEX) verts.push_back(*v2it);
4900       pstatus.resize(verts.size(), pval);
4901       if (!verts.empty()) {
4902         result = mbImpl->tag_set_data(pstat_tag, &verts[0], verts.size(), &pstatus[0]);MB_CHK_SET_ERR(result, "Failed to tag interface set vertices with pstatus");
4903       }
4904     }
4905 
4906     return MB_SUCCESS;
4907   }
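
  // Illustrative sketch (hypothetical helper, unused by the library): the
  // grouping that feeds create_interface_sets() above. Entities sharing the
  // exact same (sorted) list of remote procs land in the same map entry, and
  // each entry becomes one interface set.
  static inline void example_group_by_sharing_procs(
      std::map<std::vector<int>, std::vector<EntityHandle> >& proc_nvecs,
      EntityHandle ent, const int* procs, int nprocs)
  {
    std::vector<int> key(procs, procs + nprocs);
    std::sort(key.begin(), key.end());   // identical ordering on every rank
    proc_nvecs[key].push_back(ent);      // one bucket per distinct proc list
  }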
4908 
create_iface_pc_links()4909   ErrorCode ParallelComm::create_iface_pc_links()
4910   {
4911     // Now that we've resolved the entities in the iface sets,
4912     // set parent/child links between the iface sets
4913 
4914     // First tag all entities in the iface sets
4915     Tag tmp_iface_tag;
4916     EntityHandle tmp_iface_set = 0;
4917     ErrorCode result = mbImpl->tag_get_handle("__tmp_iface", 1, MB_TYPE_HANDLE,
4918                                               tmp_iface_tag, MB_TAG_DENSE | MB_TAG_CREAT,
4919                                               &tmp_iface_set);MB_CHK_SET_ERR(result, "Failed to create temporary interface set tag");
4920 
4921     Range iface_ents;
4922     std::vector<EntityHandle> tag_vals;
4923     Range::iterator rit;
4924 
4925     for (rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit) {
4926       // tag entities with interface set
4927       iface_ents.clear();
4928       result = mbImpl->get_entities_by_handle(*rit, iface_ents);MB_CHK_SET_ERR(result, "Failed to get entities in interface set");
4929 
4930       if (iface_ents.empty())
4931         continue;
4932 
4933       tag_vals.resize(iface_ents.size());
4934       std::fill(tag_vals.begin(), tag_vals.end(), *rit);
4935       result = mbImpl->tag_set_data(tmp_iface_tag, iface_ents, &tag_vals[0]);MB_CHK_SET_ERR(result, "Failed to tag iface entities with interface set");
4936     }
4937 
4938     // Now go back through interface sets and add parent/child links
4939     Range tmp_ents2;
4940     for (int d = 2; d >= 0; d--) {
4941       for (rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit) {
4942         // Get entities on this interface
4943         iface_ents.clear();
4944         result = mbImpl->get_entities_by_handle(*rit, iface_ents, true);MB_CHK_SET_ERR(result, "Failed to get entities by handle");
4945         if (iface_ents.empty() || mbImpl->dimension_from_handle(*iface_ents.rbegin()) != d)
4946           continue;
4947 
4948         // Get higher-dimensional entities and their interface sets
4949         result = mbImpl->get_adjacencies(&(*iface_ents.begin()), 1, d + 1,
4950                                          false, tmp_ents2);MB_CHK_SET_ERR(result, "Failed to get adjacencies for interface sets");
4951         tag_vals.resize(tmp_ents2.size());
4952         result = mbImpl->tag_get_data(tmp_iface_tag, tmp_ents2, &tag_vals[0]);MB_CHK_SET_ERR(result, "Failed to get tmp iface tag for interface sets");
4953 
4954         // Go through and for any on interface make it a parent
4955         EntityHandle last_set = 0;
4956         for (unsigned int i = 0; i < tag_vals.size(); i++) {
4957           if (tag_vals[i] && tag_vals[i] != last_set) {
4958             result = mbImpl->add_parent_child(tag_vals[i], *rit);MB_CHK_SET_ERR(result, "Failed to add parent/child link for interface set");
4959             last_set = tag_vals[i];
4960           }
4961         }
4962       }
4963     }
4964 
4965     // Delete the temporary tag
4966     result = mbImpl->tag_delete(tmp_iface_tag);MB_CHK_SET_ERR(result, "Failed to delete tmp iface tag");
4967 
4968     return MB_SUCCESS;
4969   }
4970 
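  // Note (derived from the loop below): populate proc_nvecs for non-vertex skin
  // entities, mapping each sorted sharing-processor list to the skin entities shared
  // with exactly that set of procs; vertices are assumed to have been keyed into
  // proc_nvecs already, so the loop runs over dimensions 3..1 and skips resolve_dim.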
4971   ErrorCode ParallelComm::get_proc_nvecs(int resolve_dim,
4972                                          int shared_dim,
4973                                          Range *skin_ents,
4974                                          std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs)
4975   {
4976     // Set sharing procs tags on other skin ents
4977     ErrorCode result;
4978     const EntityHandle *connect; int num_connect;
4979     std::set<int> sharing_procs;
4980     std::vector<EntityHandle> dum_connect;
4981     std::vector<int> sp_vec;
4982 
4983     for (int d = 3; d > 0; d--) {
4984       if (resolve_dim == d)
4985         continue;
4986 
4987       for (Range::iterator rit = skin_ents[d].begin();
4988            rit != skin_ents[d].end(); ++rit) {
4989         // Get connectivity
4990         result = mbImpl->get_connectivity(*rit, connect, num_connect, false,
4991                                           &dum_connect);MB_CHK_SET_ERR(result, "Failed to get connectivity on non-vertex skin entities");
4992 
4993         int op = (resolve_dim < shared_dim ? Interface::UNION : Interface::INTERSECT);
4994         result = get_sharing_data(connect, num_connect, sharing_procs, op);MB_CHK_SET_ERR(result, "Failed to get sharing data in get_proc_nvecs");
4995         if (sharing_procs.empty() ||
4996             (sharing_procs.size() == 1 && *sharing_procs.begin() == (int)procConfig.proc_rank()))
4997           continue;
4998 
4999         // Need to specify sharing data correctly for entities or they will
5000         // end up in a different interface set than corresponding vertices
5001         if (sharing_procs.size() == 2) {
5002           std::set<int>::iterator it = sharing_procs.find(proc_config().proc_rank());
5003           assert(it != sharing_procs.end());
5004           sharing_procs.erase(it);
5005         }
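        // Illustrative example: if sharing_procs == {2, proc_rank()}, the local rank
        // is erased so the key becomes {2}, matching how the corresponding vertices
        // were keyed.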
5006 
5007         // Intersection is the owning proc(s) for this skin ent
5008         sp_vec.clear();
5009         std::copy(sharing_procs.begin(), sharing_procs.end(), std::back_inserter(sp_vec));
5010         assert(sp_vec.size() != 2);
5011         proc_nvecs[sp_vec].push_back(*rit);
5012       }
5013     }
5014 
5015 #ifndef NDEBUG
5016     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
5017     for (std::map<std::vector<int>, std::vector<EntityHandle> >::iterator mit = proc_nvecs.begin();
5018          mit != proc_nvecs.end(); ++mit) {
5019       std::vector<EntityHandle> tmp_vec = (mit->second);
5020       std::sort(tmp_vec.begin(), tmp_vec.end());
5021       std::vector<EntityHandle>::iterator vit = std::unique(tmp_vec.begin(), tmp_vec.end());
5022       assert(vit == tmp_vec.end());
5023     }
5024 #endif
5025 
5026     return MB_SUCCESS;
5027   }
5028 
5029   // Overloaded form of tag_shared_verts
5030   // Tuple coming in is of form (arbitrary value, remoteProc, localHandle, remoteHandle)
5031   // Also checks for duplicate entries in the list if the list is sorted
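  // Layout assumed by the loop below (derived from the code, with i_extra leading
  // ints per tuple): for tuple t,
  //   remote proc   = shared_ents.vi_rd[t*(1 + i_extra) + i_extra]
  //   local handle  = shared_ents.vul_rd[2*t]
  //   remote handle = shared_ents.vul_rd[2*t + 1]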
5032   ErrorCode ParallelComm::tag_shared_verts(TupleList &shared_ents,
5033                                            std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
5034                                            Range& /*proc_verts*/,
5035                                            unsigned int i_extra)
5036   {
5037     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
5038     ErrorCode result = get_shared_proc_tags(shp_tag, shps_tag,
5039                                             shh_tag, shhs_tag,
5040                                             pstat_tag);MB_CHK_SET_ERR(result, "Failed to get shared proc tags in tag_shared_verts");
5041 
5042     unsigned int j = 0, i = 0;
5043     std::vector<int> sharing_procs, sharing_procs2, tag_procs;
5044     std::vector<EntityHandle> sharing_handles, sharing_handles2, tag_lhandles, tag_rhandles;
5045     std::vector<unsigned char> pstatus;
5046 
5047     // We're on tuple j/2
5048     if (i_extra)
5049       i += i_extra;
5050     while (j < 2*shared_ents.get_n()) {
5051       // Count & accumulate sharing procs
5052       EntityHandle this_ent = shared_ents.vul_rd[j], other_ent = 0;
5053       int other_proc = -1;
5054       while (j < 2*shared_ents.get_n() && shared_ents.vul_rd[j] == this_ent) {
5055         j++;
5056         // Shouldn't have same proc
5057         assert(shared_ents.vi_rd[i] != (int)procConfig.proc_rank());
5058         // Grab the remote data if it's not a duplicate
5059         if (shared_ents.vul_rd[j] != other_ent || shared_ents.vi_rd[i] != other_proc) {
5060           assert(0 != shared_ents.vul_rd[j]);
5061           sharing_procs.push_back(shared_ents.vi_rd[i]);
5062           sharing_handles.push_back(shared_ents.vul_rd[j]);
5063         }
5064         other_proc = shared_ents.vi_rd[i];
5065         other_ent = shared_ents.vul_rd[j];
5066         j++;
5067         i += 1 + i_extra;
5068       }
5069 
5070       if (sharing_procs.size() > 1) {
5071         // Add current proc/handle to list
5072         sharing_procs.push_back(procConfig.proc_rank());
5073         sharing_handles.push_back(this_ent);
5074 
5075         // Sort sharing_procs and sharing_handles such that
5076         // sharing_procs is in ascending order. Use temporary
5077         // lists and binary search to re-order sharing_handles.
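        // Illustrative example: sharing_procs = {5, 2, 7} with sharing_handles =
        // {h5, h2, h7} becomes sharing_procs = {2, 5, 7} and sharing_handles =
        // {h2, h5, h7}; each handle follows its proc to that proc's sorted position.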
5078         sharing_procs2 = sharing_procs;
5079         std::sort(sharing_procs2.begin(), sharing_procs2.end());
5080         sharing_handles2.resize(sharing_handles.size());
5081         for (size_t k = 0; k < sharing_handles.size(); k++) {
5082           size_t idx = std::lower_bound(sharing_procs2.begin(),
5083                                         sharing_procs2.end(),
5084                                         sharing_procs[k]) - sharing_procs2.begin();
5085           sharing_handles2[idx] = sharing_handles[k];
5086         }
5087         sharing_procs.swap(sharing_procs2);
5088         sharing_handles.swap(sharing_handles2);
5089       }
5090 
5091       assert(sharing_procs.size() != 2);
5092       proc_nvecs[sharing_procs].push_back(this_ent);
5093 
5094       unsigned char share_flag = PSTATUS_SHARED,
5095         ms_flag = (PSTATUS_SHARED | PSTATUS_MULTISHARED);
5096       if (sharing_procs.size() == 1) {
5097         tag_procs.push_back(sharing_procs[0]);
5098         tag_lhandles.push_back(this_ent);
5099         tag_rhandles.push_back(sharing_handles[0]);
5100         pstatus.push_back(share_flag);
5101       }
5102       else {
5103         // Pad lists
5104         //assert(sharing_procs.size() <= MAX_SHARING_PROCS);
5105         if (sharing_procs.size() > MAX_SHARING_PROCS) {
5106           std::cerr << "MAX_SHARING_PROCS exceeded for vertex " << this_ent <<
5107             " on process " << proc_config().proc_rank() << std::endl;
5108           std::cerr.flush();
5109           MPI_Abort(proc_config().proc_comm(), 66);
5110         }
5111         sharing_procs.resize(MAX_SHARING_PROCS, -1);
5112         sharing_handles.resize(MAX_SHARING_PROCS, 0);
5113         result = mbImpl->tag_set_data(shps_tag, &this_ent, 1,
5114                                       &sharing_procs[0]);MB_CHK_SET_ERR(result, "Failed to set sharedps tag on shared vertex");
5115         result = mbImpl->tag_set_data(shhs_tag, &this_ent, 1,
5116                                       &sharing_handles[0]);MB_CHK_SET_ERR(result, "Failed to set sharedhs tag on shared vertex");
5117         result = mbImpl->tag_set_data(pstat_tag, &this_ent, 1, &ms_flag);MB_CHK_SET_ERR(result, "Failed to set pstatus tag on shared vertex");
5118         sharedEnts.push_back(this_ent);
5119       }
5120 
5121       // Reset sharing proc(s) tags
5122       sharing_procs.clear();
5123       sharing_handles.clear();
5124     }
5125 
5126     if (!tag_procs.empty()) {
5127       result = mbImpl->tag_set_data(shp_tag, &tag_lhandles[0], tag_procs.size(),
5128                                     &tag_procs[0]);MB_CHK_SET_ERR(result, "Failed to set sharedp tag on shared vertex");
5129       result = mbImpl->tag_set_data(shh_tag, &tag_lhandles[0], tag_procs.size(),
5130                                     &tag_rhandles[0]);MB_CHK_SET_ERR(result, "Failed to set sharedh tag on shared vertex");
5131       result = mbImpl->tag_set_data(pstat_tag, &tag_lhandles[0], tag_procs.size(), &pstatus[0]);MB_CHK_SET_ERR(result, "Failed to set pstatus tag on shared vertex");
5132       std::copy(tag_lhandles.begin(), tag_lhandles.end(), std::back_inserter(sharedEnts));
5133     }
5134 
5135 #ifndef NDEBUG
5136     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
5137     for (std::map<std::vector<int>, std::vector<EntityHandle> >::iterator mit = proc_nvecs.begin();
5138          mit != proc_nvecs.end(); ++mit) {
5139       std::vector<EntityHandle> tmp_vec = (mit->second);
5140       std::sort(tmp_vec.begin(), tmp_vec.end());
5141       std::vector<EntityHandle>::iterator vit = std::unique(tmp_vec.begin(), tmp_vec.end());
5142       assert(vit == tmp_vec.end());
5143     }
5144 #endif
5145 
5146     return MB_SUCCESS;
5147   }
5148 
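  // Second form of tag_shared_verts: the tuple list refers to skin vertices by index
  // rather than by handle. As read by the loop below, tuple t carries
  // (vi_rd[2*t] = local skin-vertex index, vi_rd[2*t + 1] = remote proc) and
  // vul_rd[t] = remote handle; unlike the form above, duplicates are not filtered.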
5149   ErrorCode ParallelComm::tag_shared_verts(TupleList &shared_ents,
5150                                            Range *skin_ents,
5151                                            std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
5152                                            Range& /*proc_verts*/)
5153   {
5154     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
5155     ErrorCode result = get_shared_proc_tags(shp_tag, shps_tag,
5156                                             shh_tag, shhs_tag, pstat_tag);MB_CHK_SET_ERR(result, "Failed to get shared proc tags in tag_shared_verts");
5157 
5158     unsigned int j = 0, i = 0;
5159     std::vector<int> sharing_procs, sharing_procs2;
5160     std::vector<EntityHandle> sharing_handles, sharing_handles2, skin_verts(skin_ents[0].size());
5161     for (Range::iterator rit = skin_ents[0].begin(); rit != skin_ents[0].end(); ++rit, i++)
5162       skin_verts[i] = *rit;
5163     i = 0;
5164 
5165     while (j < 2*shared_ents.get_n()) {
5166       // Count & accumulate sharing procs
5167       int this_idx = shared_ents.vi_rd[j];
5168       EntityHandle this_ent = skin_verts[this_idx];
5169       while (j < 2*shared_ents.get_n() && shared_ents.vi_rd[j] == this_idx) {
5170         j++;
5171         // Shouldn't have same proc
5172         assert(shared_ents.vi_rd[j] != (int)procConfig.proc_rank());
5173         sharing_procs.push_back(shared_ents.vi_rd[j++]);
5174         sharing_handles.push_back(shared_ents.vul_rd[i++]);
5175       }
5176 
5177       if (sharing_procs.size() > 1) {
5178         // Add current proc/handle to list
5179         sharing_procs.push_back(procConfig.proc_rank());
5180         sharing_handles.push_back(this_ent);
5181       }
5182 
5183       // Sort sharing_procs and sharing_handles such that
5184       // sharing_procs is in ascending order. Use temporary
5185       // lists and binary search to re-order sharing_handles.
5186       sharing_procs2 = sharing_procs;
5187       std::sort(sharing_procs2.begin(), sharing_procs2.end());
5188       sharing_handles2.resize(sharing_handles.size());
5189       for (size_t k = 0; k < sharing_handles.size(); k++) {
5190         size_t idx = std::lower_bound(sharing_procs2.begin(),
5191                                       sharing_procs2.end(),
5192                                       sharing_procs[k]) - sharing_procs2.begin();
5193         sharing_handles2[idx] = sharing_handles[k];
5194       }
5195       sharing_procs.swap(sharing_procs2);
5196       sharing_handles.swap(sharing_handles2);
5197 
5198       assert(sharing_procs.size() != 2);
5199       proc_nvecs[sharing_procs].push_back(this_ent);
5200 
5201       unsigned char share_flag = PSTATUS_SHARED,
5202         ms_flag = (PSTATUS_SHARED | PSTATUS_MULTISHARED);
5203       if (sharing_procs.size() == 1) {
5204         result = mbImpl->tag_set_data(shp_tag, &this_ent, 1,
5205                                       &sharing_procs[0]);MB_CHK_SET_ERR(result, "Failed to set sharedp tag on shared vertex");
5206         result = mbImpl->tag_set_data(shh_tag, &this_ent, 1,
5207                                       &sharing_handles[0]);MB_CHK_SET_ERR(result, "Failed to set sharedh tag on shared vertex");
5208         result = mbImpl->tag_set_data(pstat_tag, &this_ent, 1, &share_flag);MB_CHK_SET_ERR(result, "Failed to set pstatus tag on shared vertex");
5209         sharedEnts.push_back(this_ent);
5210       }
5211       else {
5212         // Pad lists
5213         //assert(sharing_procs.size() <= MAX_SHARING_PROCS);
5214         if (sharing_procs.size() > MAX_SHARING_PROCS) {
5215           std::cerr << "MAX_SHARING_PROCS exceeded for vertex " << this_ent <<
5216             " on process " << proc_config().proc_rank() <<  std::endl;
5217           std::cerr.flush();
5218           MPI_Abort(proc_config().proc_comm(), 66);
5219         }
5220         sharing_procs.resize(MAX_SHARING_PROCS, -1);
5221         sharing_handles.resize(MAX_SHARING_PROCS, 0);
5222         result = mbImpl->tag_set_data(shps_tag, &this_ent, 1,
5223                                       &sharing_procs[0]);MB_CHK_SET_ERR(result, "Failed to set sharedps tag on shared vertex");
5224         result = mbImpl->tag_set_data(shhs_tag, &this_ent, 1,
5225                                       &sharing_handles[0]);MB_CHK_SET_ERR(result, "Failed to set sharedhs tag on shared vertex");
5226         result = mbImpl->tag_set_data(pstat_tag, &this_ent, 1, &ms_flag);MB_CHK_SET_ERR(result, "Failed to set pstatus tag on shared vertex");
5227         sharedEnts.push_back(this_ent);
5228       }
5229 
5230       // Reset sharing proc(s) tags
5231       sharing_procs.clear();
5232       sharing_handles.clear();
5233     }
5234 
5235 #ifndef NDEBUG
5236     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
5237     for (std::map<std::vector<int>, std::vector<EntityHandle> >::iterator mit = proc_nvecs.begin();
5238          mit != proc_nvecs.end(); ++mit) {
5239       std::vector<EntityHandle> tmp_vec = (mit->second);
5240       std::sort(tmp_vec.begin(), tmp_vec.end());
5241       std::vector<EntityHandle>::iterator vit = std::unique(tmp_vec.begin(), tmp_vec.end());
5242       assert(vit == tmp_vec.end());
5243     }
5244 #endif
5245 
5246     return MB_SUCCESS;
5247   }
5248 
5249   //! Get processors with which this processor communicates; sets are sorted by processor
5250   ErrorCode ParallelComm::get_interface_procs(std::set<unsigned int> &procs_set,
5251                                               bool get_buffs)
5252   {
5253     // Make sure the sharing procs vector is empty
5254     procs_set.clear();
5255 
5256     // Pre-load vector of single-proc tag values
5257     unsigned int i, j;
5258     std::vector<int> iface_proc(interfaceSets.size());
5259     ErrorCode result = mbImpl->tag_get_data(sharedp_tag(), interfaceSets, &iface_proc[0]);MB_CHK_SET_ERR(result, "Failed to get iface_proc for iface sets");
5260 
5261     // Get sharing procs either from single-proc vector or by getting
5262     // multi-proc tag value
5263     int tmp_iface_procs[MAX_SHARING_PROCS];
5264     std::fill(tmp_iface_procs, tmp_iface_procs + MAX_SHARING_PROCS, -1);
5265     Range::iterator rit;
5266     for (rit = interfaceSets.begin(), i = 0; rit != interfaceSets.end(); ++rit, i++) {
5267       if (-1 != iface_proc[i]) {
5268         assert(iface_proc[i] != (int)procConfig.proc_rank());
5269         procs_set.insert((unsigned int) iface_proc[i]);
5270       }
5271       else {
5272         // Get the sharing_procs tag
5273         result = mbImpl->tag_get_data(sharedps_tag(), &(*rit), 1,
5274                                       tmp_iface_procs);MB_CHK_SET_ERR(result, "Failed to get iface_procs for iface set");
5275         for (j = 0; j < MAX_SHARING_PROCS; j++) {
5276           if (-1 != tmp_iface_procs[j] && tmp_iface_procs[j] != (int)procConfig.proc_rank())
5277             procs_set.insert((unsigned int) tmp_iface_procs[j]);
5278           else if (-1 == tmp_iface_procs[j]) {
5279             std::fill(tmp_iface_procs, tmp_iface_procs + j, -1);
5280             break;
5281           }
5282         }
5283       }
5284     }
5285 
5286     if (get_buffs) {
5287       for (std::set<unsigned int>::iterator sit = procs_set.begin(); sit != procs_set.end(); ++sit)
5288         get_buffers(*sit);
5289     }
5290 
5291     return MB_SUCCESS;
5292   }
5293 
5294   ErrorCode ParallelComm::get_pstatus(EntityHandle entity,
5295                                       unsigned char &pstatus_val)
5296   {
5297     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &entity, 1, &pstatus_val);MB_CHK_SET_ERR(result, "Failed to get pstatus tag data");
5298     return result;
5299   }
5300 
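  // Semantics (derived from the loop below): a nonzero pstatus_val selects entities
  // with any of those bits set; pstatus_val == 0 selects entities whose pstatus is
  // exactly zero. Illustrative call, assuming a ParallelComm instance pc:
  //   Range shared2d;
  //   pc.get_pstatus_entities(2, PSTATUS_SHARED, shared2d); // all shared 2D entities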
5301   ErrorCode ParallelComm::get_pstatus_entities(int dim,
5302                                                unsigned char pstatus_val,
5303                                                Range &pstatus_ents)
5304   {
5305     Range ents;
5306     ErrorCode result;
5307 
5308     if (-1 == dim) {
5309       result = mbImpl->get_entities_by_handle(0, ents);MB_CHK_SET_ERR(result, "Failed to get all entities");
5310     }
5311     else {
5312       result = mbImpl->get_entities_by_dimension(0, dim, ents);MB_CHK_SET_ERR(result, "Failed to get entities of dimension " << dim);
5313     }
5314 
5315     std::vector<unsigned char> pstatus(ents.size());
5316     result = mbImpl->tag_get_data(pstatus_tag(), ents, &pstatus[0]);MB_CHK_SET_ERR(result, "Failed to get pstatus tag data");
5317     Range::iterator rit = ents.begin();
5318     int i = 0;
5319     if (pstatus_val) {
5320       for (; rit != ents.end(); i++, ++rit) {
5321         if (pstatus[i]&pstatus_val &&
5322             (-1 == dim || mbImpl->dimension_from_handle(*rit) == dim))
5323           pstatus_ents.insert(*rit);
5324       }
5325     }
5326     else {
5327       for (; rit != ents.end(); i++, ++rit) {
5328         if (!pstatus[i] &&
5329             (-1 == dim || mbImpl->dimension_from_handle(*rit) == dim))
5330           pstatus_ents.insert(*rit);
5331       }
5332     }
5333 
5334     return MB_SUCCESS;
5335   }
5336 
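  // Behavior sketch (derived from the body below): ensure the GLOBAL_ID tag exists;
  // if it was just created, or if any vertices in this_set still carry the default
  // id of -1, (re)assign global ids via assign_global_ids().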
5337   ErrorCode ParallelComm::check_global_ids(EntityHandle this_set,
5338                                            const int dimension,
5339                                            const int start_id,
5340                                            const bool largest_dim_only,
5341                                            const bool parallel,
5342                                            const bool owned_only)
5343   {
5344     // Global id tag
5345     Tag gid_tag; int def_val = -1;
5346     ErrorCode result = mbImpl->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER,
5347                                               gid_tag, MB_TAG_DENSE | MB_TAG_CREAT, &def_val);
5348     if (MB_ALREADY_ALLOCATED != result && MB_SUCCESS != result) {
5349       MB_SET_ERR(result, "Failed to create/get gid tag handle");
5350     }
5351 
5352     Range dum_range;
5353     if (MB_ALREADY_ALLOCATED == result) {
5354       void *tag_ptr = &def_val;
5355       ErrorCode tmp_result = mbImpl->get_entities_by_type_and_tag(this_set, MBVERTEX,
5356                                                                   &gid_tag, &tag_ptr, 1,
5357                                                                   dum_range);MB_CHK_SET_ERR(tmp_result, "Failed to get entities by MBVERTEX type and gid tag");
5358     }
5359 
5360     if (MB_ALREADY_ALLOCATED != result || !dum_range.empty()) {
5361       // Just created it, so we need global ids
5362       result = assign_global_ids(this_set, dimension, start_id, largest_dim_only,
5363                                  parallel, owned_only);MB_CHK_SET_ERR(result, "Failed assigning global ids");
5364     }
5365 
5366     return MB_SUCCESS;
5367   }
5368 
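  // Returns true if to_proc appears in the sharedp/sharedps tag data of this_set,
  // i.e. if this interface set is shared with to_proc.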
5369   bool ParallelComm::is_iface_proc(EntityHandle this_set,
5370                                    int to_proc)
5371   {
5372     int sharing_procs[MAX_SHARING_PROCS];
5373     std::fill(sharing_procs, sharing_procs + MAX_SHARING_PROCS, -1);
5374     ErrorCode result = mbImpl->tag_get_data(sharedp_tag(), &this_set, 1,
5375                                             sharing_procs);
5376     if (MB_SUCCESS == result && to_proc == sharing_procs[0])
5377       return true;
5378 
5379     result = mbImpl->tag_get_data(sharedps_tag(), &this_set, 1,
5380                                   sharing_procs);
5381     if (MB_SUCCESS != result)
5382       return false;
5383 
5384     for (int i = 0; i < MAX_SHARING_PROCS; i++) {
5385       if (to_proc == sharing_procs[i])
5386         return true;
5387       else if (-1 == sharing_procs[i])
5388         return false;
5389     }
5390 
5391     return false;
5392   }
5393 
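  // Filter semantics (derived from the op cases below): PSTATUS_AND keeps entities
  // with all bits of pstat set, PSTATUS_OR keeps those with any bit set, PSTATUS_NOT
  // keeps those with none set. If to_proc != -1, the result is further restricted to
  // entities shared with that proc. The filtered range goes to *returned_ents when
  // provided, otherwise ents is replaced in place. Illustrative call, assuming a
  // ParallelComm instance pc:
  //   Range owned(ents);
  //   pc.filter_pstatus(owned, PSTATUS_NOT_OWNED, PSTATUS_NOT); // keep owned ents only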
5394   ErrorCode ParallelComm::filter_pstatus(Range &ents,
5395                                          unsigned char pstat,
5396                                          unsigned char op,
5397                                          int to_proc,
5398                                          Range *returned_ents)
5399   {
5400     Range tmp_ents;
5401 
5402     //assert(!ents.empty());
5403     if (ents.empty()) {
5404       if (returned_ents)
5405         returned_ents->clear();
5406       return MB_SUCCESS;
5407     }
5408 
5409     // Put into tmp_ents any entities which are not owned locally or
5410     // which are already shared with to_proc
5411     std::vector<unsigned char> shared_flags(ents.size()), shared_flags2;
5412     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), ents,
5413                                             &shared_flags[0]);MB_CHK_SET_ERR(result, "Failed to get pstatus flag");
5414     Range::const_iterator rit, hint = tmp_ents.begin();
5415     int i;
5416     if (op == PSTATUS_OR) {
5417       for (rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++) {
5418         if (((shared_flags[i] & ~pstat) ^ shared_flags[i]) & pstat) {
5419           hint = tmp_ents.insert(hint, *rit);
5420           if (-1 != to_proc)
5421             shared_flags2.push_back(shared_flags[i]);
5422         }
5423       }
5424     }
5425     else if (op == PSTATUS_AND) {
5426       for (rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++) {
5427         if ((shared_flags[i] & pstat) == pstat) {
5428           hint = tmp_ents.insert(hint, *rit);
5429           if (-1 != to_proc)
5430             shared_flags2.push_back(shared_flags[i]);
5431         }
5432       }
5433     }
5434     else if (op == PSTATUS_NOT) {
5435       for (rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++) {
5436         if (!(shared_flags[i] & pstat)) {
5437           hint = tmp_ents.insert(hint, *rit);
5438           if (-1 != to_proc)
5439             shared_flags2.push_back(shared_flags[i]);
5440         }
5441       }
5442     }
5443     else {
5444       assert(false);
5445       return MB_FAILURE;
5446     }
5447 
5448     if (-1 != to_proc) {
5449       int sharing_procs[MAX_SHARING_PROCS];
5450       std::fill(sharing_procs, sharing_procs + MAX_SHARING_PROCS, -1);
5451       Range tmp_ents2;
5452       hint = tmp_ents2.begin();
5453 
5454       for (rit = tmp_ents.begin(), i = 0; rit != tmp_ents.end(); ++rit, i++) {
5455         // We need to check sharing procs
5456         if (shared_flags2[i] & PSTATUS_MULTISHARED) {
5457           result = mbImpl->tag_get_data(sharedps_tag(), &(*rit), 1,
5458                                         sharing_procs);MB_CHK_SET_ERR(result, "Failed to get sharedps tag");
5459           assert(-1 != sharing_procs[0]);
5460           for (unsigned int j = 0; j < MAX_SHARING_PROCS; j++) {
5461             // If to_proc shares this entity, add it to list
5462             if (sharing_procs[j] == to_proc) {
5463               hint = tmp_ents2.insert(hint, *rit);
5464             }
5465             else if (-1 == sharing_procs[j])
5466               break;
5467 
5468             sharing_procs[j] = -1;
5469           }
5470         }
5471         else if (shared_flags2[i] & PSTATUS_SHARED) {
5472           result = mbImpl->tag_get_data(sharedp_tag(), &(*rit), 1,
5473                                         sharing_procs);MB_CHK_SET_ERR(result, "Failed to get sharedp tag");
5474           assert(-1 != sharing_procs[0]);
5475           if (sharing_procs[0] == to_proc)
5476             hint = tmp_ents2.insert(hint, *rit);
5477           sharing_procs[0] = -1;
5478         }
5479         else
5480           assert("should never get here" && false);
5481       }
5482 
5483       tmp_ents.swap(tmp_ents2);
5484     }
5485 
5486     if (returned_ents)
5487       returned_ents->swap(tmp_ents);
5488     else
5489       ents.swap(tmp_ents);
5490 
5491     return MB_SUCCESS;
5492   }
5493 
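  // Hedged usage sketch (not part of this file): assuming an MPI program in which a
  // mesh has been loaded in parallel on communicator comm and shared entities have
  // already been resolved, one layer of 3D ghost elements bridged through vertices
  // could be requested as follows:
  //   moab::Core mb;
  //   moab::ParallelComm pc(&mb, comm);
  //   // ... parallel read + resolve_shared_ents ...
  //   moab::ErrorCode rval = pc.exchange_ghost_cells(3, 0, 1, 0, true, true, NULL);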
5494   ErrorCode ParallelComm::exchange_ghost_cells(int ghost_dim, int bridge_dim,
5495                                                int num_layers, int addl_ents,
5496                                                bool store_remote_handles,
5497                                                bool wait_all,
5498                                                EntityHandle *file_set)
5499   {
5500 #ifdef MOAB_HAVE_MPE
5501     if (myDebug->get_verbosity() == 2) {
5502       if (!num_layers)
5503         MPE_Log_event(IFACE_START, procConfig.proc_rank(), "Starting interface exchange.");
5504       else
5505         MPE_Log_event(GHOST_START, procConfig.proc_rank(), "Starting ghost exchange.");
5506     }
5507 #endif
5508 
5509     myDebug->tprintf(1, "Entering exchange_ghost_cells with num_layers = %d\n", num_layers);
5510     if (myDebug->get_verbosity() == 4) {
5511       msgs.clear();
5512       msgs.reserve(MAX_SHARING_PROCS);
5513     }
5514 
5515     // If we're only finding out about existing ents, we have to be storing
5516     // remote handles too
5517     assert(num_layers > 0 || store_remote_handles);
5518 
5519     const bool is_iface = !num_layers;
5520 
5521     // Get the b-dimensional interface(s) with each communicating proc, where b = bridge_dim
5522 
5523     int success;
5524     ErrorCode result = MB_SUCCESS;
5525     int incoming1 = 0, incoming2 = 0;
5526 
5527     reset_all_buffers();
5528 
5529     // When this function is called, buffProcs should already have any
5530     // communicating procs
5531 
5532     //===========================================
5533     // Post ghost irecv's for ghost entities from all communicating procs
5534     //===========================================
5535 #ifdef MOAB_HAVE_MPE
5536     if (myDebug->get_verbosity() == 2) {
5537       MPE_Log_event(ENTITIES_START, procConfig.proc_rank(), "Starting entity exchange.");
5538     }
5539 #endif
5540 
5541     // Index reqs the same as buffer/sharing procs indices
5542     std::vector<MPI_Request> recv_ent_reqs(3*buffProcs.size(), MPI_REQUEST_NULL),
5543       recv_remoteh_reqs(3*buffProcs.size(), MPI_REQUEST_NULL);
5544     std::vector<unsigned int>::iterator proc_it;
5545     int ind, p;
5546     sendReqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
5547     for (ind = 0, proc_it = buffProcs.begin();
5548          proc_it != buffProcs.end(); ++proc_it, ind++) {
5549       incoming1++;
5550       PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[ind],
5551                         remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
5552                         MB_MESG_ENTS_SIZE, incoming1);
5553       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
5554                           MPI_UNSIGNED_CHAR, buffProcs[ind],
5555                           MB_MESG_ENTS_SIZE, procConfig.proc_comm(),
5556                           &recv_ent_reqs[3*ind]);
5557       if (success != MPI_SUCCESS) {
5558         MB_SET_ERR(MB_FAILURE, "Failed to post irecv in ghost exchange");
5559       }
5560     }
5561 
5562     //===========================================
5563     // Get entities to be sent to neighbors
5564     //===========================================
5565     Range sent_ents[MAX_SHARING_PROCS], allsent, tmp_range;
5566     TupleList entprocs;
5567     int dum_ack_buff;
5568     result = get_sent_ents(is_iface, bridge_dim, ghost_dim, num_layers,
5569                            addl_ents, sent_ents, allsent, entprocs);MB_CHK_SET_ERR(result, "get_sent_ents failed");
5570 
5571     // Augment the file set with the entities to be sent;
5572     // we might have created new entities (edges and/or faces) if addl_ents > 0
5573     if (addl_ents > 0 && file_set && !allsent.empty()) {
5574       result = mbImpl->add_entities(*file_set, allsent);
5575       MB_CHK_SET_ERR(result, "Failed to add new sub-entities to set");
5576     }
5577     myDebug->tprintf(1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
5578                      (unsigned long)allsent.size());
5579 
5580     //===========================================
5581     // Pack and send ents from this proc to others
5582     //===========================================
5583     for (p = 0, proc_it = buffProcs.begin();
5584          proc_it != buffProcs.end(); ++proc_it, p++) {
5585       myDebug->tprintf(1, "Sent ents compactness (size) = %f (%lu)\n", sent_ents[p].compactness(),
5586                        (unsigned long)sent_ents[p].size());
5587 
5588       // Reserve space on front for size and for initial buff size
5589       localOwnedBuffs[p]->reset_buffer(sizeof(int));
5590 
5591       // Entities
5592       result = pack_entities(sent_ents[p], localOwnedBuffs[p],
5593                              store_remote_handles, buffProcs[p], is_iface,
5594                              &entprocs, &allsent);MB_CHK_SET_ERR(result, "Packing entities failed");
5595 
5596       if (myDebug->get_verbosity() == 4) {
5597         msgs.resize(msgs.size() + 1);
5598         msgs.back() = new Buffer(*localOwnedBuffs[p]);
5599       }
5600 
5601       // Send the buffer (size stored in front in send_buffer)
5602       result = send_buffer(*proc_it, localOwnedBuffs[p],
5603                            MB_MESG_ENTS_SIZE, sendReqs[3*p],
5604                            recv_ent_reqs[3*p + 2], &dum_ack_buff,
5605                            incoming1,
5606                            MB_MESG_REMOTEH_SIZE,
5607                            (!is_iface && store_remote_handles ?  // this used for ghosting only
5608                             localOwnedBuffs[p] : NULL),
5609                            &recv_remoteh_reqs[3*p], &incoming2);MB_CHK_SET_ERR(result, "Failed to Isend in ghost exchange");
5610     }
5611 
5612     entprocs.reset();
5613 
5614     //===========================================
5615     // Receive/unpack new entities
5616     //===========================================
5617     // Number of incoming messages for ghosts is the number of procs we
5618     // communicate with; for iface, it's the number of those with lower rank
5619     MPI_Status status;
5620     std::vector<std::vector<EntityHandle> > recd_ents(buffProcs.size());
5621     std::vector<std::vector<EntityHandle> > L1hloc(buffProcs.size()), L1hrem(buffProcs.size());
5622     std::vector<std::vector<int> > L1p(buffProcs.size());
5623     std::vector<EntityHandle> L2hloc, L2hrem;
5624     std::vector<unsigned int> L2p;
5625     std::vector<EntityHandle> new_ents;
5626 
5627     while (incoming1) {
5628       // Wait for all recvs of ghost ents before proceeding to sending remote handles,
5629       // b/c some procs may have sent to a 3rd proc ents owned by me;
5630       PRINT_DEBUG_WAITANY(recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank());
5631 
5632       success = MPI_Waitany(3*buffProcs.size(), &recv_ent_reqs[0], &ind, &status);
5633       if (MPI_SUCCESS != success) {
5634         MB_SET_ERR(MB_FAILURE, "Failed in waitany in ghost exchange");
5635       }
5636 
5637       PRINT_DEBUG_RECD(status);
5638 
5639       // OK, received something; decrement incoming counter
5640       incoming1--;
5641       bool done = false;
5642 
5643       // In case ind is for ack, we need index of one before it
5644       unsigned int base_ind = 3*(ind/3);
5645       result = recv_buffer(MB_MESG_ENTS_SIZE,
5646                            status,
5647                            remoteOwnedBuffs[ind/3],
5648                            recv_ent_reqs[base_ind + 1],
5649                            recv_ent_reqs[base_ind + 2],
5650                            incoming1,
5651                            localOwnedBuffs[ind/3],
5652                            sendReqs[base_ind + 1],
5653                            sendReqs[base_ind + 2],
5654                            done,
5655                            (!is_iface && store_remote_handles ?
5656                             localOwnedBuffs[ind/3] : NULL),
5657                            MB_MESG_REMOTEH_SIZE, // maybe base_ind+1?
5658                            &recv_remoteh_reqs[base_ind+1], &incoming2);MB_CHK_SET_ERR(result, "Failed to receive buffer");
5659 
5660       if (done) {
5661         if (myDebug->get_verbosity() == 4) {
5662           msgs.resize(msgs.size() + 1);
5663           msgs.back() = new Buffer(*remoteOwnedBuffs[ind/3]);
5664         }
5665 
5666         // Message completely received - process buffer that was sent
5667         remoteOwnedBuffs[ind/3]->reset_ptr(sizeof(int));
5668         result = unpack_entities(remoteOwnedBuffs[ind/3]->buff_ptr,
5669                                  store_remote_handles, ind/3, is_iface,
5670                                  L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents);
5671         if (MB_SUCCESS != result) {
5672           std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
5673           print_buffer(remoteOwnedBuffs[ind/3]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind/3], false);
5674           return result;
5675         }
5676 
5677         if (recv_ent_reqs.size() != 3*buffProcs.size()) {
5678           // Post irecv's for remote handles from new proc; shouldn't be iface,
5679           // since we know about all procs we share with
5680           assert(!is_iface);
5681           recv_remoteh_reqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
5682           for (unsigned int i = recv_ent_reqs.size(); i < 3*buffProcs.size(); i += 3) {
5683             localOwnedBuffs[i/3]->reset_buffer();
5684             incoming2++;
5685             PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[i/3],
5686                               localOwnedBuffs[i/3]->mem_ptr, INITIAL_BUFF_SIZE,
5687                               MB_MESG_REMOTEH_SIZE, incoming2);
5688             success = MPI_Irecv(localOwnedBuffs[i/3]->mem_ptr, INITIAL_BUFF_SIZE,
5689                                 MPI_UNSIGNED_CHAR, buffProcs[i/3],
5690                                 MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
5691                                 &recv_remoteh_reqs[i]);
5692             if (success != MPI_SUCCESS) {
5693               MB_SET_ERR(MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange");
5694             }
5695           }
5696           recv_ent_reqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
5697           sendReqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
5698         }
5699       }
5700     }
5701 
5702     // Add requests for any new addl procs
5703     if (recv_ent_reqs.size() != 3*buffProcs.size()) {
5704       // Shouldn't get here...
5705       MB_SET_ERR(MB_FAILURE, "Requests length doesn't match proc count in ghost exchange");
5706     }
5707 
5708 #ifdef MOAB_HAVE_MPE
5709     if (myDebug->get_verbosity() == 2) {
5710       MPE_Log_event(ENTITIES_END, procConfig.proc_rank(), "Ending entity exchange.");
5711     }
5712 #endif
5713 
5714     if (is_iface) {
5715       // Need to check over entities I sent and make sure I received
5716       // handles for them from all expected procs; if not, need to clean
5717       // them up
5718       result = check_clean_iface(allsent);
5719       if (MB_SUCCESS != result)
5720         std::cout << "Failed check." << std::endl;
5721 
5722       // Now set the shared/interface tag on non-vertex entities on interface
5723       result = tag_iface_entities();MB_CHK_SET_ERR(result, "Failed to tag iface entities");
5724 
5725 #ifndef NDEBUG
5726       result = check_sent_ents(allsent);
5727       if (MB_SUCCESS != result) std::cout << "Failed check." << std::endl;
5728       result = check_all_shared_handles(true);
5729       if (MB_SUCCESS != result) std::cout << "Failed check." << std::endl;
5730 #endif
5731 
5732 #ifdef MOAB_HAVE_MPE
5733       if (myDebug->get_verbosity() == 2) {
5734         MPE_Log_event(IFACE_END, procConfig.proc_rank(), "Ending interface exchange.");
5735       }
5736 #endif
5737 
5738       //===========================================
5739       // Wait if requested
5740       //===========================================
5741       if (wait_all) {
5742         if (myDebug->get_verbosity() == 5) {
5743           success = MPI_Barrier(procConfig.proc_comm());
5744         }
5745         else {
5746           MPI_Status mult_status[3*MAX_SHARING_PROCS];
5747           success = MPI_Waitall(3*buffProcs.size(), &recv_ent_reqs[0], mult_status);
5748           if (MPI_SUCCESS != success) {
5749             MB_SET_ERR(MB_FAILURE, "Failed in waitall in ghost exchange");
5750           }
5751           success = MPI_Waitall(3*buffProcs.size(), &sendReqs[0], mult_status);
5752           if (MPI_SUCCESS != success) {
5753             MB_SET_ERR(MB_FAILURE, "Failed in waitall in ghost exchange");
5754           }
5755           /*success = MPI_Waitall(3*buffProcs.size(), &recv_remoteh_reqs[0], mult_status);
5756           if (MPI_SUCCESS != success) {
5757             MB_SET_ERR(MB_FAILURE, "Failed in waitall in ghost exchange");
5758           }*/
5759         }
5760       }
5761 
5762       myDebug->tprintf(1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size());
5763       myDebug->tprintf(1, "Exiting exchange_ghost_cells\n");
5764 
5765       return MB_SUCCESS;
5766     }
5767 
5768     // we still need to wait on sendReqs, if they are not fulfilled yet
5769     if (wait_all) {
5770       if (myDebug->get_verbosity() == 5) {
5771         success = MPI_Barrier(procConfig.proc_comm());
5772       }
5773       else {
5774         MPI_Status mult_status[3*MAX_SHARING_PROCS];
5775         success = MPI_Waitall(3*buffProcs.size(), &sendReqs[0], mult_status);
5776         if (MPI_SUCCESS != success) {
5777           MB_SET_ERR(MB_FAILURE, "Failed in waitall in ghost exchange");
5778         }
5779       }
5780     }
5781     //===========================================
5782     // Send local handles for new ghosts to owner, then add
5783     // those to ghost list for that owner
5784     //===========================================
5785     for (p = 0, proc_it = buffProcs.begin();
5786          proc_it != buffProcs.end(); ++proc_it, p++) {
5787 
5788       // Reserve space on front for size and for initial buff size
5789       remoteOwnedBuffs[p]->reset_buffer(sizeof(int));
5790 
5791       result = pack_remote_handles(L1hloc[p], L1hrem[p], L1p[p], *proc_it,
5792                                    remoteOwnedBuffs[p]);MB_CHK_SET_ERR(result, "Failed to pack remote handles");
5793       remoteOwnedBuffs[p]->set_stored_size();
5794 
5795       if (myDebug->get_verbosity() == 4) {
5796         msgs.resize(msgs.size() + 1);
5797         msgs.back() = new Buffer(*remoteOwnedBuffs[p]);
5798       }
5799       result = send_buffer(buffProcs[p], remoteOwnedBuffs[p],
5800                            MB_MESG_REMOTEH_SIZE,
5801                            sendReqs[3*p],
5802                            recv_remoteh_reqs[3*p + 2],
5803                            &dum_ack_buff, incoming2);MB_CHK_SET_ERR(result, "Failed to send remote handles");
5804     }
5805 
5806     //===========================================
5807     // Process remote handles of my ghosteds
5808     //===========================================
5809     while (incoming2) {
5810       PRINT_DEBUG_WAITANY(recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank());
5811       success = MPI_Waitany(3*buffProcs.size(), &recv_remoteh_reqs[0], &ind, &status);
5812       if (MPI_SUCCESS != success) {
5813         MB_SET_ERR(MB_FAILURE, "Failed in waitany in ghost exchange");
5814       }
5815 
5816       // OK, received something; decrement incoming counter
5817       incoming2--;
5818 
5819       PRINT_DEBUG_RECD(status);
5820 
5821       bool done = false;
5822       unsigned int base_ind = 3*(ind/3);
5823       result = recv_buffer(MB_MESG_REMOTEH_SIZE, status,
5824                            localOwnedBuffs[ind/3],
5825                            recv_remoteh_reqs[base_ind+1],
5826                            recv_remoteh_reqs[base_ind + 2], incoming2,
5827                            remoteOwnedBuffs[ind/3],
5828                            sendReqs[base_ind+1],
5829                            sendReqs[base_ind + 2],
5830                            done);MB_CHK_SET_ERR(result, "Failed to receive remote handles");
5831       if (done) {
5832         // Incoming remote handles
5833         if (myDebug->get_verbosity() == 4) {
5834           msgs.resize(msgs.size() + 1);
5835           msgs.back() = new Buffer(*localOwnedBuffs[ind/3]);
5836         }
5837         localOwnedBuffs[ind/3]->reset_ptr(sizeof(int));
5838         result = unpack_remote_handles(buffProcs[ind/3],
5839                                        localOwnedBuffs[ind/3]->buff_ptr,
5840                                        L2hloc, L2hrem, L2p);MB_CHK_SET_ERR(result, "Failed to unpack remote handles");
5841       }
5842     }
5843 
5844 #ifdef MOAB_HAVE_MPE
5845     if (myDebug->get_verbosity() == 2) {
5846       MPE_Log_event(RHANDLES_END, procConfig.proc_rank(), "Ending remote handles.");
5847       MPE_Log_event(GHOST_END, procConfig.proc_rank(),
5848                     "Ending ghost exchange (still doing checks).");
5849     }
5850 #endif
5851 
5852     //===========================================
5853     // Wait if requested
5854     //===========================================
5855     if (wait_all) {
5856       if (myDebug->get_verbosity() == 5) {
5857         success = MPI_Barrier(procConfig.proc_comm());
5858       }
5859       else {
5860         MPI_Status mult_status[3*MAX_SHARING_PROCS];
5861         success = MPI_Waitall(3*buffProcs.size(), &recv_remoteh_reqs[0], mult_status);
5862         if (MPI_SUCCESS == success)
5863           success = MPI_Waitall(3*buffProcs.size(), &sendReqs[0], mult_status);
5864       }
5865       if (MPI_SUCCESS != success) {
5866         MB_SET_ERR(MB_FAILURE, "Failed in waitall in ghost exchange");
5867       }
5868     }
5869 
5870 #ifndef NDEBUG
5871     result = check_sent_ents(allsent);MB_CHK_SET_ERR(result, "Failed check on shared entities");
5872     result = check_all_shared_handles(true);MB_CHK_SET_ERR(result, "Failed check on all shared handles");
5873 #endif
5874 
5875     if (file_set && !new_ents.empty()) {
5876       result = mbImpl->add_entities(*file_set, &new_ents[0], new_ents.size());MB_CHK_SET_ERR(result, "Failed to add new entities to set");
5877     }
5878 
5879     myDebug->tprintf(1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size());
5880     myDebug->tprintf(1, "Exiting exchange_ghost_cells\n");
5881 
5882     return MB_SUCCESS;
5883   }
5884 
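  // Two-part send protocol (description derived from this code): if the packed buffer
  // fits in INITIAL_BUFF_SIZE it is sent in a single MPI_Isend with tag mesg_tag,
  // optionally posting an irecv for the next expected message (tag next_mesg_tag)
  // first. If it is larger, an irecv for an ack (tag mesg_tag - 1) is posted and only
  // the first INITIAL_BUFF_SIZE bytes are sent now; the remainder is sent with tag
  // mesg_tag + 1 once the ack arrives (see recv_buffer).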
5885   ErrorCode ParallelComm::send_buffer(const unsigned int to_proc,
5886                                       Buffer *send_buff,
5887                                       int mesg_tag,
5888                                       MPI_Request &send_req,
5889                                       MPI_Request &ack_req,
5890                                       int *ack_buff,
5891                                       int &this_incoming,
5892                                       int next_mesg_tag,
5893                                       Buffer *next_recv_buff,
5894                                       MPI_Request *next_recv_req,
5895                                       int *next_incoming)
5896   {
5897     ErrorCode result = MB_SUCCESS;
5898     int success;
5899 
5900     // If small message, post recv for remote handle message
5901     if (send_buff->get_stored_size() <= (int)INITIAL_BUFF_SIZE && next_recv_buff) {
5902       (*next_incoming)++;
5903       PRINT_DEBUG_IRECV(procConfig.proc_rank(), to_proc, next_recv_buff->mem_ptr,
5904                         INITIAL_BUFF_SIZE, next_mesg_tag, *next_incoming);
5905       success = MPI_Irecv(next_recv_buff->mem_ptr, INITIAL_BUFF_SIZE,
5906                           MPI_UNSIGNED_CHAR, to_proc,
5907                           next_mesg_tag, procConfig.proc_comm(),
5908                           next_recv_req);
5909       if (success != MPI_SUCCESS) {
5910         MB_SET_ERR(MB_FAILURE, "Failed to post irecv for next message in ghost exchange");
5911       }
5912     }
5913     // If large, we'll need an ack before sending the rest
5914     else if (send_buff->get_stored_size() > (int)INITIAL_BUFF_SIZE) {
5915       this_incoming++;
5916       PRINT_DEBUG_IRECV(procConfig.proc_rank(), to_proc, (unsigned char*)ack_buff,
5917                         sizeof(int), mesg_tag - 1, this_incoming);
5918       success = MPI_Irecv((void*)ack_buff, sizeof(int),
5919                           MPI_UNSIGNED_CHAR, to_proc,
5920                           mesg_tag - 1, procConfig.proc_comm(),
5921                           &ack_req);
5922       if (success != MPI_SUCCESS) {
5923         MB_SET_ERR(MB_FAILURE, "Failed to post irecv for entity ack in ghost exchange");
5924       }
5925     }
5926 
5927     // Send the buffer
5928     PRINT_DEBUG_ISEND(procConfig.proc_rank(), to_proc, send_buff->mem_ptr, mesg_tag,
5929                       std::min(send_buff->get_stored_size(), (int)INITIAL_BUFF_SIZE));
5930     assert(0 <= send_buff->get_stored_size() &&
5931            send_buff->get_stored_size() <= (int)send_buff->alloc_size);
5932     success = MPI_Isend(send_buff->mem_ptr,
5933                         std::min(send_buff->get_stored_size(),
5934                                  (int)INITIAL_BUFF_SIZE),
5935                         MPI_UNSIGNED_CHAR, to_proc,
5936                         mesg_tag, procConfig.proc_comm(), &send_req);
5937     if (success != MPI_SUCCESS) return MB_FAILURE;
5938 
5939     return result;
5940   }
5941 
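  // Counterpart of send_buffer; dispatches on the received tag. Cases handled below:
  // tag == mesg_tag_expected with stored size > INITIAL_BUFF_SIZE: grow the buffer,
  // post an irecv for the second part (tag + 1) and send an ack (tag - 1);
  // tag == mesg_tag_expected - 1: an ack arrived, so send the second half of our own
  // large buffer; otherwise the message is complete and done is set to true.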
5942   ErrorCode ParallelComm::recv_buffer(int mesg_tag_expected,
5943                                       const MPI_Status &mpi_status,
5944                                       Buffer *recv_buff,
5945                                       MPI_Request &recv_req,
5946                                       MPI_Request & /*ack_recvd_req*/,
5947                                       int &this_incoming,
5948                                       Buffer *send_buff,
5949                                       MPI_Request &send_req,
5950                                       MPI_Request &sent_ack_req,
5951                                       bool &done,
5952                                       Buffer *next_buff,
5953                                       int next_tag,
5954                                       MPI_Request *next_req,
5955                                       int *next_incoming)
5956   {
5957     // Process a received message; if there will be more coming,
5958     // post a receive for 2nd part then send an ack message
5959     int from_proc = mpi_status.MPI_SOURCE;
5960     int success;
5961 
5962     // Set the buff_ptr on the recv_buffer; needs to point beyond any
5963     // valid data already in the buffer
5964     recv_buff->reset_ptr(std::min(recv_buff->get_stored_size(),
5965                                   (int)recv_buff->alloc_size));
5966 
5967     if (mpi_status.MPI_TAG == mesg_tag_expected &&
5968         recv_buff->get_stored_size() > (int)INITIAL_BUFF_SIZE) {
5969       // 1st message & large - allocate buffer, post irecv for 2nd message,
5970       // then send ack
5971       recv_buff->reserve(recv_buff->get_stored_size());
5972       assert(recv_buff->alloc_size > INITIAL_BUFF_SIZE);
5973 
5974       // Will expect a 2nd message
5975       this_incoming++;
5976 
5977       PRINT_DEBUG_IRECV(procConfig.proc_rank(), from_proc,
5978                         recv_buff->mem_ptr + INITIAL_BUFF_SIZE,
5979                         recv_buff->get_stored_size() - INITIAL_BUFF_SIZE,
5980                         mesg_tag_expected + 1, this_incoming);
5981       success = MPI_Irecv(recv_buff->mem_ptr + INITIAL_BUFF_SIZE,
5982                           recv_buff->get_stored_size() - INITIAL_BUFF_SIZE,
5983                           MPI_UNSIGNED_CHAR, from_proc,
5984                           mesg_tag_expected + 1, procConfig.proc_comm(),
5985                           &recv_req);
5986       if (success != MPI_SUCCESS) {
5987         MB_SET_ERR(MB_FAILURE, "Failed to post 2nd iRecv in ghost exchange");
5988       }
5989 
5990       // Send ack, doesn't matter what data actually is
5991       PRINT_DEBUG_ISEND(procConfig.proc_rank(), from_proc, recv_buff->mem_ptr,
5992                         mesg_tag_expected - 1, sizeof(int));
5993       success = MPI_Isend(recv_buff->mem_ptr, sizeof(int),
5994                           MPI_UNSIGNED_CHAR, from_proc,
5995                           mesg_tag_expected - 1, procConfig.proc_comm(), &sent_ack_req);
5996       if (success != MPI_SUCCESS) {
5997         MB_SET_ERR(MB_FAILURE, "Failed to send ack in ghost exchange");
5998       }
5999     }
6000     else if (mpi_status.MPI_TAG == mesg_tag_expected - 1) {
6001       // Got an ack back, send the 2nd half of message
6002 
6003       // Should be a large message if we got this
6004       assert(*((size_t*)send_buff->mem_ptr) > INITIAL_BUFF_SIZE);
6005 
6006       // Post irecv for next message, then send 2nd message
6007       if (next_buff) {
6008         // We'll expect a return message
6009         (*next_incoming)++;
6010         PRINT_DEBUG_IRECV(procConfig.proc_rank(), from_proc, next_buff->mem_ptr,
6011                           INITIAL_BUFF_SIZE, next_tag, *next_incoming);
6012 
6013         success = MPI_Irecv(next_buff->mem_ptr,
6014                             INITIAL_BUFF_SIZE,
6015                             MPI_UNSIGNED_CHAR, from_proc,
6016                             next_tag, procConfig.proc_comm(),
6017                             next_req);
6018         if (success != MPI_SUCCESS) {
6019           MB_SET_ERR(MB_FAILURE, "Failed to post next irecv in ghost exchange");
6020         }
6021       }
6022 
6023       // Send 2nd message
6024       PRINT_DEBUG_ISEND(procConfig.proc_rank(), from_proc,
6025                         send_buff->mem_ptr+INITIAL_BUFF_SIZE,
6026                         mesg_tag_expected + 1,
6027                         send_buff->get_stored_size() - INITIAL_BUFF_SIZE);
6028 
6029       assert(send_buff->get_stored_size()-INITIAL_BUFF_SIZE < send_buff->alloc_size &&
6030              0 <= send_buff->get_stored_size());
6031       success = MPI_Isend(send_buff->mem_ptr+INITIAL_BUFF_SIZE,
6032                           send_buff->get_stored_size() - INITIAL_BUFF_SIZE,
6033                           MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected + 1,
6034                           procConfig.proc_comm(), &send_req);
6035       if (success != MPI_SUCCESS) {
6036         MB_SET_ERR(MB_FAILURE, "Failed to send 2nd message in ghost exchange");
6037       }
6038     }
6039     else if ((mpi_status.MPI_TAG == mesg_tag_expected &&
6040               recv_buff->get_stored_size() <= (int)INITIAL_BUFF_SIZE) ||
6041               mpi_status.MPI_TAG == mesg_tag_expected + 1) {
6042       // Message completely received - signal that we're done
6043       done = true;
6044     }
6045 
6046     return MB_SUCCESS;
6047   }
6048 
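  // Fixed-size, -1-terminated list of sharing processors. operator< below compares
  // lexicographically and stops at the first -1, so ProcList can be used as a
  // std::map key (check_clean_iface groups entities by sharing-proc set this way).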
6049   struct ProcList {
6050     int procs[MAX_SHARING_PROCS];
6051   };
6052   static bool operator<(const ProcList& a, const ProcList& b) {
6053     for (int i = 0; i < MAX_SHARING_PROCS; i++) {
6054       if (a.procs[i] < b.procs[i])
6055         return true;
6056       else if (b.procs[i] < a.procs[i])
6057         return false;
6058       else if (a.procs[i] < 0)
6059         return false;
6060     }
6061     return false;
6062   }
6063 
6064   ErrorCode ParallelComm::check_clean_iface(Range &allsent)
6065   {
6066     // allsent is all entities I think are on interface; go over them, looking
6067     // for zero-valued handles, and fix any I find
6068 
6069     // Keep lists of entities for which the sharing data changed, grouped
6070     // by set of sharing procs.
6071     typedef std::map< ProcList, Range > procmap_t;
6072     procmap_t old_procs, new_procs;
6073 
6074     ErrorCode result = MB_SUCCESS;
6075     Range::iterator rit;
6076     Range::reverse_iterator rvit;
6077     unsigned char pstatus;
6078     int nump;
6079     ProcList sharedp;
6080     EntityHandle sharedh[MAX_SHARING_PROCS];
6081     for (rvit = allsent.rbegin(); rvit != allsent.rend(); ++rvit) {
6082       result = get_sharing_data(*rvit, sharedp.procs, sharedh, pstatus, nump);MB_CHK_SET_ERR(result, "Failed to get sharing data");
6083       assert("Should be shared with at least one other proc" &&
6084              (nump > 1 || sharedp.procs[0] != (int)procConfig.proc_rank()));
6085       assert(nump == MAX_SHARING_PROCS || sharedp.procs[nump] == -1);
6086 
6087       // Look for first null handle in list
6088       int idx = std::find(sharedh, sharedh + nump, (EntityHandle)0) - sharedh;
6089       if (idx == nump)
6090         continue; // All handles are valid
6091 
6092       ProcList old_list(sharedp);
6093       std::sort(old_list.procs, old_list.procs + nump);
6094       old_procs[old_list].insert(*rvit);
6095 
6096       // Remove null handles and corresponding proc ranks from lists
6097       int new_nump = idx;
6098       bool removed_owner = !idx;
6099       for (++idx; idx < nump; ++idx) {
6100         if (sharedh[idx]) {
6101           sharedh[new_nump] = sharedh[idx];
6102           sharedp.procs[new_nump] = sharedp.procs[idx];
6103           ++new_nump;
6104         }
6105       }
6106       sharedp.procs[new_nump] = -1;
6107 
6108       if (removed_owner && new_nump > 1) {
6109         // The proc that we choose as the entity owner isn't sharing the
6110         // entity (doesn't have a copy of it). We need to pick a different
6111         // owner. Choose the proc with lowest rank.
6112         idx = std::min_element(sharedp.procs, sharedp.procs + new_nump) - sharedp.procs;
6113         std::swap(sharedp.procs[0], sharedp.procs[idx]);
6114         std::swap(sharedh[0], sharedh[idx]);
6115         if (sharedp.procs[0] == (int)proc_config().proc_rank())
6116           pstatus &= ~PSTATUS_NOT_OWNED;
6117       }
6118 
6119       result = set_sharing_data(*rvit, pstatus, nump, new_nump, sharedp.procs, sharedh);MB_CHK_SET_ERR(result, "Failed to set sharing data in check_clean_iface");
6120 
6121       if (new_nump > 1) {
6122         if (new_nump == 2) {
6123           if (sharedp.procs[1] != (int)proc_config().proc_rank()) {
6124             assert(sharedp.procs[0] == (int)proc_config().proc_rank());
6125             sharedp.procs[0] = sharedp.procs[1];
6126           }
6127           sharedp.procs[1] = -1;
6128         }
6129         else {
6130           std::sort(sharedp.procs, sharedp.procs + new_nump);
6131         }
6132         new_procs[sharedp].insert(*rvit);
6133       }
6134     }
6135 
6136     if (old_procs.empty()) {
6137       assert(new_procs.empty());
6138       return MB_SUCCESS;
6139     }
6140 
6141     // Update interface sets
6142     procmap_t::iterator pmit;
6143     //std::vector<unsigned char> pstatus_list;
6144     rit = interface_sets().begin();
6145     while (rit != interface_sets().end()) {
6146       result = get_sharing_data(*rit, sharedp.procs, sharedh, pstatus, nump);MB_CHK_SET_ERR(result, "Failed to get sharing data for interface set");
6147       assert(nump != 2);
6148       std::sort(sharedp.procs, sharedp.procs + nump);
6149       assert(nump == MAX_SHARING_PROCS || sharedp.procs[nump] == -1);
6150 
6151       pmit = old_procs.find(sharedp);
6152       if (pmit != old_procs.end()) {
6153         result = mbImpl->remove_entities(*rit, pmit->second);MB_CHK_SET_ERR(result, "Failed to remove entities from interface set");
6154       }
6155 
6156       pmit = new_procs.find(sharedp);
6157       if (pmit == new_procs.end()) {
6158         int count;
6159         result = mbImpl->get_number_entities_by_handle(*rit, count);MB_CHK_SET_ERR(result, "Failed to get number of entities in interface set");
6160         if (!count) {
6161           result = mbImpl->delete_entities(&*rit, 1);MB_CHK_SET_ERR(result, "Failed to delete entities from interface set");
6162           rit = interface_sets().erase(rit);
6163         }
6164         else {
6165           ++rit;
6166         }
6167       }
6168       else {
6169         result = mbImpl->add_entities(*rit, pmit->second);MB_CHK_SET_ERR(result, "Failed to add entities to interface set");
6170 
6171         // Remove those that we've processed so that we know which ones
6172         // are new.
6173         new_procs.erase(pmit);
6174         ++rit;
6175       }
6176     }
6177 
6178     // Create interface sets for new proc id combinations
6179     std::fill(sharedh, sharedh + MAX_SHARING_PROCS, 0);
6180     for (pmit = new_procs.begin(); pmit != new_procs.end(); ++pmit) {
6181       EntityHandle new_set;
6182       result = mbImpl->create_meshset(MESHSET_SET, new_set);MB_CHK_SET_ERR(result, "Failed to create interface set");
6183       interfaceSets.insert(new_set);
6184 
6185       // Add entities
6186       result = mbImpl->add_entities(new_set, pmit->second);MB_CHK_SET_ERR(result, "Failed to add entities to interface set");
6187       // Tag set with the proc rank(s)
6188       assert(pmit->first.procs[0] >= 0);
6189       pstatus = PSTATUS_SHARED|PSTATUS_INTERFACE;
6190       if (pmit->first.procs[1] == -1) {
6191         int other = pmit->first.procs[0];
6192         assert(other != (int)procConfig.proc_rank());
6193         result = mbImpl->tag_set_data(sharedp_tag(), &new_set, 1, pmit->first.procs);MB_CHK_SET_ERR(result, "Failed to tag interface set with procs");
6194         sharedh[0] = 0;
6195         result = mbImpl->tag_set_data(sharedh_tag(), &new_set, 1, sharedh);MB_CHK_SET_ERR(result, "Failed to tag interface set with procs");
6196         if (other < (int)proc_config().proc_rank())
6197           pstatus |= PSTATUS_NOT_OWNED;
6198       }
6199       else {
6200         result = mbImpl->tag_set_data(sharedps_tag(), &new_set, 1, pmit->first.procs);MB_CHK_SET_ERR(result, "Failed to tag interface set with procs");
6201         result = mbImpl->tag_set_data(sharedhs_tag(), &new_set, 1, sharedh);MB_CHK_SET_ERR(result, "Failed to tag interface set with procs");
6202         pstatus |= PSTATUS_MULTISHARED;
6203         if (pmit->first.procs[0] < (int)proc_config().proc_rank())
6204           pstatus |= PSTATUS_NOT_OWNED;
6205       }
6206 
6207       result = mbImpl->tag_set_data(pstatus_tag(), &new_set, 1, &pstatus);MB_CHK_SET_ERR(result, "Failed to tag interface set with pstatus");
6208 
6209       // Set pstatus on all interface entities in set
6210       result = mbImpl->tag_clear_data(pstatus_tag(), pmit->second, &pstatus);MB_CHK_SET_ERR(result, "Failed to tag interface entities with pstatus");
6211     }
6212 
6213     return MB_SUCCESS;
6214   }
6215 
6216   ErrorCode ParallelComm::set_sharing_data(EntityHandle ent, unsigned char pstatus,
6217                                            int old_nump, int new_nump,
6218                                            int *ps, EntityHandle *hs)
6219   {
6220     // If new nump is less than 3, the entity is no longer multishared
6221     if (old_nump > 2 && (pstatus & PSTATUS_MULTISHARED) && new_nump < 3) {
6222       // Unset multishared flag
6223       pstatus ^= PSTATUS_MULTISHARED;
6224     }
6225 
6226     // Check for consistency in input data
6227      //DBG
6228    /*  bool con1 = ((new_nump == 2 && pstatus&PSTATUS_SHARED && !(pstatus&PSTATUS_MULTISHARED)) || (new_nump > 2 && pstatus&PSTATUS_SHARED && pstatus&PSTATUS_MULTISHARED));
6229      bool con2 = (!(pstatus&PSTATUS_GHOST) || pstatus&PSTATUS_SHARED);
6230      bool con3 = (new_nump < 3 || (pstatus&PSTATUS_NOT_OWNED && ps[0] != (int)rank()) || (!(pstatus&PSTATUS_NOT_OWNED) && ps[0] == (int)rank()));
6231      std::cout<<"current rank = "<<rank()<<std::endl;
6232      std::cout<<"condition 1::"<<con1<<std::endl;
6233      std::cout<<"condition 2::"<<con2<<std::endl;
6234      std::cout<<"condition 3::"<<con3<<std::endl;*/
6235 
6236      //DBG
6237 
6238     assert(new_nump > 1 &&
6239            ((new_nump == 2 && pstatus&PSTATUS_SHARED && !(pstatus&PSTATUS_MULTISHARED)) || // If <= 2 must not be multishared
6240             (new_nump > 2 && pstatus&PSTATUS_SHARED && pstatus&PSTATUS_MULTISHARED)) && // If > 2 procs, must be multishared
6241            (!(pstatus&PSTATUS_GHOST) || pstatus&PSTATUS_SHARED) && // If ghost, it must also be shared
6242            (new_nump < 3 || (pstatus&PSTATUS_NOT_OWNED && ps[0] != (int)rank()) || // I'm not owner and first proc not me
6243             (!(pstatus&PSTATUS_NOT_OWNED) && ps[0] == (int)rank())) // I'm owner and first proc is me
6244            );
6245 
6246 #ifndef NDEBUG
6247       {
6248         // Check for duplicates in proc list
6249         std::set<unsigned int> dumprocs;
6250         int dp = 0;
6251         for (; dp < old_nump && -1 != ps[dp]; dp++)
6252           dumprocs.insert(ps[dp]);
6253         assert(dp == (int)dumprocs.size());
6254       }
6255 #endif
6256 
6257     ErrorCode result;
6258     // Reset any old data that needs to be
6259     if (old_nump > 2 && new_nump < 3) {
6260       // Need to remove multishared tags
6261       result = mbImpl->tag_delete_data(sharedps_tag(), &ent, 1);MB_CHK_SET_ERR(result, "set_sharing_data:1");
6262       result = mbImpl->tag_delete_data(sharedhs_tag(), &ent, 1);MB_CHK_SET_ERR(result, "set_sharing_data:2");
6263 //    if (new_nump < 2)
6264 //      pstatus = 0x0;
6265 //    else if (ps[0] != (int)proc_config().proc_rank())
6266 //      pstatus |= PSTATUS_NOT_OWNED;
6267     }
6268     else if ((old_nump < 3 && new_nump > 2) || (old_nump > 1 && new_nump == 1)) {
6269       // Reset sharedp and sharedh tags
6270       int tmp_p = -1;
6271       EntityHandle tmp_h = 0;
6272       result = mbImpl->tag_set_data(sharedp_tag(), &ent, 1, &tmp_p);MB_CHK_SET_ERR(result, "set_sharing_data:3");
6273       result = mbImpl->tag_set_data(sharedh_tag(), &ent, 1, &tmp_h);MB_CHK_SET_ERR(result, "set_sharing_data:4");
6274     }
6275 
6276     assert("check for multishared/owner I'm first proc" &&
6277            (!(pstatus & PSTATUS_MULTISHARED) || (pstatus & (PSTATUS_NOT_OWNED|PSTATUS_GHOST)) || (ps[0] == (int)rank())) &&
6278            "interface entities should have > 1 proc" &&
6279            (!(pstatus & PSTATUS_INTERFACE) || new_nump > 1) &&
6280            "ghost entities should have > 1 proc" &&
6281            (!(pstatus & PSTATUS_GHOST) || new_nump > 1)
6282            );
6283 
6284     // Now set new data
6285     if (new_nump > 2) {
6286       result = mbImpl->tag_set_data(sharedps_tag(), &ent, 1, ps);MB_CHK_SET_ERR(result, "set_sharing_data:5");
6287       result = mbImpl->tag_set_data(sharedhs_tag(), &ent, 1, hs);MB_CHK_SET_ERR(result, "set_sharing_data:6");
6288     }
6289     else {
6290       unsigned int j = (ps[0] == (int)procConfig.proc_rank() ? 1 : 0);
6291       assert(-1 != ps[j]);
6292       result = mbImpl->tag_set_data(sharedp_tag(), &ent, 1, ps + j);MB_CHK_SET_ERR(result, "set_sharing_data:7");
6293       result = mbImpl->tag_set_data(sharedh_tag(), &ent, 1, hs + j);MB_CHK_SET_ERR(result, "set_sharing_data:8");
6294     }
6295 
6296     result = mbImpl->tag_set_data(pstatus_tag(), &ent, 1, &pstatus);MB_CHK_SET_ERR(result, "set_sharing_data:9");
6297 
6298     if (old_nump > 1 && new_nump < 2)
6299       sharedEnts.erase(std::find(sharedEnts.begin(), sharedEnts.end(), ent));
6300 
6301     return result;
6302   }
6303 
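  // Gather the entities to be sent to each communicating proc (ghost layers if
  // !is_iface, interface entities otherwise), filter out those already shared
  // with the destination, and build a handle-sorted TupleList of (proc, handle) pairs.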
6304   ErrorCode ParallelComm::get_sent_ents(const bool is_iface,
6305                                         const int bridge_dim, const int ghost_dim,
6306                                         const int num_layers, const int addl_ents,
6307                                         Range *sent_ents, Range &allsent,
6308                                         TupleList &entprocs)
6309   {
6310     ErrorCode result;
6311     unsigned int ind;
6312     std::vector<unsigned int>::iterator proc_it;
6313     Range tmp_range;
6314 
6315     // Done in a separate loop over procs because sometimes later procs
6316     // need to add info to earlier procs' messages
6317     for (ind = 0, proc_it = buffProcs.begin();
6318          proc_it != buffProcs.end(); ++proc_it, ind++) {
6319       if (!is_iface) {
6320         result = get_ghosted_entities(bridge_dim, ghost_dim, buffProcs[ind],
6321                                       num_layers, addl_ents, sent_ents[ind]);MB_CHK_SET_ERR(result, "Failed to get ghost layers");
6322       }
6323       else {
6324         result = get_iface_entities(buffProcs[ind], -1, sent_ents[ind]);MB_CHK_SET_ERR(result, "Failed to get interface layers");
6325       }
6326 
6327       // Filter out entities already shared with destination
6328       tmp_range.clear();
6329       result = filter_pstatus(sent_ents[ind], PSTATUS_SHARED, PSTATUS_AND,
6330                               buffProcs[ind], &tmp_range);MB_CHK_SET_ERR(result, "Failed to filter on owner");
6331       if (!tmp_range.empty())
6332         sent_ents[ind] = subtract(sent_ents[ind], tmp_range);
6333 
6334       allsent.merge(sent_ents[ind]);
6335     }
6336 
6337     //===========================================
6338     // Need to get procs each entity is sent to
6339     //===========================================
6340 
6341     // Get the total # of proc/handle pairs
6342     int npairs = 0;
6343     for (ind = 0; ind < buffProcs.size(); ind++)
6344       npairs += sent_ents[ind].size();
6345 
6346     // Allocate a TupleList of that size
6347     entprocs.initialize(1, 0, 1, 0, npairs);
6348     entprocs.enableWriteAccess();
6349 
6350     // Put the proc/handle pairs in the list
6351     for (ind = 0, proc_it = buffProcs.begin();
6352          proc_it != buffProcs.end(); ++proc_it, ind++) {
6353       for (Range::iterator rit = sent_ents[ind].begin();
6354           rit != sent_ents[ind].end(); ++rit) {
6355         entprocs.vi_wr[entprocs.get_n()] = *proc_it;
6356         entprocs.vul_wr[entprocs.get_n()] = *rit;
6357         entprocs.inc_n();
6358       }
6359     }
6360     // Sort by handle
6361     moab::TupleList::buffer sort_buffer;
6362     sort_buffer.buffer_init(npairs);
6363     entprocs.sort(1, &sort_buffer);
6364 
6365     entprocs.disableWriteAccess();
6366     sort_buffer.reset();
6367 
6368     return MB_SUCCESS;
6369   }
6370 
6371   ErrorCode ParallelComm::exchange_ghost_cells(ParallelComm **pcs,
6372                                                unsigned int num_procs,
6373                                                int ghost_dim, int bridge_dim,
6374                                                int num_layers, int addl_ents,
6375                                                bool store_remote_handles,
6376                                                EntityHandle *file_sets)
6377   {
6378     // Static version of function, exchanging info through buffers rather
6379     // than through messages
6380 
6381     // If we're only finding out about existing ents, we have to be storing
6382     // remote handles too
6383     assert(num_layers > 0 || store_remote_handles);
6384 
6385     const bool is_iface = !num_layers;
6386 
6387     unsigned int ind;
6388     ParallelComm *pc;
6389     ErrorCode result = MB_SUCCESS;
6390 
6391     std::vector<Error*> ehs(num_procs);
6392     for (unsigned int i = 0; i < num_procs; i++) {
6393       result = pcs[i]->get_moab()->query_interface(ehs[i]);
6394       assert (MB_SUCCESS == result);
6395     }
6396 
6397     // When this function is called, buffProcs should already have any
6398     // communicating procs
6399 
6400     //===========================================
6401     // Get entities to be sent to neighbors
6402     //===========================================
6403 
6404     // Done in a separate loop over procs because sometimes later procs
6405     // need to add info to earlier procs' messages
6406     Range sent_ents[MAX_SHARING_PROCS][MAX_SHARING_PROCS],
6407       allsent[MAX_SHARING_PROCS];
6408 
6409     //===========================================
6410     // Get entities to be sent to neighbors
6411     //===========================================
6412     TupleList entprocs[MAX_SHARING_PROCS];
6413     for (unsigned int p = 0; p < num_procs; p++) {
6414       pc = pcs[p];
6415       result = pc->get_sent_ents(is_iface, bridge_dim, ghost_dim, num_layers, addl_ents,
6416                                  sent_ents[p], allsent[p], entprocs[p]);MB_CHK_SET_ERR(result, "p = " << p << ", get_sent_ents failed");
6417 
6418       //===========================================
6419       // Pack entities into buffers
6420       //===========================================
6421       for (ind = 0; ind < pc->buffProcs.size(); ind++) {
6422         // Entities
6423         pc->localOwnedBuffs[ind]->reset_ptr(sizeof(int));
6424         result = pc->pack_entities(sent_ents[p][ind], pc->localOwnedBuffs[ind],
6425                                    store_remote_handles, pc->buffProcs[ind], is_iface,
6426                                    &entprocs[p], &allsent[p]);MB_CHK_SET_ERR(result, "p = " << p << ", packing entities failed");
6427       }
6428 
6429       entprocs[p].reset();
6430     }
6431 
6432     //===========================================
6433     // Receive/unpack new entities
6434     //===========================================
6435     // Number of incoming messages for ghosts is the number of procs we
6436     // communicate with; for iface, it's the number of those with lower rank
6437     std::vector<std::vector<EntityHandle> > L1hloc[MAX_SHARING_PROCS], L1hrem[MAX_SHARING_PROCS];
6438     std::vector<std::vector<int> > L1p[MAX_SHARING_PROCS];
6439     std::vector<EntityHandle> L2hloc[MAX_SHARING_PROCS], L2hrem[MAX_SHARING_PROCS];
6440     std::vector<unsigned int> L2p[MAX_SHARING_PROCS];
6441     std::vector<EntityHandle> new_ents[MAX_SHARING_PROCS];
6442 
6443     for (unsigned int p = 0; p < num_procs; p++) {
6444       L1hloc[p].resize(pcs[p]->buffProcs.size());
6445       L1hrem[p].resize(pcs[p]->buffProcs.size());
6446       L1p[p].resize(pcs[p]->buffProcs.size());
6447     }
6448 
6449     for (unsigned int p = 0; p < num_procs; p++) {
6450       pc = pcs[p];
6451 
6452       for (ind = 0; ind < pc->buffProcs.size(); ind++) {
6453         // Incoming ghost entities; unpack; returns entities received
6454         // both from sending proc and from owning proc (which may be different)
6455 
6456         // Buffer could be empty, which means there isn't any message to
6457         // unpack (due to this comm proc getting added as a result of indirect
6458         // communication); just skip this unpack
6459         if (pc->localOwnedBuffs[ind]->get_stored_size() == 0)
6460           continue;
6461 
6462         unsigned int to_p = pc->buffProcs[ind];
6463         pc->localOwnedBuffs[ind]->reset_ptr(sizeof(int));
6464         result = pcs[to_p]->unpack_entities(pc->localOwnedBuffs[ind]->buff_ptr,
6465                                             store_remote_handles, ind, is_iface,
6466                                             L1hloc[to_p], L1hrem[to_p], L1p[to_p], L2hloc[to_p],
6467                                             L2hrem[to_p], L2p[to_p], new_ents[to_p]);MB_CHK_SET_ERR(result, "p = " << p << ", failed to unpack entities");
6468       }
6469     }
6470 
6471     if (is_iface) {
6472       // Need to check over entities I sent and make sure I received
6473       // handles for them from all expected procs; if not, need to clean
6474       // them up
6475       for (unsigned int p = 0; p < num_procs; p++) {
6476         result = pcs[p]->check_clean_iface(allsent[p]);MB_CHK_SET_ERR(result, "p = " << p << ", failed to check on shared entities");
6477       }
6478 
6479 #ifndef NDEBUG
6480       for (unsigned int p = 0; p < num_procs; p++) {
6481         result = pcs[p]->check_sent_ents(allsent[p]);MB_CHK_SET_ERR(result, "p = " << p << ", failed to check on shared entities");
6482       }
6483       result = check_all_shared_handles(pcs, num_procs);MB_CHK_SET_ERR(result, "Failed to check on all shared handles");
6484 #endif
6485       return MB_SUCCESS;
6486     }
6487 
6488     //===========================================
6489     // Send local handles for new ghosts to owner, then add
6490     // those to ghost list for that owner
6491     //===========================================
6492     std::vector<unsigned int>::iterator proc_it;
6493     for (unsigned int p = 0; p < num_procs; p++) {
6494       pc = pcs[p];
6495 
6496       for (ind = 0, proc_it = pc->buffProcs.begin();
6497            proc_it != pc->buffProcs.end(); ++proc_it, ind++) {
6498         // Skip if iface layer and higher-rank proc
6499         pc->localOwnedBuffs[ind]->reset_ptr(sizeof(int));
6500         result = pc->pack_remote_handles(L1hloc[p][ind], L1hrem[p][ind], L1p[p][ind], *proc_it,
6501                                          pc->localOwnedBuffs[ind]);MB_CHK_SET_ERR(result, "p = " << p << ", failed to pack remote handles");
6502       }
6503     }
6504 
6505     //===========================================
6506     // Process remote handles of my ghosteds
6507     //===========================================
6508     for (unsigned int p = 0; p < num_procs; p++) {
6509       pc = pcs[p];
6510 
6511       for (ind = 0, proc_it = pc->buffProcs.begin();
6512            proc_it != pc->buffProcs.end(); ++proc_it, ind++) {
6513         // Incoming remote handles
6514         unsigned int to_p = pc->buffProcs[ind];
6515         pc->localOwnedBuffs[ind]->reset_ptr(sizeof(int));
6516         result = pcs[to_p]->unpack_remote_handles(p,
6517                                                   pc->localOwnedBuffs[ind]->buff_ptr,
6518                                                   L2hloc[to_p], L2hrem[to_p], L2p[to_p]);MB_CHK_SET_ERR(result, "p = " << p << ", failed to unpack remote handles");
6519       }
6520     }
6521 
6522 #ifndef NDEBUG
6523     for (unsigned int p = 0; p < num_procs; p++) {
6524       result = pcs[p]->check_sent_ents(allsent[p]);MB_CHK_SET_ERR(result, "p = " << p << ", failed to check on shared entities");
6525     }
6526 
6527     result = ParallelComm::check_all_shared_handles(pcs, num_procs);MB_CHK_SET_ERR(result, "Failed to check on all shared handles");
6528 #endif
6529 
6530     if (file_sets) {
6531       for (unsigned int p = 0; p < num_procs; p++) {
6532         if (new_ents[p].empty())
6533           continue;
6534         result = pcs[p]->get_moab()->add_entities(file_sets[p], &new_ents[p][0], new_ents[p].size());MB_CHK_SET_ERR(result, "p = " << p << ", failed to add new entities to set");
6535       }
6536     }
6537 
6538     return MB_SUCCESS;
6539   }
6540 
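  // Two overloads follow: the first posts initial-size receives from every proc
  // in exchange_procs; the second sets up buffers for all of shared_procs but
  // posts receives only from the procs in recv_procs.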
6541   ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& exchange_procs)
6542   {
6543     // Set buffers
6544     int n_proc = exchange_procs.size();
6545     for (int i = 0; i < n_proc; i++)
6546       get_buffers(exchange_procs[i]);
6547     reset_all_buffers();
6548 
6549     // Post ghost irecv's for entities from all communicating procs
6550     // Index requests the same as buffer/sharing procs indices
6551     int success;
6552     recvReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
6553     recvRemotehReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
6554     sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
6555 
6556     int incoming = 0;
6557     for (int i = 0; i < n_proc; i++) {
6558       int ind = get_buffers(exchange_procs[i]);
6559       incoming++;
6560       PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[ind],
6561                         remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
6562                         MB_MESG_ENTS_SIZE, incoming);
6563       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
6564                           MPI_UNSIGNED_CHAR, buffProcs[ind],
6565                           MB_MESG_ENTS_SIZE, procConfig.proc_comm(),
6566                           &recvReqs[2*ind]);
6567       if (success != MPI_SUCCESS) {
6568         MB_SET_ERR(MB_FAILURE, "Failed to post irecv in owned entity exchange");
6569       }
6570     }
6571 
6572     return MB_SUCCESS;
6573   }
6574 
6575   ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
6576                                      std::set<unsigned int>& recv_procs)
6577   {
6578     // Set buffers
6579     int num = shared_procs.size();
6580     for (int i = 0; i < num; i++)
6581       get_buffers(shared_procs[i]);
6582     reset_all_buffers();
6583     num = remoteOwnedBuffs.size();
6584     for (int i = 0; i < num; i++)
6585       remoteOwnedBuffs[i]->set_stored_size();
6586     num = localOwnedBuffs.size();
6587     for (int i = 0; i < num; i++)
6588       localOwnedBuffs[i]->set_stored_size();
6589 
6590     // Post ghost irecv's for entities from all communicating procs
6591     // Index requests the same as buffer/sharing procs indices
6592     int success;
6593     recvReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
6594     recvRemotehReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
6595     sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
6596 
6597     int incoming = 0;
6598     std::set<unsigned int>::iterator it = recv_procs.begin();
6599     std::set<unsigned int>::iterator eit = recv_procs.end();
6600     for (; it != eit; ++it) {
6601       int ind = get_buffers(*it);
6602       incoming++;
6603       PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[ind],
6604                         remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
6605                         MB_MESG_ENTS_SIZE, incoming);
6606       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
6607                           MPI_UNSIGNED_CHAR, buffProcs[ind],
6608                           MB_MESG_ENTS_SIZE, procConfig.proc_comm(),
6609                           &recvReqs[2*ind]);
6610       if (success != MPI_SUCCESS) {
6611         MB_SET_ERR(MB_FAILURE, "Failed to post irecv in owned entity exchange");
6612       }
6613     }
6614 
6615     return MB_SUCCESS;
6616   }
6617 
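  // Exchange owned mesh with the given procs: entity sets are split out and
  // exchanged separately from the other entities, then interface sets are
  // rebuilt from the resulting shared-entity lists.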
6618   ErrorCode ParallelComm::exchange_owned_meshs(std::vector<unsigned int>& exchange_procs,
6619                                                std::vector<Range*>& exchange_ents,
6620                                                std::vector<MPI_Request>& recv_ent_reqs,
6621                                                std::vector<MPI_Request>& recv_remoteh_reqs,
6622                                                bool store_remote_handles,
6623                                                bool wait_all,
6624                                                bool migrate,
6625                                                int dim)
6626   {
6627     // Filter out entities already shared with destination
6628     // Exchange twice for entities and sets
6629     ErrorCode result;
6630     std::vector<unsigned int> exchange_procs_sets;
6631     std::vector<Range*> exchange_sets;
6632     int n_proc = exchange_procs.size();
6633     for (int i = 0; i < n_proc; i++) {
6634       Range set_range = exchange_ents[i]->subset_by_type(MBENTITYSET);
6635       *exchange_ents[i] = subtract(*exchange_ents[i], set_range);
6636       Range* tmp_range = new Range(set_range);
6637       exchange_sets.push_back(tmp_range);
6638       exchange_procs_sets.push_back(exchange_procs[i]);
6639     }
6640 
6641     if (dim == 2) {
6642       // Exchange entities first
6643       result = exchange_owned_mesh(exchange_procs, exchange_ents,
6644                                    recvReqs, recvRemotehReqs, true,
6645                                    store_remote_handles, wait_all, migrate);MB_CHK_SET_ERR(result, "Failed to exchange owned mesh entities");
6646 
6647       // Exchange sets
6648       result = exchange_owned_mesh(exchange_procs_sets, exchange_sets,
6649                                    recvReqs, recvRemotehReqs, false,
6650                                    store_remote_handles, wait_all, migrate);MB_CHK_SET_ERR(result, "Failed to exchange owned mesh sets");
6651     }
6652     else {
6653       // Exchange entities first
6654       result = exchange_owned_mesh(exchange_procs, exchange_ents,
6655                                    recv_ent_reqs, recv_remoteh_reqs, false,
6656                                    store_remote_handles, wait_all, migrate);MB_CHK_SET_ERR(result, "Failed to exchange owned mesh entities");
6657 
6658       // Exchange sets
6659       result = exchange_owned_mesh(exchange_procs_sets, exchange_sets,
6660                                    recv_ent_reqs, recv_remoteh_reqs, false,
6661                                    store_remote_handles, wait_all, migrate);MB_CHK_SET_ERR(result, "Failed to exchange owned mesh sets");
6662     }
6663 
6664     for (int i = 0; i < n_proc; i++)
6665       delete exchange_sets[i];
6666 
6667     // Build up the list of shared entities
6668     std::map<std::vector<int>, std::vector<EntityHandle> > proc_nvecs;
6669     int procs[MAX_SHARING_PROCS];
6670     EntityHandle handles[MAX_SHARING_PROCS];
6671     int nprocs;
6672     unsigned char pstat;
6673     for (std::vector<EntityHandle>::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit) {
6674       if (mbImpl->dimension_from_handle(*vit) > 2)
6675         continue;
6676       result = get_sharing_data(*vit, procs, handles, pstat, nprocs);MB_CHK_SET_ERR(result, "Failed to get sharing data in exchange_owned_meshs");
6677       std::sort(procs, procs + nprocs);
6678       std::vector<int> tmp_procs(procs, procs + nprocs);
6679       assert(tmp_procs.size() != 2);
6680       proc_nvecs[tmp_procs].push_back(*vit);
6681     }
6682 
6683     // Create interface sets from shared entities
6684     result = create_interface_sets(proc_nvecs);MB_CHK_SET_ERR(result, "Failed to create interface sets");
6685 
6686     return MB_SUCCESS;
6687   }
6688 
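  // Core pairwise exchange: post (or reuse) receives, pack and send owned
  // entities to each proc, unpack incoming entities, then exchange remote
  // handles so owners learn the handles of their new remote copies.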
6689   ErrorCode ParallelComm::exchange_owned_mesh(std::vector<unsigned int>& exchange_procs,
6690                                               std::vector<Range*>& exchange_ents,
6691                                               std::vector<MPI_Request>& recv_ent_reqs,
6692                                               std::vector<MPI_Request>& recv_remoteh_reqs,
6693                                               const bool recv_posted,
6694                                               bool store_remote_handles,
6695                                               bool wait_all,
6696                                               bool migrate)
6697   {
6698 #ifdef MOAB_HAVE_MPE
6699     if (myDebug->get_verbosity() == 2) {
6700       MPE_Log_event(OWNED_START, procConfig.proc_rank(), "Starting owned ents exchange.");
6701     }
6702 #endif
6703 
6704     myDebug->tprintf(1, "Entering exchange_owned_mesh\n");
6705     if (myDebug->get_verbosity() == 4) {
6706       msgs.clear();
6707       msgs.reserve(MAX_SHARING_PROCS);
6708     }
6709     unsigned int i;
6710     int ind, success;
6711     ErrorCode result = MB_SUCCESS;
6712     int incoming1 = 0, incoming2 = 0;
6713 
6714     // Set buffProcs with communicating procs
6715     unsigned int n_proc = exchange_procs.size();
6716     for (i = 0; i < n_proc; i++) {
6717       ind = get_buffers(exchange_procs[i]);
6718       result = add_verts(*exchange_ents[i]);MB_CHK_SET_ERR(result, "Failed to add verts");
6719 
6720       // Filter out entities already shared with destination
6721       Range tmp_range;
6722       result = filter_pstatus(*exchange_ents[i], PSTATUS_SHARED, PSTATUS_AND,
6723                               buffProcs[ind], &tmp_range);MB_CHK_SET_ERR(result, "Failed to filter on owner");
6724       if (!tmp_range.empty()) {
6725         *exchange_ents[i] = subtract(*exchange_ents[i], tmp_range);
6726       }
6727     }
6728 
6729     //===========================================
6730     // Post ghost irecv's for entities from all communicating procs
6731     //===========================================
6732 #ifdef MOAB_HAVE_MPE
6733     if (myDebug->get_verbosity() == 2) {
6734       MPE_Log_event(ENTITIES_START, procConfig.proc_rank(), "Starting entity exchange.");
6735     }
6736 #endif
6737 
6738     // Index reqs the same as buffer/sharing procs indices
6739     if (!recv_posted) {
6740       reset_all_buffers();
6741       recv_ent_reqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
6742       recv_remoteh_reqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
6743       sendReqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
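      // Three requests are allocated per proc: the initial-size receive goes in
      // slot 3*ind; recv_buffer/send_buffer use the other two slots for the
      // follow-on large message and the ack.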
6744 
6745       for (i = 0; i < n_proc; i++) {
6746         ind = get_buffers(exchange_procs[i]);
6747         incoming1++;
6748         PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[ind],
6749                           remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
6750                           MB_MESG_ENTS_SIZE, incoming1);
6751         success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
6752                             MPI_UNSIGNED_CHAR, buffProcs[ind],
6753                             MB_MESG_ENTS_SIZE, procConfig.proc_comm(),
6754                             &recv_ent_reqs[3*ind]);
6755         if (success != MPI_SUCCESS) {
6756           MB_SET_ERR(MB_FAILURE, "Failed to post irecv in owned entity exchange");
6757         }
6758       }
6759     }
6760     else
6761       incoming1 += n_proc;
6762 
6763     //===========================================
6764     // Get entities to be sent to neighbors
6765     // Need to get procs each entity is sent to
6766     //===========================================
6767     Range allsent, tmp_range;
6768     int dum_ack_buff;
6769     int npairs = 0;
6770     TupleList entprocs;
6771     for (i = 0; i < n_proc; i++) {
6772       int n_ents = exchange_ents[i]->size();
6773       if (n_ents > 0) {
6774         npairs += n_ents; // Get the total # of proc/handle pairs
6775         allsent.merge(*exchange_ents[i]);
6776       }
6777     }
6778 
6779     // Allocate a TupleList of that size
6780     entprocs.initialize(1, 0, 1, 0, npairs);
6781     entprocs.enableWriteAccess();
6782 
6783     // Put the proc/handle pairs in the list
6784     for (i = 0; i < n_proc; i++) {
6785       for (Range::iterator rit = exchange_ents[i]->begin(); rit != exchange_ents[i]->end(); ++rit) {
6786         entprocs.vi_wr[entprocs.get_n()] = exchange_procs[i];
6787         entprocs.vul_wr[entprocs.get_n()] = *rit;
6788         entprocs.inc_n();
6789       }
6790     }
6791 
6792     // Sort by handle
6793     moab::TupleList::buffer sort_buffer;
6794     sort_buffer.buffer_init(npairs);
6795     entprocs.sort(1, &sort_buffer);
6796     sort_buffer.reset();
6797 
6798     myDebug->tprintf(1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
6799                      (unsigned long)allsent.size());
6800 
6801     //===========================================
6802     // Pack and send ents from this proc to others
6803     //===========================================
6804     for (i = 0; i < n_proc; i++) {
6805       ind = get_buffers(exchange_procs[i]);
6806       myDebug->tprintf(1, "Sent ents compactness (size) = %f (%lu)\n", exchange_ents[i]->compactness(),
6807                        (unsigned long)exchange_ents[i]->size());
6808       // Reserve space on front for size and for initial buff size
6809       localOwnedBuffs[ind]->reset_buffer(sizeof(int));
6810       result = pack_buffer(*exchange_ents[i], false, true,
6811                            store_remote_handles, buffProcs[ind],
6812                            localOwnedBuffs[ind], &entprocs, &allsent);MB_CHK_SET_ERR(result, "Failed to pack buffer in owned entity exchange");
6813 
6814       if (myDebug->get_verbosity() == 4) {
6815         msgs.resize(msgs.size() + 1);
6816         msgs.back() = new Buffer(*localOwnedBuffs[ind]);
6817       }
6818 
6819       // Send the buffer (size stored in front in send_buffer)
6820       result = send_buffer(exchange_procs[i], localOwnedBuffs[ind],
6821                            MB_MESG_ENTS_SIZE, sendReqs[3*ind],
6822                            recv_ent_reqs[3*ind + 2], &dum_ack_buff,
6823                            incoming1,
6824                            MB_MESG_REMOTEH_SIZE,
6825                            (store_remote_handles ?
6826                            localOwnedBuffs[ind] : NULL),
6827                            &recv_remoteh_reqs[3*ind], &incoming2);MB_CHK_SET_ERR(result, "Failed to Isend in ghost exchange");
6828     }
6829 
6830     entprocs.reset();
6831 
6832     //===========================================
6833     // Receive/unpack new entities
6834     //===========================================
6835     // Number of incoming messages is the number of procs we communicate with
6836     MPI_Status status;
6837     std::vector<std::vector<EntityHandle> > recd_ents(buffProcs.size());
6838     std::vector<std::vector<EntityHandle> > L1hloc(buffProcs.size()), L1hrem(buffProcs.size());
6839     std::vector<std::vector<int> > L1p(buffProcs.size());
6840     std::vector<EntityHandle> L2hloc, L2hrem;
6841     std::vector<unsigned int> L2p;
6842     std::vector<EntityHandle> new_ents;
6843 
6844     while (incoming1) {
6845       // Wait for all recvs of ents before proceeding to sending remote handles,
6846       // b/c some procs may have sent to a 3rd proc ents owned by me;
6847       PRINT_DEBUG_WAITANY(recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank());
6848 
6849       success = MPI_Waitany(3*buffProcs.size(), &recv_ent_reqs[0], &ind, &status);
6850       if (MPI_SUCCESS != success) {
6851         MB_SET_ERR(MB_FAILURE, "Failed in waitany in owned entity exchange");
6852       }
6853 
6854       PRINT_DEBUG_RECD(status);
6855 
6856       // OK, received something; decrement incoming counter
6857       incoming1--;
6858       bool done = false;
6859 
6860       // In case ind is for ack, we need index of one before it
6861       unsigned int base_ind = 3*(ind/3);
6862       result = recv_buffer(MB_MESG_ENTS_SIZE,
6863                            status,
6864                            remoteOwnedBuffs[ind/3],
6865                            recv_ent_reqs[base_ind + 1],
6866                            recv_ent_reqs[base_ind + 2],
6867                            incoming1,
6868                            localOwnedBuffs[ind/3],
6869                            sendReqs[base_ind + 1],
6870                            sendReqs[base_ind + 2],
6871                            done,
6872                            (store_remote_handles ?
6873                             localOwnedBuffs[ind/3] : NULL),
6874                            MB_MESG_REMOTEH_SIZE,
6875                            &recv_remoteh_reqs[base_ind + 1], &incoming2);MB_CHK_SET_ERR(result, "Failed to receive buffer");
6876 
6877       if (done) {
6878         if (myDebug->get_verbosity() == 4) {
6879           msgs.resize(msgs.size() + 1);
6880           msgs.back() = new Buffer(*remoteOwnedBuffs[ind/3]);
6881         }
6882 
6883         // Message completely received - process buffer that was sent
6884         remoteOwnedBuffs[ind/3]->reset_ptr(sizeof(int));
6885         result = unpack_buffer(remoteOwnedBuffs[ind/3]->buff_ptr,
6886                                store_remote_handles, buffProcs[ind/3], ind/3,
6887                                L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
6888                                new_ents, true);
6889         if (MB_SUCCESS != result) {
6890           std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
6891           print_buffer(remoteOwnedBuffs[ind/3]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind/3], false);
6892           return result;
6893         }
6894 
6895         if (recv_ent_reqs.size() != 3*buffProcs.size()) {
6896           // Post irecv's for remote handles from new proc
6897           recv_remoteh_reqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
6898           for (i = recv_ent_reqs.size(); i < 3*buffProcs.size(); i += 3) {
6899             localOwnedBuffs[i/3]->reset_buffer();
6900             incoming2++;
6901             PRINT_DEBUG_IRECV(procConfig.proc_rank(), buffProcs[i/3],
6902                               localOwnedBuffs[i/3]->mem_ptr, INITIAL_BUFF_SIZE,
6903                               MB_MESG_REMOTEH_SIZE, incoming2);
6904             success = MPI_Irecv(localOwnedBuffs[i/3]->mem_ptr, INITIAL_BUFF_SIZE,
6905                                 MPI_UNSIGNED_CHAR, buffProcs[i/3],
6906                                 MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
6907                                 &recv_remoteh_reqs[i]);
6908             if (success != MPI_SUCCESS) {
6909               MB_SET_ERR(MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange");
6910             }
6911           }
6912           recv_ent_reqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
6913           sendReqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
6914         }
6915       }
6916     }
6917 
6918     // Assign newly received entities to this proc's part; if migrating, remove the sent entities from it
6919     result = assign_entities_part(new_ents, procConfig.proc_rank());MB_CHK_SET_ERR(result, "Failed to assign entities to part");
6920     if (migrate) {
6921       result = remove_entities_part(allsent, procConfig.proc_rank());MB_CHK_SET_ERR(result, "Failed to remove entities from part");
6922     }
6923 
6924     // Add requests for any new addl procs
6925     if (recv_ent_reqs.size() != 3*buffProcs.size()) {
6926       // Shouldn't get here...
6927       MB_SET_ERR(MB_FAILURE, "Requests length doesn't match proc count in entity exchange");
6928     }
6929 
6930 #ifdef MOAB_HAVE_MPE
6931     if (myDebug->get_verbosity() == 2) {
6932       MPE_Log_event(ENTITIES_END, procConfig.proc_rank(), "Ending entity exchange.");
6933     }
6934 #endif
6935 
6936     // We still need to wait on sendReqs if they are not yet fulfilled
6937     if (wait_all) {
6938       if (myDebug->get_verbosity() == 5) {
6939         success = MPI_Barrier(procConfig.proc_comm());
6940       }
6941       else {
6942         MPI_Status mult_status[3*MAX_SHARING_PROCS];
6943         success = MPI_Waitall(3*buffProcs.size(), &sendReqs[0], mult_status);
6944         if (MPI_SUCCESS != success) {
6945           MB_SET_ERR(MB_FAILURE, "Failed in waitall in exchange owned mesh");
6946         }
6947       }
6948     }
6949 
6950     //===========================================
6951     // Send local handles for new entity to owner
6952     //===========================================
6953     for (i = 0; i < n_proc; i++) {
6954       ind = get_buffers(exchange_procs[i]);
6955       // Reserve space on front for size and for initial buff size
6956       remoteOwnedBuffs[ind]->reset_buffer(sizeof(int));
6957 
6958       result = pack_remote_handles(L1hloc[ind], L1hrem[ind], L1p[ind],
6959                                    buffProcs[ind], remoteOwnedBuffs[ind]);MB_CHK_SET_ERR(result, "Failed to pack remote handles");
6960       remoteOwnedBuffs[ind]->set_stored_size();
6961 
6962       if (myDebug->get_verbosity() == 4) {
6963         msgs.resize(msgs.size() + 1);
6964         msgs.back() = new Buffer(*remoteOwnedBuffs[ind]);
6965       }
6966       result = send_buffer(buffProcs[ind], remoteOwnedBuffs[ind],
6967                            MB_MESG_REMOTEH_SIZE,
6968                            sendReqs[3*ind],
6969                            recv_remoteh_reqs[3*ind + 2],
6970                            &dum_ack_buff, incoming2);MB_CHK_SET_ERR(result, "Failed to send remote handles");
6971     }
6972 
6973     //===========================================
6974     // Process remote handles of my ghosteds
6975     //===========================================
6976     while (incoming2) {
6977       PRINT_DEBUG_WAITANY(recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank());
6978       success = MPI_Waitany(3*buffProcs.size(), &recv_remoteh_reqs[0], &ind, &status);
6979       if (MPI_SUCCESS != success) {
6980         MB_SET_ERR(MB_FAILURE, "Failed in waitany in owned entity exchange");
6981       }
6982 
6983       // OK, received something; decrement incoming counter
6984       incoming2--;
6985 
6986       PRINT_DEBUG_RECD(status);
6987 
6988       bool done = false;
6989       unsigned int base_ind = 3*(ind/3);
6990       result = recv_buffer(MB_MESG_REMOTEH_SIZE, status,
6991                            localOwnedBuffs[ind/3],
6992                            recv_remoteh_reqs[base_ind + 1],
6993                            recv_remoteh_reqs[base_ind + 2],
6994                            incoming2,
6995                            remoteOwnedBuffs[ind/3],
6996                            sendReqs[base_ind + 1],
6997                            sendReqs[base_ind + 2],
6998                            done);MB_CHK_SET_ERR(result, "Failed to receive remote handles");
6999 
7000       if (done) {
7001         // Incoming remote handles
7002         if (myDebug->get_verbosity() == 4) {
7003           msgs.resize(msgs.size() + 1);
7004           msgs.back() = new Buffer(*localOwnedBuffs[ind/3]);
7005         }
7006 
7007         localOwnedBuffs[ind/3]->reset_ptr(sizeof(int));
7008         result = unpack_remote_handles(buffProcs[ind/3],
7009                                        localOwnedBuffs[ind/3]->buff_ptr,
7010                                        L2hloc, L2hrem, L2p);MB_CHK_SET_ERR(result, "Failed to unpack remote handles");
7011       }
7012     }
7013 
7014 #ifdef MOAB_HAVE_MPE
7015     if (myDebug->get_verbosity() == 2) {
7016       MPE_Log_event(RHANDLES_END, procConfig.proc_rank(), "Ending remote handles.");
7017       MPE_Log_event(OWNED_END, procConfig.proc_rank(),
7018                     "Ending ghost exchange (still doing checks).");
7019     }
7020 #endif
7021 
7022     //===========================================
7023     // Wait if requested
7024     //===========================================
7025     if (wait_all) {
7026       if (myDebug->get_verbosity() == 5) {
7027         success = MPI_Barrier(procConfig.proc_comm());
7028       }
7029       else {
7030         MPI_Status mult_status[3*MAX_SHARING_PROCS];
7031         success = MPI_Waitall(3*buffProcs.size(), &recv_remoteh_reqs[0], mult_status);
7032         if (MPI_SUCCESS == success)
7033           success = MPI_Waitall(3*buffProcs.size(), &sendReqs[0], mult_status);
7034       }
7035       if (MPI_SUCCESS != success) {
7036         MB_SET_ERR(MB_FAILURE, "Failed in waitall in owned entity exchange");
7037       }
7038     }
7039 
7040 #ifndef NDEBUG
7041     result = check_sent_ents(allsent);MB_CHK_SET_ERR(result, "Failed check on shared entities");
7042 #endif
7043     myDebug->tprintf(1, "Exiting exchange_owned_mesh\n");
7044 
7045     return MB_SUCCESS;
7046   }
7047 
7048   ErrorCode ParallelComm::get_iface_entities(int other_proc,
7049                                              int dim,
7050                                              Range &iface_ents)
7051   {
7052     Range iface_sets;
7053     ErrorCode result = MB_SUCCESS;
7054 
7055     for (Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit) {
7056       if (-1 != other_proc && !is_iface_proc(*rit, other_proc))
7057         continue;
7058 
7059       if (-1 == dim) {
7060         result = mbImpl->get_entities_by_handle(*rit, iface_ents);MB_CHK_SET_ERR(result, "Failed to get entities in iface set");
7061       }
7062       else {
7063         result = mbImpl->get_entities_by_dimension(*rit, dim, iface_ents);MB_CHK_SET_ERR(result, "Failed to get entities in iface set");
7064       }
7065     }
7066 
7067     return MB_SUCCESS;
7068   }
7069 
7070   ErrorCode ParallelComm::assign_entities_part(std::vector<EntityHandle> &entities, const int proc)
7071   {
7072     EntityHandle part_set;
7073     ErrorCode result = get_part_handle(proc, part_set);MB_CHK_SET_ERR(result, "Failed to get part handle");
7074 
7075     if (part_set > 0) {
7076       result = mbImpl->add_entities(part_set, &entities[0], entities.size());MB_CHK_SET_ERR(result, "Failed to add entities to part set");
7077     }
7078 
7079     return MB_SUCCESS;
7080   }
7081 
7082   ErrorCode ParallelComm::remove_entities_part(Range &entities, const int proc)
7083   {
7084     EntityHandle part_set;
7085     ErrorCode result = get_part_handle(proc, part_set);MB_CHK_SET_ERR(result, "Failed to get part handle");
7086 
7087     if (part_set > 0) {
7088       result = mbImpl->remove_entities(part_set, entities);MB_CHK_SET_ERR(result, "Failed to remove entities from part set");
7089     }
7090 
7091     return MB_SUCCESS;
7092   }
7093 
7094   ErrorCode ParallelComm::check_sent_ents(Range &allsent)
7095   {
7096     // Check entities to make sure there are no zero-valued remote handles
7097     // where they shouldn't be
7098     std::vector<unsigned char> pstat(allsent.size());
7099     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), allsent, &pstat[0]);MB_CHK_SET_ERR(result, "Failed to get pstatus tag data");
7100     std::vector<EntityHandle> handles(allsent.size());
7101     result = mbImpl->tag_get_data(sharedh_tag(), allsent, &handles[0]);MB_CHK_SET_ERR(result, "Failed to get sharedh tag data");
7102     std::vector<int> procs(allsent.size());
7103     result = mbImpl->tag_get_data(sharedp_tag(), allsent, &procs[0]);MB_CHK_SET_ERR(result, "Failed to get sharedp tag data");
7104 
7105     Range bad_entities;
7106 
7107     Range::iterator rit;
7108     unsigned int i;
7109     EntityHandle dum_hs[MAX_SHARING_PROCS];
7110     int dum_ps[MAX_SHARING_PROCS];
7111 
7112     for (rit = allsent.begin(), i = 0; rit != allsent.end(); ++rit, i++) {
7113       if (-1 != procs[i] && 0 == handles[i])
7114         bad_entities.insert(*rit);
7115       else {
7116         // Might be multi-shared...
7117         result = mbImpl->tag_get_data(sharedps_tag(), &(*rit), 1, dum_ps);
7118         if (MB_TAG_NOT_FOUND == result)
7119           continue;
7120         else if (MB_SUCCESS != result)
7121           MB_SET_ERR(result, "Failed to get sharedps tag data");
7122         result = mbImpl->tag_get_data(sharedhs_tag(), &(*rit), 1, dum_hs);MB_CHK_SET_ERR(result, "Failed to get sharedhs tag data");
7123 
7124         // Find first non-set proc
7125         int *ns_proc = std::find(dum_ps, dum_ps + MAX_SHARING_PROCS, -1);
7126         int num_procs = ns_proc - dum_ps;
7127         assert(num_procs <= MAX_SHARING_PROCS);
7128         // Now look for zero handles in active part of dum_hs
7129         EntityHandle *ns_handle = std::find(dum_hs, dum_hs + num_procs, 0);
7130         int num_handles = ns_handle - dum_hs;
7131         assert(num_handles <= num_procs);
7132         if (num_handles != num_procs)
7133           bad_entities.insert(*rit);
7134       }
7135     }
7136 
7137     return MB_SUCCESS;
7138   }
7139 
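  // Send back the handles created locally for entities received from to_proc
  // (the L1* lists are filled during unpack_entities), so the sender can record
  // where its entities now live on this proc.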
7140   ErrorCode ParallelComm::pack_remote_handles(std::vector<EntityHandle> &L1hloc,
7141                                               std::vector<EntityHandle> &L1hrem,
7142                                               std::vector<int> &L1p,
7143                                               unsigned int /*to_proc*/,
7144                                               Buffer *buff)
7145   {
7146     assert(std::find(L1hloc.begin(), L1hloc.end(), (EntityHandle)0) == L1hloc.end());
7147 
7148     // 2 vectors of handles plus ints
7149     buff->check_space(((L1p.size() + 1)*sizeof(int) +
7150                        (L1hloc.size() + 1)*sizeof(EntityHandle) +
7151                        (L1hrem.size() + 1)*sizeof(EntityHandle)));
7152 
7153     // Should be in pairs of handles
7154     PACK_INT(buff->buff_ptr, L1hloc.size());
7155     PACK_INTS(buff->buff_ptr, &L1p[0], L1p.size());
7156     // Pack handles in reverse order, (remote, local), so on destination they
7157     // are ordered (local, remote)
7158     PACK_EH(buff->buff_ptr, &L1hrem[0], L1hrem.size());
7159     PACK_EH(buff->buff_ptr, &L1hloc[0], L1hloc.size());
7160 
7161     buff->set_stored_size();
7162 
7163     return MB_SUCCESS;
7164   }
7165 
7166   ErrorCode ParallelComm::unpack_remote_handles(unsigned int from_proc,
7167                                                 unsigned char *&buff_ptr,
7168                                                 std::vector<EntityHandle> &L2hloc,
7169                                                 std::vector<EntityHandle> &L2hrem,
7170                                                 std::vector<unsigned int> &L2p)
7171   {
7172     // Incoming remote handles; use to set remote handles
7173     int num_eh;
7174     UNPACK_INT(buff_ptr, num_eh);
7175 
7176     unsigned char *buff_proc = buff_ptr;
7177     buff_ptr += num_eh * sizeof(int);
7178     unsigned char *buff_rem = buff_ptr + num_eh * sizeof(EntityHandle);
7179     ErrorCode result;
7180     EntityHandle hpair[2], new_h;
7181     int proc;
7182     for (int i = 0; i < num_eh; i++) {
7183       UNPACK_INT(buff_proc, proc);
7184       // Handles packed (local, remote), though here local is either on this
7185       // proc or owner proc, depending on value of proc (-1 = here, otherwise owner);
7186       // this is decoded in find_existing_entity
7187       UNPACK_EH(buff_ptr, hpair, 1);
7188       UNPACK_EH(buff_rem, hpair + 1, 1);
7189 
7190       if (-1 != proc) {
7191         result = find_existing_entity(false, proc, hpair[0], 3, NULL, 0,
7192                                       mbImpl->type_from_handle(hpair[1]),
7193                                       L2hloc, L2hrem, L2p, new_h);MB_CHK_SET_ERR(result, "Didn't get existing entity");
7194         if (new_h)
7195           hpair[0] = new_h;
7196         else
7197           hpair[0] = 0;
7198       }
7199       if (!(hpair[0] && hpair[1]))
7200         return MB_FAILURE;
7201       int this_proc = from_proc;
7202       result = update_remote_data(hpair[0], &this_proc, hpair + 1, 1, 0);MB_CHK_SET_ERR(result, "Failed to set remote data range on sent entities in ghost exchange");
7203     }
7204 
7205     return MB_SUCCESS;
7206   }
7207 
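  // Collect the ghost entities to send to to_proc: start from bridge entities in
  // the shared interface sets, walk num_layers of bridge adjacencies, add the
  // adjacent vertices, and optionally (addl_ents) include adjacent edges/faces.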
7208   ErrorCode ParallelComm::get_ghosted_entities(int bridge_dim,
7209                                                int ghost_dim,
7210                                                int to_proc,
7211                                                int num_layers,
7212                                                int addl_ents,
7213                                                Range &ghosted_ents)
7214   {
7215     // Get bridge ents on interface(s)
7216     Range from_ents;
7217     ErrorCode result = MB_SUCCESS;
7218     assert(0 < num_layers);
7219     for (Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end();
7220          ++rit) {
7221       if (!is_iface_proc(*rit, to_proc))
7222         continue;
7223 
7224       // Get starting "from" entities
7225       if (bridge_dim == -1) {
7226         result = mbImpl->get_entities_by_handle(*rit, from_ents);MB_CHK_SET_ERR(result, "Failed to get bridge ents in the set");
7227       }
7228       else {
7229         result = mbImpl->get_entities_by_dimension(*rit, bridge_dim, from_ents);MB_CHK_SET_ERR(result, "Failed to get bridge ents in the set");
7230       }
7231 
7232       // Need to get layers of bridge-adj entities
7233       if (from_ents.empty())
7234         continue;
7235       result = MeshTopoUtil(mbImpl).get_bridge_adjacencies(from_ents, bridge_dim,
7236                                                            ghost_dim, ghosted_ents,
7237                                                            num_layers);MB_CHK_SET_ERR(result, "Failed to get bridge adjacencies");
7238     }
7239 
7240     result = add_verts(ghosted_ents);MB_CHK_SET_ERR(result, "Failed to add verts");
7241 
7242     if (addl_ents) {
7243       // First get the ents of ghost_dim
7244       Range tmp_ents, tmp_owned, tmp_notowned;
7245       tmp_owned = ghosted_ents.subset_by_dimension(ghost_dim);
7246       if (tmp_owned.empty())
7247         return result;
7248 
7249       tmp_notowned = tmp_owned;
7250 
7251       // Next, filter by pstatus; can only create adj entities for entities I own
7252       result = filter_pstatus(tmp_owned, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &tmp_owned);MB_CHK_SET_ERR(result, "Failed to filter owned entities");
7253 
7254       tmp_notowned -= tmp_owned;
7255 
7256       // Get edges first
7257       if (1 == addl_ents || 3 == addl_ents) {
7258         result = mbImpl->get_adjacencies(tmp_owned, 1, true, tmp_ents, Interface::UNION);MB_CHK_SET_ERR(result, "Failed to get edge adjacencies for owned ghost entities");
7259         result = mbImpl->get_adjacencies(tmp_notowned, 1, false, tmp_ents, Interface::UNION);MB_CHK_SET_ERR(result, "Failed to get edge adjacencies for notowned ghost entities");
7260       }
7261       if (2 == addl_ents || 3 == addl_ents) {
7262         result = mbImpl->get_adjacencies(tmp_owned, 2, true, tmp_ents, Interface::UNION);MB_CHK_SET_ERR(result, "Failed to get face adjacencies for owned ghost entities");
7263         result = mbImpl->get_adjacencies(tmp_notowned, 2, false, tmp_ents, Interface::UNION);MB_CHK_SET_ERR(result, "Failed to get face adjacencies for notowned ghost entities");
7264       }
7265 
7266       ghosted_ents.merge(tmp_ents);
7267     }
7268 
7269     return result;
7270   }
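
  // Usage sketch (illustrative only, not part of the original source): the
  // bridge/ghost dimensions, layer count, and addl_ents flag used above are
  // normally supplied through exchange_ghost_cells(). A request for one layer
  // of 3D ghost elements bridged across shared faces, with ghosted edges and
  // faces included (addl_ents == 3), might look like this; 'mb' and 'pcomm'
  // are assumed to have been created and shared entities resolved already.
  //
  //   moab::Core mb;
  //   moab::ParallelComm pcomm(&mb, MPI_COMM_WORLD);
  //   // ... load mesh, resolve_shared_ents(), etc. ...
  //   moab::ErrorCode rval =
  //     pcomm.exchange_ghost_cells(3 /*ghost_dim*/, 2 /*bridge_dim*/,
  //                                1 /*num_layers*/, 3 /*addl_ents*/,
  //                                true /*store_remote_handles*/);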
7271 
7272   ErrorCode ParallelComm::add_verts(Range &sent_ents)
7273   {
7274     // Get the verts adj to these entities, since we'll have to send those too
7275 
7276     // First check sets
7277     std::pair<Range::const_iterator, Range::const_iterator>
7278       set_range = sent_ents.equal_range(MBENTITYSET);
7279     ErrorCode result = MB_SUCCESS, tmp_result;
7280     for (Range::const_iterator rit = set_range.first; rit != set_range.second; ++rit) {
7281       tmp_result = mbImpl->get_entities_by_type(*rit, MBVERTEX, sent_ents);MB_CHK_SET_ERR(tmp_result, "Failed to get contained verts");
7282     }
7283 
7284     // Now non-sets
7285     Range tmp_ents;
7286     std::copy(sent_ents.begin(), set_range.first, range_inserter(tmp_ents));
7287     result = mbImpl->get_adjacencies(tmp_ents, 0, false, sent_ents,
7288                                      Interface::UNION);MB_CHK_SET_ERR(result, "Failed to get vertices adj to ghosted ents");
7289 
7290     // If there are polyhedra among the sent entities, their faces must be sent too
7291     Range polyhedra = sent_ents.subset_by_type(MBPOLYHEDRON);
7292     // Get all faces adjacent to each polyhedron
7293     result = mbImpl->get_connectivity(polyhedra, sent_ents);MB_CHK_SET_ERR(result, "Failed to get polyhedra faces");
7294     return result;
7295   }
7296 
7297   ErrorCode ParallelComm::exchange_tags(const std::vector<Tag> &src_tags,
7298                                         const std::vector<Tag> &dst_tags,
7299                                         const Range &entities_in)
7300   {
7301     ErrorCode result;
7302     int success;
7303 
7304     myDebug->tprintf(1, "Entering exchange_tags\n");
7305 
7306     // Get all procs interfacing to this proc
7307     std::set<unsigned int> exch_procs;
7308     result = get_comm_procs(exch_procs);MB_CHK_SET_ERR(result, "Failed to get comm procs");
7309 
7310     // Post irecv's for tag messages from all interface procs
7311     // Requests are indexed in the same order as the buffer/sharing proc indices
7312     std::vector<MPI_Request> recv_tag_reqs(3*buffProcs.size(), MPI_REQUEST_NULL);
7313     // sent_ack_reqs(buffProcs.size(), MPI_REQUEST_NULL);
7314     std::vector<unsigned int>::iterator sit;
7315     int ind;
7316 
7317     reset_all_buffers();
7318     int incoming = 0;
7319 
7320     for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++) {
7321       incoming++;
7322       PRINT_DEBUG_IRECV(*sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr,
7323                         INITIAL_BUFF_SIZE, MB_MESG_TAGS_SIZE, incoming);
7324 
7325       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
7326                           MPI_UNSIGNED_CHAR, *sit,
7327                           MB_MESG_TAGS_SIZE, procConfig.proc_comm(),
7328                           &recv_tag_reqs[3*ind]);
7329       if (success != MPI_SUCCESS) {
7330         MB_SET_ERR(MB_FAILURE, "Failed to post irecv in tag exchange");
7331       }
7332     }
7333 
7334     // Pack and send tags from this proc to others
7335     // Make sendReqs vector to simplify initialization
7336     sendReqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
7337 
7338     // Take all shared entities if incoming list is empty
7339     Range entities;
7340     if (entities_in.empty())
7341       std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(entities));
7342     else
7343       entities = entities_in;
7344 
7345     int dum_ack_buff;
7346 
7347     for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++) {
7348       Range tag_ents = entities;
7349 
7350       // Get ents shared by proc *sit
7351       result = filter_pstatus(tag_ents, PSTATUS_SHARED, PSTATUS_AND, *sit);MB_CHK_SET_ERR(result, "Failed pstatus AND check");
7352 
7353       // Remote nonowned entities
7354       if (!tag_ents.empty()) {
7355         result = filter_pstatus(tag_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT);MB_CHK_SET_ERR(result, "Failed pstatus NOT check");
7356       }
7357 
7358       // Pack-send; this also posts receives if store_remote_handles is true
7359       std::vector<Range> tag_ranges;
7360       for (std::vector<Tag>::const_iterator vit = src_tags.begin(); vit != src_tags.end(); ++vit) {
7361         const void* ptr;
7362         int sz;
7363         if (mbImpl->tag_get_default_value(*vit, ptr, sz) != MB_SUCCESS) {
7364           Range tagged_ents;
7365           mbImpl->get_entities_by_type_and_tag(0, MBMAXTYPE, &*vit, 0, 1, tagged_ents);
7366           tag_ranges.push_back(intersect(tag_ents, tagged_ents));
7367         }
7368         else {
7369           tag_ranges.push_back(tag_ents);
7370         }
7371       }
7372 
7373       // Pack the data
7374       // Reserve space on front for size and for initial buff size
7375       localOwnedBuffs[ind]->reset_ptr(sizeof(int));
7376 
7377       result = pack_tags(tag_ents,
7378                          src_tags, dst_tags, tag_ranges,
7379                          localOwnedBuffs[ind], true, *sit);MB_CHK_SET_ERR(result, "Failed to count buffer in pack_send_tag");
7380 
7381       // Now send it
7382       result = send_buffer(*sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[3*ind],
7383                            recv_tag_reqs[3*ind + 2], &dum_ack_buff, incoming);MB_CHK_SET_ERR(result, "Failed to send buffer");
7384     }
7385 
7386     // Receive/unpack tags
7387     while (incoming) {
7388       MPI_Status status;
7389       int index_in_recv_requests;
7390       PRINT_DEBUG_WAITANY(recv_tag_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank());
7391       success = MPI_Waitany(3*buffProcs.size(), &recv_tag_reqs[0], &index_in_recv_requests, &status);
7392       if (MPI_SUCCESS != success) {
7393         MB_SET_ERR(MB_FAILURE, "Failed in waitany in tag exchange");
7394       }
7395       // The processor index in buffProcs is the request index divided by 3
7396       ind = index_in_recv_requests / 3;
7397 
7398       PRINT_DEBUG_RECD(status);
7399 
7400       // OK, received something; decrement incoming counter
7401       incoming--;
7402 
7403       bool done = false;
7404       std::vector<EntityHandle> dum_vec;
7405       result = recv_buffer(MB_MESG_TAGS_SIZE,
7406                            status,
7407                            remoteOwnedBuffs[ind],
7408                            recv_tag_reqs[3*ind + 1], // This is for receiving the second message
7409                            recv_tag_reqs[3*ind + 2], // This would be for ack, but it is not used; consider removing it
7410                            incoming,
7411                            localOwnedBuffs[ind],
7412                            sendReqs[3*ind + 1], // Send request for sending the second message
7413                            sendReqs[3*ind + 2], // This is for sending the ack
7414                            done);MB_CHK_SET_ERR(result, "Failed to resize recv buffer");
7415       if (done) {
7416         remoteOwnedBuffs[ind]->reset_ptr(sizeof(int));
7417         result = unpack_tags(remoteOwnedBuffs[ind]->buff_ptr,
7418                              dum_vec, true, buffProcs[ind]);MB_CHK_SET_ERR(result, "Failed to recv-unpack-tag message");
7419       }
7420     }
7421 
7422     // OK, now wait
7423     if (myDebug->get_verbosity() == 5) {
7424       success = MPI_Barrier(procConfig.proc_comm());
7425     }
7426     else {
7427       MPI_Status status[3*MAX_SHARING_PROCS];
7428       success = MPI_Waitall(3*buffProcs.size(), &sendReqs[0], status);
7429     }
7430     if (MPI_SUCCESS != success) {
7431       MB_SET_ERR(MB_FAILURE, "Failure in waitall in tag exchange");
7432     }
7433 
7434     // If source tag is not equal to destination tag, then
7435     // do local copy for owned entities (communicate w/ self)
7436     assert(src_tags.size() == dst_tags.size());
7437     if (src_tags != dst_tags) {
7438       std::vector<unsigned char> data;
7439       Range owned_ents;
7440       if (entities_in.empty())
7441         std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(owned_ents));
7442       else
7443         owned_ents = entities_in;
7444       result = filter_pstatus(owned_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT);MB_CHK_SET_ERR(result, "Failure to get subset of owned entities");
7445 
7446       if (!owned_ents.empty()) { // Check this here, otherwise we get
7447         // Unexpected results from get_entities_by_type_and_tag w/ Interface::INTERSECT
7448         for (size_t i = 0; i < src_tags.size(); i++) {
7449           if (src_tags[i] == dst_tags[i])
7450             continue;
7451 
7452           Range tagged_ents(owned_ents);
7453           result = mbImpl->get_entities_by_type_and_tag(0, MBMAXTYPE,
7454                                                         &src_tags[i],
7455                                                         0, 1, tagged_ents,
7456                                                         Interface::INTERSECT);MB_CHK_SET_ERR(result, "get_entities_by_type_and_tag(type == MBMAXTYPE) failed");
7457 
7458           int sz, size2;
7459           result = mbImpl->tag_get_bytes(src_tags[i], sz);MB_CHK_SET_ERR(result, "tag_get_size failed");
7460           result = mbImpl->tag_get_bytes(dst_tags[i], size2);MB_CHK_SET_ERR(result, "tag_get_size failed");
7461           if (sz != size2) {
7462             MB_SET_ERR(MB_FAILURE, "tag sizes don't match");
7463           }
7464 
7465           data.resize(sz * tagged_ents.size());
7466           result = mbImpl->tag_get_data(src_tags[i], tagged_ents, &data[0]);MB_CHK_SET_ERR(result, "tag_get_data failed");
7467           result = mbImpl->tag_set_data(dst_tags[i], tagged_ents, &data[0]);MB_CHK_SET_ERR(result, "tag_set_data failed");
7468         }
7469       }
7470     }
7471 
7472     myDebug->tprintf(1, "Exiting exchange_tags");
7473 
7474     return MB_SUCCESS;
7475   }
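
  // Usage sketch (illustrative only): push owned values of a dense double tag
  // to the sharing/ghost copies. The tag name "TEMPERATURE" and the objects
  // 'mb' and 'pcomm' are assumptions; an empty Range means "all shared
  // entities", as handled above.
  //
  //   moab::Tag temp_tag;
  //   double def_val = 0.0;
  //   mb.tag_get_handle("TEMPERATURE", 1, moab::MB_TYPE_DOUBLE, temp_tag,
  //                     moab::MB_TAG_DENSE | moab::MB_TAG_CREAT, &def_val);
  //   std::vector<moab::Tag> tags(1, temp_tag);
  //   moab::Range ents; // left empty on purpose
  //   moab::ErrorCode rval = pcomm.exchange_tags(tags, tags, ents);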
7476 
7477   ErrorCode ParallelComm::reduce_tags(const std::vector<Tag> &src_tags,
7478                                       const std::vector<Tag> &dst_tags,
7479                                       const MPI_Op mpi_op,
7480                                       const Range &entities_in)
7481   {
7482     ErrorCode result;
7483     int success;
7484 
7485     myDebug->tprintf(1, "Entering reduce_tags\n");
7486 
7487     // Check that restrictions are met: number of source/dst tags...
7488     if (src_tags.size() != dst_tags.size()) {
7489       MB_SET_ERR(MB_FAILURE, "Source and destination tag handles must be specified for reduce_tags");
7490     }
7491 
7492     // ... tag data types
7493     std::vector<Tag>::const_iterator vits, vitd;
7494     int tags_size, tagd_size;
7495     DataType tags_type, tagd_type;
7496     std::vector<unsigned char> vals;
7497     std::vector<int> tags_sizes;
7498     for (vits = src_tags.begin(), vitd = dst_tags.begin(); vits != src_tags.end(); ++vits, ++vitd) {
7499       // Checks on tag characteristics
7500       result = mbImpl->tag_get_data_type(*vits, tags_type);MB_CHK_SET_ERR(result, "Failed to get src tag data type");
7501       if (tags_type != MB_TYPE_INTEGER && tags_type != MB_TYPE_DOUBLE &&
7502           tags_type != MB_TYPE_BIT) {
7503         MB_SET_ERR(MB_FAILURE, "Src/dst tags must have integer, double, or bit data type");
7504       }
7505 
7506       result = mbImpl->tag_get_bytes(*vits, tags_size);MB_CHK_SET_ERR(result, "Failed to get src tag bytes");
7507       vals.resize(tags_size);
7508       result = mbImpl->tag_get_default_value(*vits, &vals[0]);MB_CHK_SET_ERR(result, "Src tag must have default value");
7509 
7510       tags_sizes.push_back(tags_size);
7511 
7512       // OK, those passed; now check whether dest tags, if specified, agree with src tags
7513       if (*vits == *vitd)
7514         continue;
7515 
7516       result = mbImpl->tag_get_bytes(*vitd, tagd_size);MB_CHK_SET_ERR(result, "Couldn't get dst tag bytes");
7517       if (tags_size != tagd_size) {
7518         MB_SET_ERR(MB_FAILURE, "Sizes between src and dst tags don't match");
7519       }
7520       result = mbImpl->tag_get_data_type(*vitd, tagd_type);MB_CHK_SET_ERR(result, "Couldn't get dst tag data type");
7521       if (tags_type != tagd_type) {
7522         MB_SET_ERR(MB_FAILURE, "Src and dst tags must be of same data type");
7523       }
7524     }
7525 
7526     // Get all procs interfacing to this proc
7527     std::set<unsigned int> exch_procs;
7528     result = get_comm_procs(exch_procs);MB_CHK_SET_ERR(result, "Failed to get comm procs");
7529 
7530     // Post irecv's for tag messages from all interface procs
7531     // Requests are indexed in the same order as the buffer/sharing proc indices
7532     std::vector<MPI_Request> recv_tag_reqs(3*buffProcs.size(), MPI_REQUEST_NULL);
7533 
7534     std::vector<unsigned int>::iterator sit;
7535     int ind;
7536 
7537     reset_all_buffers();
7538     int incoming = 0;
7539 
7540     for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++) {
7541       incoming++;
7542       PRINT_DEBUG_IRECV(*sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr,
7543                         INITIAL_BUFF_SIZE, MB_MESG_TAGS_SIZE, incoming);
7544 
7545       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
7546                           MPI_UNSIGNED_CHAR, *sit,
7547                           MB_MESG_TAGS_SIZE, procConfig.proc_comm(),
7548                           &recv_tag_reqs[3*ind]);
7549       if (success != MPI_SUCCESS) {
7550         MB_SET_ERR(MB_FAILURE, "Failed to post irecv in tag exchange");
7551       }
7552     }
7553 
7554     // Pack and send tags from this proc to others
7555     // Make sendReqs vector to simplify initialization
7556     sendReqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
7557 
7558     // Take all shared entities if incoming list is empty
7559     Range entities;
7560     if (entities_in.empty())
7561       std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(entities));
7562     else
7563       entities = entities_in;
7564 
7565     // If the tags are different, copy the source to the dest tag locally
7566     std::vector<Tag>::const_iterator vit = src_tags.begin(), vit2 = dst_tags.begin();
7567     std::vector<int>::const_iterator vsizes = tags_sizes.begin();
7568     for (; vit != src_tags.end(); ++vit, ++vit2, ++vsizes) {
7569       if (*vit == *vit2)
7570         continue;
7571       vals.resize(entities.size()*(*vsizes));
7572       result = mbImpl->tag_get_data(*vit, entities, &vals[0]);MB_CHK_SET_ERR(result, "Didn't get data properly");
7573       result = mbImpl->tag_set_data(*vit2, entities, &vals[0]);MB_CHK_SET_ERR(result, "Didn't set data properly");
7574     }
7575 
7576     int dum_ack_buff;
7577 
7578     for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++) {
7579       Range tag_ents = entities;
7580 
7581       // Get ents shared by proc *sit
7582       result = filter_pstatus(tag_ents, PSTATUS_SHARED, PSTATUS_AND, *sit);MB_CHK_SET_ERR(result, "Failed pstatus AND check");
7583 
7584       // Pack-send
7585       std::vector<Range> tag_ranges;
7586       for (vit = src_tags.begin(); vit != src_tags.end(); ++vit) {
7587         const void* ptr;
7588         int sz;
7589         if (mbImpl->tag_get_default_value(*vit, ptr, sz) != MB_SUCCESS) {
7590           Range tagged_ents;
7591           mbImpl->get_entities_by_type_and_tag(0, MBMAXTYPE, &*vit, 0, 1, tagged_ents);
7592           tag_ranges.push_back(intersect(tag_ents, tagged_ents));
7593         }
7594         else
7595           tag_ranges.push_back(tag_ents);
7596       }
7597 
7598       // Pack the data
7599       // Reserve space on front for size and for initial buff size
7600       localOwnedBuffs[ind]->reset_ptr(sizeof(int));
7601 
7602       result = pack_tags(tag_ents,
7603                          src_tags, dst_tags, tag_ranges,
7604                          localOwnedBuffs[ind], true, *sit);MB_CHK_SET_ERR(result, "Failed to count buffer in pack_send_tag");
7605 
7606       // Now send it
7607       result = send_buffer(*sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[3*ind],
7608                            recv_tag_reqs[3*ind + 2], &dum_ack_buff, incoming);MB_CHK_SET_ERR(result, "Failed to send buffer");
7609     }
7610 
7611     // Receive/unpack tags
7612     while (incoming) {
7613       MPI_Status status;
7614       int index_in_recv_requests;
7615       PRINT_DEBUG_WAITANY(recv_tag_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank());
7616       success = MPI_Waitany(3*buffProcs.size(), &recv_tag_reqs[0], &index_in_recv_requests, &status);
7617       if (MPI_SUCCESS != success) {
7618         MB_SET_ERR(MB_FAILURE, "Failed in waitany in tag exchange");
7619       }
7620       ind = index_in_recv_requests / 3;
7621 
7622       PRINT_DEBUG_RECD(status);
7623 
7624       // OK, received something; decrement incoming counter
7625       incoming--;
7626 
7627       bool done = false;
7628       std::vector<EntityHandle> dum_vec;
7629       result = recv_buffer(MB_MESG_TAGS_SIZE, status,
7630                         remoteOwnedBuffs[ind],
7631                         recv_tag_reqs[3*ind + 1], // This is for receiving the second message
7632                         recv_tag_reqs[3*ind + 2], // This would be for ack, but it is not used; consider removing it
7633                         incoming, localOwnedBuffs[ind],
7634                         sendReqs[3*ind + 1], // Send request for sending the second message
7635                         sendReqs[3*ind + 2], // This is for sending the ack
7636                         done);MB_CHK_SET_ERR(result, "Failed to resize recv buffer");
7637       if (done) {
7638         remoteOwnedBuffs[ind]->reset_ptr(sizeof(int));
7639         result = unpack_tags(remoteOwnedBuffs[ind]->buff_ptr,
7640                              dum_vec, true, buffProcs[ind], &mpi_op);MB_CHK_SET_ERR(result, "Failed to recv-unpack-tag message");
7641       }
7642     }
7643 
7644     // OK, now wait
7645     if (myDebug->get_verbosity() == 5) {
7646       success = MPI_Barrier(procConfig.proc_comm());
7647     }
7648     else {
7649       MPI_Status status[3*MAX_SHARING_PROCS];
7650       success = MPI_Waitall(3*buffProcs.size(), &sendReqs[0], status);
7651     }
7652     if (MPI_SUCCESS != success) {
7653       MB_SET_ERR(MB_FAILURE, "Failure in waitall in tag exchange");
7654     }
7655 
7656     myDebug->tprintf(1, "Exiting reduce_tags");
7657 
7658     return MB_SUCCESS;
7659   }
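
  // Usage sketch (illustrative only): combine a per-entity integer tag across
  // all sharing processors with MPI_SUM. 'pcomm' and 'count_tag' are assumed;
  // note that reduce_tags() requires the source tag to have a default value,
  // as checked at the top of the routine.
  //
  //   std::vector<moab::Tag> tags(1, count_tag);
  //   moab::Range ents; // empty -> all shared entities
  //   moab::ErrorCode rval = pcomm.reduce_tags(tags, tags, MPI_SUM, ents);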
7660 
7661   //! return sharedp tag
7662   Tag ParallelComm::sharedp_tag()
7663   {
7664     if (!sharedpTag) {
7665       int def_val = -1;
7666       ErrorCode result = mbImpl->tag_get_handle(PARALLEL_SHARED_PROC_TAG_NAME,
7667                                                 1, MB_TYPE_INTEGER, sharedpTag,
7668                                                 MB_TAG_DENSE | MB_TAG_CREAT, &def_val);
7669       if (MB_SUCCESS != result)
7670         return 0;
7671     }
7672 
7673     return sharedpTag;
7674   }
7675 
7676   //! return sharedps tag
7677   Tag ParallelComm::sharedps_tag()
7678   {
7679     if (!sharedpsTag) {
7680       ErrorCode result = mbImpl->tag_get_handle(PARALLEL_SHARED_PROCS_TAG_NAME,
7681                                                 MAX_SHARING_PROCS, MB_TYPE_INTEGER,
7682                                                 sharedpsTag, MB_TAG_SPARSE | MB_TAG_CREAT);
7683       if (MB_SUCCESS != result)
7684         return 0;
7685     }
7686 
7687     return sharedpsTag;
7688   }
7689 
7690   //! return sharedh tag
7691   Tag ParallelComm::sharedh_tag()
7692   {
7693     if (!sharedhTag) {
7694       EntityHandle def_val = 0;
7695       ErrorCode result = mbImpl->tag_get_handle(PARALLEL_SHARED_HANDLE_TAG_NAME,
7696                                                 1, MB_TYPE_HANDLE, sharedhTag,
7697                                                 MB_TAG_DENSE | MB_TAG_CREAT, &def_val);
7698       if (MB_SUCCESS != result)
7699         return 0;
7700     }
7701 
7702     return sharedhTag;
7703   }
7704 
7705   //! return sharedhs tag
7706   Tag ParallelComm::sharedhs_tag()
7707   {
7708     if (!sharedhsTag) {
7709       ErrorCode result = mbImpl->tag_get_handle(PARALLEL_SHARED_HANDLES_TAG_NAME,
7710                                                 MAX_SHARING_PROCS, MB_TYPE_HANDLE,
7711                                                 sharedhsTag, MB_TAG_SPARSE | MB_TAG_CREAT);
7712       if (MB_SUCCESS != result)
7713         return 0;
7714     }
7715 
7716     return sharedhsTag;
7717   }
7718 
7719   //! return pstatus tag
7720   Tag ParallelComm::pstatus_tag()
7721   {
7722     if (!pstatusTag) {
7723       unsigned char tmp_pstatus = 0;
7724       ErrorCode result = mbImpl->tag_get_handle(PARALLEL_STATUS_TAG_NAME,
7725                                                 1, MB_TYPE_OPAQUE, pstatusTag,
7726                                                 MB_TAG_DENSE | MB_TAG_CREAT,
7727                                                 &tmp_pstatus);
7728       if (MB_SUCCESS != result)
7729         return 0;
7730     }
7731 
7732     return pstatusTag;
7733   }
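
  // Usage sketch (illustrative only): the pstatus tag holds the PSTATUS_* bit
  // flags tested throughout this file. Given a Range 'ents' and a ParallelComm
  // 'pcomm' (both assumed), the shared-but-not-owned subset can be extracted
  // without modifying 'ents' via filter_pstatus():
  //
  //   moab::Range not_owned;
  //   moab::ErrorCode rval =
  //     pcomm.filter_pstatus(ents, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &not_owned);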
7734 
7735   //! return partition set tag
7736   Tag ParallelComm::partition_tag()
7737   {
7738     if (!partitionTag) {
7739       int dum_id = -1;
7740       ErrorCode result = mbImpl->tag_get_handle(PARALLEL_PARTITION_TAG_NAME,
7741                                                 1, MB_TYPE_INTEGER, partitionTag,
7742                                                 MB_TAG_SPARSE | MB_TAG_CREAT, &dum_id);
7743       if (MB_SUCCESS != result)
7744         return 0;
7745     }
7746 
7747     return partitionTag;
7748   }
7749 
7750   //! return pcomm tag; the interface is passed in because this is a static function
7751   Tag ParallelComm::pcomm_tag(Interface *impl,
7752                               bool create_if_missing)
7753   {
7754     Tag this_tag = 0;
7755     ErrorCode result;
7756     if (create_if_missing) {
7757       result = impl->tag_get_handle(PARALLEL_COMM_TAG_NAME,
7758                                     MAX_SHARING_PROCS*sizeof(ParallelComm*),
7759                                     MB_TYPE_OPAQUE, this_tag, MB_TAG_SPARSE | MB_TAG_CREAT);
7760     }
7761     else {
7762       result = impl->tag_get_handle(PARALLEL_COMM_TAG_NAME,
7763                                     MAX_SHARING_PROCS*sizeof(ParallelComm*),
7764                                     MB_TYPE_OPAQUE, this_tag, MB_TAG_SPARSE);
7765     }
7766 
7767     if (MB_SUCCESS != result)
7768       return 0;
7769 
7770     return this_tag;
7771   }
7772 
7773   //! get the indexed pcomm object from the interface
7774   ParallelComm *ParallelComm::get_pcomm(Interface *impl, const int index)
7775   {
7776     Tag pc_tag = pcomm_tag(impl, false);
7777     if (0 == pc_tag)
7778       return NULL;
7779 
7780     const EntityHandle root = 0;
7781     ParallelComm *pc_array[MAX_SHARING_PROCS];
7782     ErrorCode result = impl->tag_get_data(pc_tag, &root, 1, (void*)pc_array);
7783     if (MB_SUCCESS != result)
7784       return NULL;
7785 
7786     return pc_array[index];
7787   }
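
  // Usage sketch (illustrative only): retrieve an existing ParallelComm by
  // index (0 is the usual case when a single instance was created on 'mb',
  // which is assumed here), falling back to constructing one.
  //
  //   moab::ParallelComm* pc = moab::ParallelComm::get_pcomm(&mb, 0);
  //   if (!pc)
  //     pc = new moab::ParallelComm(&mb, MPI_COMM_WORLD);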
7788 
7789   ErrorCode ParallelComm::get_all_pcomm(Interface* impl, std::vector<ParallelComm*>& list)
7790   {
7791     Tag pc_tag = pcomm_tag(impl, false);
7792     if (0 == pc_tag)
7793       return MB_TAG_NOT_FOUND;
7794 
7795     const EntityHandle root = 0;
7796     ParallelComm *pc_array[MAX_SHARING_PROCS];
7797     ErrorCode rval = impl->tag_get_data(pc_tag, &root, 1, pc_array);
7798     if (MB_SUCCESS != rval)
7799       return rval;
7800 
7801     for (int i = 0; i < MAX_SHARING_PROCS; i++) {
7802       if (pc_array[i])
7803         list.push_back(pc_array[i]);
7804     }
7805 
7806     return MB_SUCCESS;
7807   }
7808 
7809   //! get the indexed pcomm object from the interface
7810   ParallelComm *ParallelComm::get_pcomm(Interface *impl,
7811                                         EntityHandle prtn,
7812                                         const MPI_Comm* comm)
7813   {
7814     ErrorCode rval;
7815     ParallelComm* result = 0;
7816 
7817     Tag prtn_tag;
7818     rval = impl->tag_get_handle(PARTITIONING_PCOMM_TAG_NAME,
7819                                 1, MB_TYPE_INTEGER, prtn_tag,
7820                                 MB_TAG_SPARSE | MB_TAG_CREAT);
7821     if (MB_SUCCESS != rval)
7822       return 0;
7823 
7824     int pcomm_id;
7825     rval = impl->tag_get_data(prtn_tag, &prtn, 1, &pcomm_id);
7826     if (MB_SUCCESS == rval) {
7827       result = get_pcomm(impl, pcomm_id);
7828     }
7829     else if (MB_TAG_NOT_FOUND == rval && comm) {
7830       result = new ParallelComm(impl, *comm, &pcomm_id);
7831       if (!result)
7832         return 0;
7833       result->set_partitioning(prtn);
7834 
7835       rval = impl->tag_set_data(prtn_tag, &prtn, 1, &pcomm_id);
7836       if (MB_SUCCESS != rval) {
7837         delete result;
7838         result = 0;
7839       }
7840     }
7841 
7842     return result;
7843   }
7844 
7845   ErrorCode ParallelComm::set_partitioning(EntityHandle set)
7846   {
7847     ErrorCode rval;
7848     Tag prtn_tag;
7849     rval = mbImpl->tag_get_handle(PARTITIONING_PCOMM_TAG_NAME,
7850                                   1, MB_TYPE_INTEGER, prtn_tag,
7851                                   MB_TAG_SPARSE | MB_TAG_CREAT);
7852     if (MB_SUCCESS != rval)
7853       return rval;
7854 
7855     // Get my id
7856     ParallelComm* pcomm_arr[MAX_SHARING_PROCS];
7857     Tag pc_tag = pcomm_tag(mbImpl, false);
7858     if (0 == pc_tag)
7859       return MB_FAILURE;
7860     const EntityHandle root = 0;
7861     ErrorCode result = mbImpl->tag_get_data(pc_tag, &root, 1, pcomm_arr);
7862     if (MB_SUCCESS != result)
7863       return MB_FAILURE;
7864     int id = std::find(pcomm_arr, pcomm_arr + MAX_SHARING_PROCS, this) - pcomm_arr;
7865     if (id == MAX_SHARING_PROCS)
7866       return MB_FAILURE;
7867 
7868     EntityHandle old = partitioningSet;
7869     if (old) {
7870       rval = mbImpl->tag_delete_data(prtn_tag, &old, 1);
7871       if (MB_SUCCESS != rval)
7872         return rval;
7873       partitioningSet = 0;
7874     }
7875 
7876     if (!set)
7877       return MB_SUCCESS;
7878 
7879     Range contents;
7880     if (old) {
7881       rval = mbImpl->get_entities_by_handle(old, contents);
7882       if (MB_SUCCESS != rval)
7883         return rval;
7884     }
7885     else {
7886       contents = partition_sets();
7887     }
7888 
7889     rval = mbImpl->add_entities(set, contents);
7890     if (MB_SUCCESS != rval)
7891       return rval;
7892 
7893     // Store pcomm id on new partition set
7894     rval = mbImpl->tag_set_data(prtn_tag, &set, 1, &id);
7895     if (MB_SUCCESS != rval)
7896       return rval;
7897 
7898     partitioningSet = set;
7899     return MB_SUCCESS;
7900   }
7901 
7902   //! return all the entities in parts owned locally
7903   ErrorCode ParallelComm::get_part_entities(Range &ents, int dim)
7904   {
7905     ErrorCode result;
7906 
7907     for (Range::iterator rit = partitionSets.begin();
7908          rit != partitionSets.end(); ++rit) {
7909       Range tmp_ents;
7910       if (-1 == dim)
7911         result = mbImpl->get_entities_by_handle(*rit, tmp_ents, true);
7912       else
7913         result = mbImpl->get_entities_by_dimension(*rit, dim, tmp_ents, true);
7914 
7915       if (MB_SUCCESS != result) return result;
7916       ents.merge(tmp_ents);
7917     }
7918 
7919     return MB_SUCCESS;
7920   }
7921 
7922   /** \brief Return the rank of the owning processor and the entity's handle on that processor
7923    */
7924   ErrorCode ParallelComm::get_owner_handle(EntityHandle entity,
7925                                            int &owner,
7926                                            EntityHandle &handle)
7927   {
7928     unsigned char pstat;
7929     int sharing_procs[MAX_SHARING_PROCS];
7930     EntityHandle sharing_handles[MAX_SHARING_PROCS];
7931 
7932     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &entity, 1,
7933                                             &pstat);MB_CHK_SET_ERR(result, "Failed to get pstatus tag data");
7934     if (!(pstat & PSTATUS_NOT_OWNED)) {
7935       owner = proc_config().proc_rank();
7936       handle = entity;
7937     }
7938     else if (pstat & PSTATUS_MULTISHARED) {
7939       result = mbImpl->tag_get_data(sharedps_tag(), &entity, 1,
7940                                     sharing_procs);MB_CHK_SET_ERR(result, "Failed to get sharedps tag data");
7941       owner = sharing_procs[0];
7942       result = mbImpl->tag_get_data(sharedhs_tag(), &entity, 1,
7943                                     sharing_handles);MB_CHK_SET_ERR(result, "Failed to get sharedhs tag data");
7944       handle = sharing_handles[0];
7945     }
7946     else if (pstat & PSTATUS_SHARED) {
7947       result = mbImpl->tag_get_data(sharedp_tag(), &entity, 1,
7948                                     sharing_procs);MB_CHK_SET_ERR(result, "Failed to get sharedp tag data");
7949       owner = sharing_procs[0];
7950       result = mbImpl->tag_get_data(sharedh_tag(), &entity, 1,
7951                                     sharing_handles);MB_CHK_SET_ERR(result, "Failed to get sharedh tag data");
7952       handle = sharing_handles[0];
7953     }
7954     else {
7955       owner = -1;
7956       handle = 0;
7957     }
7958 
7959     return MB_SUCCESS;
7960   }
7961 
7962   ErrorCode ParallelComm::get_global_part_count(int& count_out) const
7963   {
7964     count_out = globalPartCount;
7965     return count_out < 0 ? MB_FAILURE : MB_SUCCESS;
7966   }
7967 
7968   ErrorCode ParallelComm::get_part_owner(int part_id, int& owner) const
7969   {
7970     // FIXME: assumes only 1 local part
7971     owner = part_id;
7972     return MB_SUCCESS;
7973   }
7974 
7975   ErrorCode ParallelComm::get_part_id(EntityHandle /*part*/, int& id_out) const
7976   {
7977     // FIXME: assumes only 1 local part
7978     id_out = proc_config().proc_rank();
7979     return MB_SUCCESS;
7980   }
7981 
7982   ErrorCode ParallelComm::get_part_handle(int id, EntityHandle& handle_out) const
7983   {
7984     // FIXME: assumes only 1 local part
7985     if ((unsigned)id != proc_config().proc_rank())
7986       return MB_ENTITY_NOT_FOUND;
7987     handle_out = partition_sets().front();
7988     return MB_SUCCESS;
7989   }
7990 
7991   ErrorCode ParallelComm::create_part(EntityHandle& set_out)
7992   {
7993     // Mark as invalid so we know that it needs to be updated
7994     globalPartCount = -1;
7995 
7996     // Create set representing part
7997     ErrorCode rval = mbImpl->create_meshset(MESHSET_SET, set_out);
7998     if (MB_SUCCESS != rval)
7999       return rval;
8000 
8001     // Set tag on set
8002     int val = proc_config().proc_rank();
8003     rval = mbImpl->tag_set_data(part_tag(), &set_out, 1, &val);
8004 
8005     if (MB_SUCCESS != rval) {
8006       mbImpl->delete_entities(&set_out, 1);
8007       return rval;
8008     }
8009 
8010     if (get_partitioning()) {
8011       rval = mbImpl->add_entities(get_partitioning(), &set_out, 1);
8012       if (MB_SUCCESS != rval) {
8013         mbImpl->delete_entities(&set_out, 1);
8014         return rval;
8015       }
8016     }
8017 
8018     moab::Range& pSets = this->partition_sets();
8019     if (pSets.index(set_out) < 0) {
8020       pSets.insert(set_out);
8021     }
8022 
8023     return MB_SUCCESS;
8024   }
8025 
8026   ErrorCode ParallelComm::destroy_part(EntityHandle part_id)
8027   {
8028     // Mark as invalid so we know that it needs to be updated
8029     globalPartCount = -1;
8030 
8031     ErrorCode rval;
8032     if (get_partitioning()) {
8033       rval = mbImpl->remove_entities(get_partitioning(), &part_id, 1);
8034       if (MB_SUCCESS != rval)
8035         return rval;
8036     }
8037 
8038     moab::Range& pSets = this->partition_sets();
8039     if (pSets.index(part_id) >= 0) {
8040       pSets.erase(part_id);
8041     }
8042     return mbImpl->delete_entities(&part_id, 1);
8043   }
8044 
8045   ErrorCode ParallelComm::collective_sync_partition()
8046   {
8047     int count = partition_sets().size();
8048     globalPartCount = 0;
8049     int err = MPI_Allreduce(&count, &globalPartCount, 1, MPI_INT, MPI_SUM,
8050                             proc_config().proc_comm());
8051     return err ? MB_FAILURE : MB_SUCCESS;
8052   }
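
  // Usage sketch (illustrative only; 'pcomm' assumed): create a local part,
  // then synchronize the global part count collectively before querying it.
  // collective_sync_partition() must be called on every rank in the
  // communicator, since it performs an MPI_Allreduce.
  //
  //   moab::EntityHandle new_part;
  //   moab::ErrorCode rval = pcomm.create_part(new_part);
  //   rval = pcomm.collective_sync_partition();
  //   int nparts = 0;
  //   rval = pcomm.get_global_part_count(nparts);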
8053 
8054   ErrorCode ParallelComm::get_part_neighbor_ids(EntityHandle part,
8055                                                 int neighbors_out[MAX_SHARING_PROCS],
8056                                                 int& num_neighbors_out)
8057   {
8058     ErrorCode rval;
8059     Range iface;
8060     rval = get_interface_sets(part, iface);
8061     if (MB_SUCCESS != rval)
8062       return rval;
8063 
8064     num_neighbors_out = 0;
8065     int n, j = 0;
8066     int tmp[MAX_SHARING_PROCS] = {0}, curr[MAX_SHARING_PROCS] = {0};
8067     int *parts[2] = { neighbors_out, tmp };
8068     for (Range::iterator i = iface.begin(); i != iface.end(); ++i) {
8069       unsigned char pstat;
8070       rval = get_sharing_data(*i, curr, NULL, pstat, n);
8071       if (MB_SUCCESS != rval)
8072         return rval;
8073       std::sort(curr, curr + n);
8074       assert(num_neighbors_out < MAX_SHARING_PROCS);
8075       int* k = std::set_union(parts[j], parts[j] + num_neighbors_out,
8076                               curr, curr + n, parts[1 - j]);
8077       j = 1 - j;
8078       num_neighbors_out = k - parts[j];
8079     }
8080     if (parts[j] != neighbors_out)
8081       std::copy(parts[j], parts[j] + num_neighbors_out, neighbors_out);
8082 
8083     // Remove input part from list
8084     int id;
8085     rval = get_part_id(part, id);
8086     if (MB_SUCCESS == rval)
8087       num_neighbors_out = std::remove(neighbors_out, neighbors_out + num_neighbors_out, id) - neighbors_out;
8088     return rval;
8089   }
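
  // Usage sketch (illustrative only; 'pcomm' assumed): list the ids of parts
  // that share an interface with the local part.
  //
  //   moab::EntityHandle part = pcomm.partition_sets().front();
  //   int nbrs[MAX_SHARING_PROCS], num_nbrs = 0;
  //   moab::ErrorCode rval = pcomm.get_part_neighbor_ids(part, nbrs, num_nbrs);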
8090 
8091   ErrorCode ParallelComm::get_interface_sets(EntityHandle,
8092                                              Range& iface_sets_out,
8093                                              int* adj_part_id)
8094   {
8095     // FIXME : assumes one part per processor.
8096     // Need to store part iface sets as children to implement
8097     // this correctly.
8098     iface_sets_out = interface_sets();
8099 
8100     if (adj_part_id) {
8101       int part_ids[MAX_SHARING_PROCS], num_parts;
8102       Range::iterator i = iface_sets_out.begin();
8103       while (i != iface_sets_out.end()) {
8104         unsigned char pstat;
8105         ErrorCode rval = get_sharing_data(*i, part_ids, NULL, pstat, num_parts);
8106         if (MB_SUCCESS != rval)
8107           return rval;
8108 
8109         if (std::find(part_ids, part_ids + num_parts, *adj_part_id) - part_ids != num_parts)
8110           ++i;
8111         else
8112           i = iface_sets_out.erase(i);
8113       }
8114     }
8115 
8116     return MB_SUCCESS;
8117   }
8118 
8119   ErrorCode ParallelComm::get_owning_part(EntityHandle handle,
8120                                           int& owning_part_id,
8121                                           EntityHandle* remote_handle)
8122   {
8123     // FIXME : assumes one part per proc, and therefore part_id == rank
8124 
8125     // If entity is not shared, then we're the owner.
8126     unsigned char pstat;
8127     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &handle, 1,
8128                                             &pstat);MB_CHK_SET_ERR(result, "Failed to get pstatus tag data");
8129     if (!(pstat & PSTATUS_NOT_OWNED)) {
8130       owning_part_id = proc_config().proc_rank();
8131       if (remote_handle)
8132         *remote_handle = handle;
8133       return MB_SUCCESS;
8134     }
8135 
8136     // If entity is shared with exactly one other proc, then
8137     // sharedp_tag will contain that proc's (non-negative) rank.
8138     result = mbImpl->tag_get_data(sharedp_tag(), &handle, 1,
8139                                   &owning_part_id);MB_CHK_SET_ERR(result, "Failed to get sharedp tag data");
8140     if (owning_part_id != -1) {
8141       // Done?
8142       if (!remote_handle)
8143         return MB_SUCCESS;
8144 
8145       // Get handles on remote processors (and this one)
8146       return mbImpl->tag_get_data(sharedh_tag(), &handle, 1, remote_handle);
8147     }
8148 
8149     // If here, then the entity is shared with at least two other processors.
8150     // Get the list from the sharedps_tag
8151     const void* part_id_list = 0;
8152     result = mbImpl->tag_get_by_ptr(sharedps_tag(), &handle, 1, &part_id_list);
8153     if (MB_SUCCESS != result)
8154       return result;
8155     owning_part_id = ((const int*)part_id_list)[0];
8156 
8157     // Done?
8158     if (!remote_handle)
8159       return MB_SUCCESS;
8160 
8161     // Get remote handles
8162     const void* handle_list = 0;
8163     result = mbImpl->tag_get_by_ptr(sharedhs_tag(), &handle, 1, &handle_list);
8164     if (MB_SUCCESS != result)
8165       return result;
8166 
8167     *remote_handle = ((const EntityHandle*)handle_list)[0];
8168     return MB_SUCCESS;
8169   }
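
  // Usage sketch (illustrative only; 'pcomm' and an entity handle 'ent' are
  // assumed): find which part owns an entity and its handle on that part.
  //
  //   int owner_part = -1;
  //   moab::EntityHandle remote_h = 0;
  //   moab::ErrorCode rval = pcomm.get_owning_part(ent, owner_part, &remote_h);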
8170 
8171   ErrorCode ParallelComm::get_sharing_parts(EntityHandle entity,
8172                                             int part_ids_out[MAX_SHARING_PROCS],
8173                                             int& num_part_ids_out,
8174                                             EntityHandle remote_handles[MAX_SHARING_PROCS])
8175   {
8176     // FIXME : assumes one part per proc, and therefore part_id == rank
8177 
8178     // If entity is not shared, then we're the owner.
8179     unsigned char pstat;
8180     ErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &entity, 1,
8181                                             &pstat);MB_CHK_SET_ERR(result, "Failed to get pstatus tag data");
8182     if (!(pstat & PSTATUS_SHARED)) {
8183       part_ids_out[0] = proc_config().proc_rank();
8184       if (remote_handles)
8185         remote_handles[0] = entity;
8186       num_part_ids_out = 1;
8187       return MB_SUCCESS;
8188     }
8189 
8190     // If entity is shared with exactly one other proc, then
8191     // sharedp_tag will contain that proc's (non-negative) rank.
8192     result = mbImpl->tag_get_data(sharedp_tag(), &entity, 1,
8193                                   part_ids_out);MB_CHK_SET_ERR(result, "Failed to get sharedp tag data");
8194     if (part_ids_out[0] != -1) {
8195       num_part_ids_out = 2;
8196       part_ids_out[1] = proc_config().proc_rank();
8197 
8198       // Done?
8199       if (!remote_handles)
8200         return MB_SUCCESS;
8201 
8202       // Get handles on remote processors (and this one)
8203       remote_handles[1] = entity;
8204       return mbImpl->tag_get_data(sharedh_tag(), &entity, 1, remote_handles);
8205     }
8206 
8207     // If here, then the entity is shared with at least two other processors.
8208     // Get the list from the sharedps_tag
8209     result = mbImpl->tag_get_data(sharedps_tag(), &entity, 1, part_ids_out);
8210     if (MB_SUCCESS != result)
8211       return result;
8212     // Count number of valid (positive) entries in sharedps_tag
8213     for (num_part_ids_out = 0; num_part_ids_out < MAX_SHARING_PROCS &&
8214            part_ids_out[num_part_ids_out] >= 0; num_part_ids_out++);
8215     //part_ids_out[num_part_ids_out++] = proc_config().proc_rank();
8216 #ifndef NDEBUG
8217     int my_idx = std::find(part_ids_out, part_ids_out + num_part_ids_out, proc_config().proc_rank()) - part_ids_out;
8218     assert(my_idx < num_part_ids_out);
8219 #endif
8220 
8221     // Done?
8222     if (!remote_handles)
8223       return MB_SUCCESS;
8224 
8225     // Get remote handles
8226     result = mbImpl->tag_get_data(sharedhs_tag(), &entity, 1, remote_handles);
8227     //remote_handles[num_part_ids_out - 1] = entity;
8228     assert(remote_handles[my_idx] == entity);
8229 
8230     return result;
8231   }
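
  // Usage sketch (illustrative only; 'pcomm' and 'ent' assumed): enumerate
  // every part holding a copy of an entity, with the matching remote handles.
  //
  //   int parts[MAX_SHARING_PROCS];
  //   int num_parts = 0;
  //   moab::EntityHandle handles[MAX_SHARING_PROCS];
  //   moab::ErrorCode rval = pcomm.get_sharing_parts(ent, parts, num_parts, handles);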
8232 
8233   ErrorCode ParallelComm::pack_shared_handles(std::vector<std::vector<SharedEntityData> > &send_data)
8234   {
8235     // Build up send buffers
8236     ErrorCode rval = MB_SUCCESS;
8237     int ent_procs[MAX_SHARING_PROCS];
8238     EntityHandle handles[MAX_SHARING_PROCS];
8239     int num_sharing, tmp_int;
8240     SharedEntityData tmp;
8241     send_data.resize(buffProcs.size());
8242     for (std::vector<EntityHandle>::iterator i = sharedEnts.begin(); i != sharedEnts.end(); ++i) {
8243       tmp.remote = *i; // Swap local/remote so they're correct on the remote proc.
8244       rval = get_owner(*i, tmp_int);
8245       tmp.owner = tmp_int;
8246       if (MB_SUCCESS != rval)
8247         return rval;
8248 
8249       unsigned char pstat;
8250       rval = get_sharing_data(*i, ent_procs, handles, pstat, num_sharing);
8251       if (MB_SUCCESS != rval)
8252         return rval;
8253       for (int j = 0; j < num_sharing; j++) {
8254         if (ent_procs[j] == (int)proc_config().proc_rank())
8255           continue;
8256         tmp.local = handles[j];
8257         int ind = get_buffers(ent_procs[j]);
8258         assert(-1 != ind);
8259         if ((int)send_data.size() < ind + 1)
8260           send_data.resize(ind + 1);
8261         send_data[ind].push_back(tmp);
8262       }
8263     }
8264 
8265     return MB_SUCCESS;
8266   }
8267 
8268   ErrorCode ParallelComm::exchange_all_shared_handles(std::vector<std::vector<SharedEntityData> > &send_data,
8269                                                       std::vector<std::vector<SharedEntityData> > &result)
8270   {
8271     int ierr;
8272     const int tag = 0;
8273     const MPI_Comm cm = procConfig.proc_comm();
8274     const int num_proc = buffProcs.size();
8275     const std::vector<int> procs(buffProcs.begin(), buffProcs.end());
8276     std::vector<MPI_Request> recv_req(buffProcs.size(), MPI_REQUEST_NULL);
8277     std::vector<MPI_Request> send_req(buffProcs.size(), MPI_REQUEST_NULL);
8278 
8279     // Set up to receive sizes
8280     std::vector<int> sizes_send(num_proc), sizes_recv(num_proc);
8281     for (int i = 0; i < num_proc; i++) {
8282       ierr = MPI_Irecv(&sizes_recv[i], 1, MPI_INT, procs[i], tag, cm, &recv_req[i]);
8283       if (ierr)
8284         return MB_FILE_WRITE_ERROR;
8285     }
8286 
8287     // Send sizes
8288     assert(num_proc == (int)send_data.size());
8289 
8290     result.resize(num_proc);
8291     for (int i = 0; i < num_proc; i++) {
8292       sizes_send[i] = send_data[i].size();
8293       ierr = MPI_Isend(&sizes_send[i], 1, MPI_INT, buffProcs[i], tag, cm, &send_req[i]);
8294       if (ierr)
8295         return MB_FILE_WRITE_ERROR;
8296     }
8297 
8298     // Receive sizes
8299     std::vector<MPI_Status> stat(num_proc);
8300     ierr = MPI_Waitall(num_proc, &recv_req[0], &stat[0]);
8301     if (ierr)
8302       return MB_FILE_WRITE_ERROR;
8303 
8304     // Wait until all sizes are sent (clean up pending req's)
8305     ierr = MPI_Waitall(num_proc, &send_req[0], &stat[0]);
8306     if (ierr)
8307       return MB_FILE_WRITE_ERROR;
8308 
8309     // Set up to receive data
8310     for (int i = 0; i < num_proc; i++) {
8311       result[i].resize(sizes_recv[i]);
8312       ierr = MPI_Irecv( (void *)( &(result[i][0]) ),
8313                        sizeof(SharedEntityData)*sizes_recv[i],
8314                        MPI_UNSIGNED_CHAR,
8315                        buffProcs[i], tag, cm, &recv_req[i]);
8316       if (ierr)
8317         return MB_FILE_WRITE_ERROR;
8318     }
8319 
8320     // Send data
8321     for (int i = 0; i < num_proc; i++) {
8322       ierr = MPI_Isend((void *)( &(send_data[i][0]) ),
8323                        sizeof(SharedEntityData)*sizes_send[i],
8324                        MPI_UNSIGNED_CHAR,
8325                        buffProcs[i], tag, cm, &send_req[i]);
8326       if (ierr)
8327         return MB_FILE_WRITE_ERROR;
8328     }
8329 
8330     // Receive data
8331     ierr = MPI_Waitall(num_proc, &recv_req[0], &stat[0]);
8332     if (ierr)
8333       return MB_FILE_WRITE_ERROR;
8334 
8335     // Wait until everything is sent to release send buffers
8336     ierr = MPI_Waitall(num_proc, &send_req[0], &stat[0]);
8337     if (ierr)
8338       return MB_FILE_WRITE_ERROR;
8339 
8340     return MB_SUCCESS;
8341   }
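
  // The routine above uses a two-phase pattern: message sizes are exchanged
  // first so that each receive buffer can be sized exactly before the payload
  // round is posted. A stripped-down version of the same idiom, with 'procs',
  // 'payload', and 'comm' assumed to be defined by the caller, looks like:
  //
  //   const int np = (int)procs.size();
  //   std::vector<int> ssize(np), rsize(np);
  //   std::vector<MPI_Request> rreq(np), sreq(np);
  //   for (int i = 0; i < np; i++)
  //     MPI_Irecv(&rsize[i], 1, MPI_INT, procs[i], 0, comm, &rreq[i]);
  //   for (int i = 0; i < np; i++) {
  //     ssize[i] = (int)payload[i].size();
  //     MPI_Isend(&ssize[i], 1, MPI_INT, procs[i], 0, comm, &sreq[i]);
  //   }
  //   MPI_Waitall(np, &rreq[0], MPI_STATUSES_IGNORE);
  //   MPI_Waitall(np, &sreq[0], MPI_STATUSES_IGNORE);
  //   // ...resize each receive buffer to rsize[i], then repeat the
  //   // irecv/isend/waitall sequence for the actual data...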
8342 
8343   ErrorCode ParallelComm::check_all_shared_handles(bool print_em)
8344   {
8345     // Get all shared ent data from other procs
8346     std::vector<std::vector<SharedEntityData> > shents(buffProcs.size()),
8347       send_data(buffProcs.size());
8348 
8349     ErrorCode result;
8350     bool done = false;
8351 
8352     while (!done) {
8353       result = check_local_shared();
8354       if (MB_SUCCESS != result) {
8355         done = true;
8356         continue;
8357       }
8358 
8359       result = pack_shared_handles(send_data);
8360       if (MB_SUCCESS != result) {
8361         done = true;
8362         continue;
8363       }
8364 
8365       result = exchange_all_shared_handles(send_data, shents);
8366       if (MB_SUCCESS != result) {
8367         done = true;
8368         continue;
8369       }
8370 
8371       if (!shents.empty())
8372         result = check_my_shared_handles(shents);
8373       done = true;
8374     }
8375 
8376     if (MB_SUCCESS != result && print_em) {
8377 #ifdef MOAB_HAVE_HDF5
8378       std::ostringstream ent_str;
8379       ent_str << "mesh." << procConfig.proc_rank() << ".h5m";
8380       mbImpl->write_mesh(ent_str.str().c_str());
8381 #endif
8382     }
8383 
8384     return result;
8385   }
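
  // Usage sketch (illustrative only; 'pcomm' assumed): this consistency check
  // is typically run after resolve_shared_ents() or a ghost exchange as a
  // debugging aid; with print_em == true and HDF5 support built in, a failing
  // rank writes its mesh to a per-rank .h5m file, as seen above.
  //
  //   moab::ErrorCode rval = pcomm.check_all_shared_handles(true);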
8386 
8387   ErrorCode ParallelComm::check_local_shared()
8388   {
8389     // Do some local checks on shared entities to make sure things look
8390     // consistent
8391 
8392     // Check that non-vertex shared entities are shared by same procs as all
8393     // their vertices
8394     //std::pair<Range::const_iterator,Range::const_iterator> vert_it =
8395     //    sharedEnts.equal_range(MBVERTEX);
8396     std::vector<EntityHandle> dum_connect;
8397     const EntityHandle *connect;
8398     int num_connect;
8399     int tmp_procs[MAX_SHARING_PROCS];
8400     EntityHandle tmp_hs[MAX_SHARING_PROCS];
8401     std::set<int> tmp_set, vset;
8402     int num_ps;
8403     ErrorCode result;
8404     unsigned char pstat;
8405     Range bad_ents;
8406     std::vector<std::string> errors;
8407 
8408     std::vector<EntityHandle>::const_iterator vit;
8409     for (vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit) {
8410       // Get sharing procs for this ent
8411       result = get_sharing_data(*vit, tmp_procs, tmp_hs, pstat, num_ps);
8412       if (MB_SUCCESS != result) {
8413         bad_ents.insert(*vit);
8414         errors.push_back(std::string("Failure getting sharing data."));
8415         continue;
8416       }
8417 
8418       bool bad = false;
8419       // Entity must be shared
8420       if (!(pstat & PSTATUS_SHARED))
8421         errors.push_back(std::string("Entity should be shared but isn't.")), bad = true;
8422 
8423       // If entity is not owned this must not be first proc
8424       if (pstat & PSTATUS_NOT_OWNED && tmp_procs[0] == (int)procConfig.proc_rank())
8425         errors.push_back(std::string("Entity not owned but is first proc.")), bad = true;
8426 
8427       // If entity is owned and multishared, this must be first proc
8428       if (!(pstat & PSTATUS_NOT_OWNED) && pstat & PSTATUS_MULTISHARED &&
8429           (tmp_procs[0] != (int)procConfig.proc_rank() || tmp_hs[0] != *vit))
8430         errors.push_back(std::string("Entity owned and multishared but not first proc or not first handle.")), bad = true;
8431 
8432       if (bad) {
8433         bad_ents.insert(*vit);
8434         continue;
8435       }
8436 
8437       EntityType type = mbImpl->type_from_handle(*vit);
8438       if (type == MBVERTEX || type == MBENTITYSET)
8439         continue;
8440 
8441       // Copy element's procs to vset and save size
8442       int orig_ps = num_ps; vset.clear();
8443       std::copy(tmp_procs, tmp_procs + num_ps, std::inserter(vset, vset.begin()));
8444 
8445       // Get vertices for this ent and intersection of sharing procs
8446       result = mbImpl->get_connectivity(*vit, connect, num_connect, false, &dum_connect);
8447       if (MB_SUCCESS != result) {
8448         bad_ents.insert(*vit);
8449         errors.push_back(std::string("Failed to get connectivity."));
8450         continue;
8451       }
8452 
8453       for (int i = 0; i < num_connect; i++) {
8454         result = get_sharing_data(connect[i], tmp_procs, NULL, pstat, num_ps);
8455         if (MB_SUCCESS != result) {
8456           bad_ents.insert(*vit);
8457           continue;
8458         }
8459         if (!num_ps) {
8460           vset.clear();
8461           break;
8462         }
8463         std::sort(tmp_procs, tmp_procs + num_ps);
8464         tmp_set.clear();
8465         std::set_intersection(tmp_procs, tmp_procs + num_ps,
8466                               vset.begin(), vset.end(), std::inserter(tmp_set, tmp_set.end()));
8467         vset.swap(tmp_set);
8468         if (vset.empty())
8469           break;
8470       }
8471 
8472       // Intersect them; should be the same size as orig_ps
8473       tmp_set.clear();
8474       std::set_intersection(tmp_procs, tmp_procs + num_ps,
8475                             vset.begin(), vset.end(), std::inserter(tmp_set, tmp_set.end()));
8476       if (orig_ps != (int)tmp_set.size()) {
8477         errors.push_back(std::string("Vertex proc set not same size as entity proc set."));
8478         bad_ents.insert(*vit);
8479       }
8480     }
8481 
8482     if (!bad_ents.empty()) {
8483       std::cout << "Found bad entities in check_local_shared, proc rank "
8484                 << procConfig.proc_rank() << "," << std::endl;
8485       std::vector<std::string>::iterator sit;
8486       Range::iterator rit;
8487       for (rit = bad_ents.begin(), sit = errors.begin(); rit != bad_ents.end(); ++rit, ++sit) {
8488         list_entities(&(*rit), 1);
8489         std::cout << "Reason: " << *sit << std::endl;
8490       }
8491       return MB_FAILURE;
8492     }
8493 
8494     // To do: check interface sets
8495 
8496     return MB_SUCCESS;
8497   }
8498 
8499   ErrorCode ParallelComm::check_all_shared_handles(ParallelComm **pcs,
8500                                                    int num_pcs)
8501   {
8502     std::vector<std::vector<std::vector<SharedEntityData> > > shents, send_data;
8503     ErrorCode result = MB_SUCCESS, tmp_result;
8504 
8505     // Get all shared ent data from each proc to all other procs
8506     send_data.resize(num_pcs);
8507     for (int p = 0; p < num_pcs; p++) {
8508       tmp_result = pcs[p]->pack_shared_handles(send_data[p]);
8509       if (MB_SUCCESS != tmp_result) result = tmp_result;
8510     }
8511     if (MB_SUCCESS != result) return result;
8512 
8513     // Move the data sorted by sending proc to data sorted by receiving proc
8514     shents.resize(num_pcs);
8515     for (int p = 0; p < num_pcs; p++)
8516       shents[p].resize(pcs[p]->buffProcs.size());
8517 
8518     for (int p = 0; p < num_pcs; p++) {
8519       for (unsigned int idx_p = 0; idx_p < pcs[p]->buffProcs.size(); idx_p++) {
8520         // Move send_data[p][to_p] to shents[to_p][idx_p]
8521         int to_p = pcs[p]->buffProcs[idx_p];
8522         int top_idx_p = pcs[to_p]->get_buffers(p);
8523         assert(-1 != top_idx_p);
8524         shents[to_p][top_idx_p] = send_data[p][idx_p];
8525       }
8526     }
8527 
8528     for (int p = 0; p < num_pcs; p++) {
8529       std::ostringstream ostr;
8530       ostr << "Processor " << p << " bad entities:";
8531       tmp_result = pcs[p]->check_my_shared_handles(shents[p], ostr.str().c_str());
8532       if (MB_SUCCESS != tmp_result) result = tmp_result;
8533     }
8534 
8535     return result;
8536   }
8537 
8538   ErrorCode ParallelComm::check_my_shared_handles(std::vector<std::vector<SharedEntityData> > &shents,
8539                                                   const char *prefix)
8540   {
8541     // Now check against what I think data should be
8542     // Get all shared entities
8543     ErrorCode result;
8544     Range all_shared;
8545     std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(all_shared));
8546     std::vector<EntityHandle> dum_vec;
8547     all_shared.erase(all_shared.upper_bound(MBPOLYHEDRON), all_shared.end());
8548 
8549     Range bad_ents, local_shared;
8550     std::vector<SharedEntityData>::iterator vit;
8551     unsigned char tmp_pstat;
8552     for (unsigned int i = 0; i < shents.size(); i++) {
8553       int other_proc = buffProcs[i];
8554       result = get_shared_entities(other_proc, local_shared);
8555       if (MB_SUCCESS != result) return result;
8556       for (vit = shents[i].begin(); vit != shents[i].end(); ++vit) {
8557         EntityHandle localh = vit->local, remoteh = vit->remote, dumh;
8558         local_shared.erase(localh);
8559         result = get_remote_handles(true, &localh, &dumh, 1, other_proc, dum_vec);
8560         if (MB_SUCCESS != result || dumh != remoteh)
8561           bad_ents.insert(localh);
8562         result = get_pstatus(localh, tmp_pstat);
8563         if (MB_SUCCESS != result ||
8564             (!(tmp_pstat & PSTATUS_NOT_OWNED) && (unsigned)vit->owner != rank()) ||
8565             ((tmp_pstat & PSTATUS_NOT_OWNED) && (unsigned)vit->owner == rank()))
8566           bad_ents.insert(localh);
8567       }
8568 
8569       if (!local_shared.empty())
8570         bad_ents.merge(local_shared);
8571     }
8572 
8573     if (!bad_ents.empty()) {
8574       if (prefix)
8575         std::cout << prefix << std::endl;
8576       list_entities(bad_ents);
8577       return MB_FAILURE;
8578     }
8579     else
8580       return MB_SUCCESS;
8581   }
8582 
8583   ErrorCode ParallelComm::get_shared_entities(int other_proc,
8584                                               Range &shared_ents,
8585                                               int dim,
8586                                               const bool iface,
8587                                               const bool owned_filter)
8588   {
8589     shared_ents.clear();
8590     ErrorCode result = MB_SUCCESS;
8591 
8592     // Dimension
8593     if (-1 != dim) {
8594       DimensionPair dp = CN::TypeDimensionMap[dim];
8595       Range dum_range;
8596       std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(dum_range));
8597       shared_ents.merge(dum_range.lower_bound(dp.first),
8598                         dum_range.upper_bound(dp.second));
8599     }
8600     else
8601       std::copy(sharedEnts.begin(), sharedEnts.end(), range_inserter(shared_ents));
8602 
8603     // Filter by iface
8604     if (iface) {
8605       result = filter_pstatus(shared_ents, PSTATUS_INTERFACE, PSTATUS_AND);MB_CHK_SET_ERR(result, "Failed to filter by iface");
8606     }
8607 
8608     // Filter by owned
8609     if (owned_filter) {
8610       result = filter_pstatus(shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT);MB_CHK_SET_ERR(result, "Failed to filter by owned");
8611     }
8612 
8613     // Filter by proc
8614     if (-1 != other_proc) {
8615       result = filter_pstatus(shared_ents, PSTATUS_SHARED, PSTATUS_AND, other_proc);MB_CHK_SET_ERR(result, "Failed to filter by proc");
8616     }
8617 
8618     return result;
8619   }
8620 
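       // Remove stale parallel tags: for every entity in the given ranges whose sharedp tag
       // still exists but holds -1 (i.e., the entity is no longer shared with any processor),
       // delete its sharedp, sharedh and pstatus tag data.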
8621   ErrorCode ParallelComm::clean_shared_tags(std::vector<Range*>& exchange_ents)
8622   {
8623     for (unsigned int i = 0; i < exchange_ents.size(); i++) {
8624       Range* ents = exchange_ents[i];
8625       int num_ents = ents->size();
8626       Range::iterator it = ents->begin();
8627 
8628       for (int n = 0; n < num_ents; n++) {
8629         int sharing_proc = -2; // Initialize so a failed tag lookup cannot leave an indeterminate value
8630         ErrorCode result = mbImpl->tag_get_data(sharedp_tag(), &(*it), 1,
8631                                                 &sharing_proc);
8632         if (result != MB_TAG_NOT_FOUND && sharing_proc == -1) {
8633           result = mbImpl->tag_delete_data(sharedp_tag(), &(*it), 1);MB_CHK_SET_ERR(result, "Failed to delete sharedp tag data");
8634           result = mbImpl->tag_delete_data(sharedh_tag(), &(*it), 1);MB_CHK_SET_ERR(result, "Failed to delete sharedh tag data");
8635           result = mbImpl->tag_delete_data(pstatus_tag(), &(*it), 1);MB_CHK_SET_ERR(result, "Failed to delete pstatus tag data");
8636         }
8637         ++it;
8638       }
8639     }
8640 
8641     return MB_SUCCESS;
8642   }
8643 
8644   void ParallelComm::set_debug_verbosity(int verb)
8645   {
8646     myDebug->set_verbosity(verb);
8647   }
8648 
8649   int ParallelComm::get_debug_verbosity()
8650   {
8651     return myDebug->get_verbosity();
8652   }
8653 
8654   ErrorCode ParallelComm::get_entityset_procs(EntityHandle set,
8655                                               std::vector<unsigned>& ranks) const
8656   {
8657     return sharedSetData->get_sharing_procs(set, ranks);
8658   }
8659 
8660   ErrorCode ParallelComm::get_entityset_owner(EntityHandle entity_set,
8661                                               unsigned& owner_rank,
8662                                               EntityHandle* remote_handle) const
8663   {
8664     if (remote_handle)
8665       return sharedSetData->get_owner(entity_set, owner_rank, *remote_handle);
8666     else
8667       return sharedSetData->get_owner(entity_set, owner_rank);
8668   }
8669 
8670   ErrorCode ParallelComm::get_entityset_local_handle(unsigned owning_rank,
8671                                                      EntityHandle remote_handle,
8672                                                      EntityHandle& local_handle) const
8673   {
8674     return sharedSetData->get_local_handle(owning_rank, remote_handle, local_handle);
8675   }
8676 
8677   ErrorCode ParallelComm::get_shared_sets(Range& result) const
8678   {
8679     return sharedSetData->get_shared_sets(result);
8680   }
8681 
8682   ErrorCode ParallelComm::get_entityset_owners(std::vector<unsigned>& ranks) const
8683   {
8684     return sharedSetData->get_owning_procs(ranks);
8685   }
8686 
8687   ErrorCode ParallelComm::get_owned_sets(unsigned owning_rank, Range& sets_out) const
8688   {
8689     return sharedSetData->get_shared_sets(owning_rank, sets_out);
8690   }
8691 
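       /*
        * Gather tag data for gather_ents onto root_proc_rank.  Each processor packs a send
        * buffer laid out as
        *   [ int num_ents | num_ents ints (id_tag values) | num_ents * bytes_per_tag (tag values) ]
        * The per-processor buffer sizes are first collected with MPI_Gather and converted to
        * Gatherv displacements via a prefix sum; the buffers themselves are then collected
        * with MPI_Gatherv.  On the root, each received tag value is scattered onto the
        * gather_set entities using its id as a 1-based index, handling multiple entity
        * sequences via tag_iterate().
        */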
8692   ErrorCode ParallelComm::gather_data(Range &gather_ents, Tag &tag_handle,
8693                                       Tag id_tag, EntityHandle gather_set, int root_proc_rank)
8694   {
8695     int dim = mbImpl->dimension_from_handle(*gather_ents.begin());
8696     int bytes_per_tag = 0;
8697     ErrorCode rval = mbImpl->tag_get_bytes(tag_handle, bytes_per_tag);
8698     if (rval != MB_SUCCESS) return rval;
8699 
8700     int sz_buffer = sizeof(int) + gather_ents.size()*(sizeof(int) + bytes_per_tag);
8701     void* senddata = malloc(sz_buffer);
8702     ((int*)senddata)[0] = (int) gather_ents.size();
8703     int* ptr_int = (int*)senddata + 1;
8704     rval = mbImpl->tag_get_data(id_tag, gather_ents, (void*)ptr_int);
8705     if (rval != MB_SUCCESS) { free(senddata); return rval; } // Avoid leaking the send buffer
8706     ptr_int = (int*)(senddata) + 1 + gather_ents.size();
8707     rval = mbImpl->tag_get_data(tag_handle, gather_ents, (void*)ptr_int);
8708     if (rval != MB_SUCCESS) { free(senddata); return rval; } // Avoid leaking the send buffer
8709     std::vector<int> displs(proc_config().proc_size(), 0);
8710     MPI_Gather(&sz_buffer, 1, MPI_INT, &displs[0], 1, MPI_INT, root_proc_rank, comm()); // Root collects each processor's packed-buffer size
8711     std::vector<int> recvcnts(proc_config().proc_size(), 0);
8712     std::copy(displs.begin(), displs.end(), recvcnts.begin()); // recvcnts[i] = buffer size contributed by proc i
8713     std::partial_sum(displs.begin(), displs.end(), displs.begin()); // In-place inclusive prefix sums
8714     std::vector<int>::iterator lastM1 = displs.end() - 1;
8715     std::copy_backward(displs.begin(), lastM1, displs.end()); // Shift right by one element...
8716     //std::copy_backward(displs.begin(), --displs.end(), displs.end());
8717     displs[0] = 0; // ...so displs now holds exclusive prefix sums, i.e. the MPI_Gatherv displacements
8718 
8719     if ((int)rank() != root_proc_rank)
8720       MPI_Gatherv(senddata, sz_buffer, MPI_BYTE, NULL, NULL, NULL, MPI_BYTE, root_proc_rank, comm());
8721     else {
8722       Range gents;
8723       mbImpl->get_entities_by_dimension(gather_set, dim, gents);
8724       int recvbuffsz = gents.size() * (bytes_per_tag + sizeof(int)) + proc_config().proc_size() * sizeof(int);
8725       void* recvbuf = malloc(recvbuffsz);
8726       MPI_Gatherv(senddata, sz_buffer, MPI_BYTE, recvbuf, &recvcnts[0], &displs[0], MPI_BYTE, root_proc_rank, comm());
8727 
8728       void* gvals = NULL;
8729 
8730       // Test whether gents has multiple sequences
8731       bool multiple_sequences = false;
8732       if (gents.psize() > 1)
8733         multiple_sequences = true;
8734       else {
8735         int count;
8736         rval = mbImpl->tag_iterate(tag_handle, gents.begin(), gents.end(), count, gvals);
8737         assert(NULL != gvals);
8738         assert(count > 0);
8739         if ((size_t)count != gents.size()) {
8740           multiple_sequences = true;
8741           gvals = NULL;
8742         }
8743       }
8744 
8745       // If gents has multiple sequences, create a temp buffer for gathered values
8746       if (multiple_sequences) {
8747         gvals = malloc(gents.size() * bytes_per_tag);
8748         assert(NULL != gvals);
8749       }
8750 
8751       for (int i = 0; i != (int)size(); i++) {
8752         int numents = *(int*)(((char*)recvbuf) + displs[i]);
8753         int* id_ptr = (int*)(((char*)recvbuf) + displs[i] + sizeof(int));
8754         char* val_ptr = (char*)(id_ptr + numents);
8755         for (int j = 0; j != numents; j++) {
8756           int idx = id_ptr[j];
8757           memcpy((char*)gvals + (idx - 1)*bytes_per_tag, val_ptr + j*bytes_per_tag, bytes_per_tag);
8758         }
8759       }
8760 
8761       // Free the receive buffer
8762       free(recvbuf);
8763 
8764       // If gents has multiple sequences, copy tag data (stored in the temp buffer) to each sequence separately
8765       if (multiple_sequences) {
8766         Range::iterator iter = gents.begin();
8767         size_t start_idx = 0;
8768         while (iter != gents.end()) {
8769           int count;
8770           void* ptr;
8771           rval = mbImpl->tag_iterate(tag_handle, iter, gents.end(), count, ptr);
8772           assert(NULL != ptr);
8773           assert(count > 0);
8774           memcpy((char*)ptr, (char*)gvals + start_idx * bytes_per_tag, bytes_per_tag * count);
8775 
8776           iter += count;
8777           start_idx += count;
8778         }
8779         assert(start_idx == gents.size());
8780 
8781         // Free the temp buffer
8782         free(gvals);
8783       }
8784     }
8785 
8786     // Free the send data
8787     free(senddata);
8788 
8789     return MB_SUCCESS;
8790   }
8791 
8792   /*
8793    * This call is collective, so we reuse the message ids from tag communications;
8794    * the pattern is similar, but simpler.
8795    * Pack the number of edges, the remote edge handles, then for each edge, the number
8796    *    of intersection points, and then 3 doubles for each intersection point
8797    * On average, there is one intx point per edge, in some cases 2, in some cases 0
8798    *   so on average, the message size is num_edges * (sizeof(eh) + sizeof(int) + 1*3*sizeof(double))
8799    *          = num_edges * (8 + 4 + 24)
8800    */
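       /*
        * Worked example of the estimate above: with 8-byte EntityHandle, 4-byte int and an
        * average of one intersection point (3 doubles) per edge, 1000 owned shared edges give
        * roughly 1000 * (8 + 4 + 24) = 36,000 bytes per message; check_space() below resizes
        * the send buffer to the exact byte count before packing.
        */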
8801   ErrorCode ParallelComm::settle_intersection_points(Range & edges, Range & shared_edges_owned,
8802                                                      std::vector<std::vector<EntityHandle> *> & extraNodesVec,
8803                                                      double tolerance)
8804   {
8805     // The index of an edge in the edges Range will give the index for extraNodesVec
8806     // The communication here follows the same strategy as exchange_tags:
8807     ErrorCode result;
8808     int success;
8809 
8810     myDebug->tprintf(1, "Entering settle_intersection_points\n");
8811 
8812     // Get all procs interfacing to this proc
8813     std::set<unsigned int> exch_procs;
8814     result = get_comm_procs(exch_procs);MB_CHK_SET_ERR(result, "Failed to get communicating processors");
8815 
8816     // Post ghost irecv's for all interface procs
8817     // Index requests the same as buffer/sharing procs indices
8818     std::vector<MPI_Request>  recv_intx_reqs(3 * buffProcs.size(), MPI_REQUEST_NULL);
8819     std::vector<unsigned int>::iterator sit;
8820     int ind;
8821 
8822     reset_all_buffers();
8823     int incoming = 0;
8824 
8825     for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++) {
8826       incoming++;
8827       PRINT_DEBUG_IRECV(*sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr,
8828           INITIAL_BUFF_SIZE, MB_MESG_TAGS_SIZE, incoming);
8829 
8830       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
8831           MPI_UNSIGNED_CHAR, *sit, MB_MESG_TAGS_SIZE, procConfig.proc_comm(),
8832           &recv_intx_reqs[3 * ind]);
8833       if (success != MPI_SUCCESS) {
8834         MB_SET_ERR(MB_FAILURE, "Failed to post irecv in settle intersection point");
8835       }
8836     }
8837 
8838     // Pack and send intersection points from this proc to others
8839     // Make sendReqs vector to simplify initialization
8840     sendReqs.resize(3 * buffProcs.size(), MPI_REQUEST_NULL);
8841 
8842     // Work on the owned shared edges passed in by the caller
8843     Range & entities = shared_edges_owned;
8844 
8845     int dum_ack_buff;
8846 
8847     for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++) {
8848       Range edges_to_send = entities;
8849 
8850       // Get ents shared by proc *sit
8851       result = filter_pstatus(edges_to_send, PSTATUS_SHARED, PSTATUS_AND, *sit);MB_CHK_SET_ERR(result, "Failed pstatus AND check");
8852 
8853       // Remove nonowned entities: not needed here, since these edges are already owned by this proc
8854 
8855       // Pack the data
8856       // Reserve space on front for size and for initial buff size
8857       Buffer* buff = localOwnedBuffs[ind];
8858       buff->reset_ptr(sizeof(int));
8859 
8860       /*result = pack_intx_points(edges_to_send, edges, extraNodesVec,
8861           localOwnedBuffs[ind], *sit);*/
8862 
8863       // Count the data first, so we can check whether there is enough room
8864       // Send the remote handles
8865       std::vector<EntityHandle> dum_remote_edges(edges_to_send.size());
8866       /*
8867        *  get_remote_handles(const bool store_remote_handles,
8868                                  EntityHandle *from_vec,
8869                                  EntityHandle *to_vec_tmp,
8870                                  int num_ents, int to_proc,
8871                                  const std::vector<EntityHandle> &new_ents);
8872        */
8873       // We are sending count, num edges, remote edges handles, and then, for each edge:
8874       //          -- nb intx points, 3*nbintPointsforEdge "doubles"
8875       std::vector<EntityHandle> dum_vec;
8876       result = get_remote_handles(true,
8877           edges_to_send, &dum_remote_edges[0], *sit,
8878                                       dum_vec);MB_CHK_SET_ERR(result, "Failed to get remote handles");
8879       int count = 4; // One int for the number of edges
8880       count += sizeof(int)*(int)edges_to_send.size(); // One int per edge for its number of intersection points
8881       count += sizeof(EntityHandle)*(int)edges_to_send.size(); // We will send the remote handles
8882       for (Range::iterator eit = edges_to_send.begin(); eit != edges_to_send.end(); ++eit) {
8883         EntityHandle edge = *eit;
8884         unsigned int indx = edges.find(edge) - edges.begin();
8885         std::vector<EntityHandle> & intx_nodes = *(extraNodesVec[indx]);
8886         count += (int)intx_nodes.size() * 3 * sizeof(double); // 3 doubles (x, y, z coordinates) per intersection node
8887       }
8888       //
8889       buff->check_space(count);
8890       PACK_INT(buff->buff_ptr, edges_to_send.size());
8891       PACK_EH(buff->buff_ptr, &dum_remote_edges[0], dum_remote_edges.size());
8892       for (Range::iterator eit = edges_to_send.begin(); eit != edges_to_send.end(); ++eit) {
8893         EntityHandle edge = *eit;
8894         // Pack the remote edge
8895         unsigned int indx = edges.find(edge) - edges.begin();
8896         std::vector<EntityHandle> & intx_nodes = *(extraNodesVec[indx]);
8897         PACK_INT(buff->buff_ptr, intx_nodes.size());
8898 
8899         result = mbImpl->get_coords(&intx_nodes[0], intx_nodes.size(),
8900                                     (double*)buff->buff_ptr);MB_CHK_SET_ERR(result, "Failed to get coords");
8901         buff->buff_ptr += 3 * sizeof(double) * intx_nodes.size();
8902       }
8903 
8904       // Done packing the intx points and remote edges
8905       buff->set_stored_size();
8906 
8907       // Now send it
8908       result = send_buffer(*sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE,
8909           sendReqs[3 * ind], recv_intx_reqs[3 * ind + 2], &dum_ack_buff, incoming);MB_CHK_SET_ERR(result, "Failed to send buffer");
8910     }
8911 
8912     // Receive/unpack intx points
8913     while (incoming) {
8914       MPI_Status status;
8915       int index_in_recv_requests;
8916       PRINT_DEBUG_WAITANY(recv_intx_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank());
8917       success = MPI_Waitany(3 * buffProcs.size(), &recv_intx_reqs[0],
8918           &index_in_recv_requests, &status);
8919       if (MPI_SUCCESS != success) {
8920         MB_SET_ERR(MB_FAILURE, "Failed in waitany in ghost exchange");
8921       }
8922       // The processor index in buffProcs is the request index divided by 3 (three requests per proc)
8923       ind = index_in_recv_requests / 3;
8924 
8925       PRINT_DEBUG_RECD(status);
8926 
8927       // OK, received something; decrement incoming counter
8928       incoming--;
8929 
8930       bool done = false;
8931       result = recv_buffer(MB_MESG_TAGS_SIZE, status,
8932           remoteOwnedBuffs[ind],
8933           recv_intx_reqs[3*ind + 1], // This is for receiving the second message
8934           recv_intx_reqs[3*ind + 2], // This would be for ack, but it is not used; consider removing it
8935           incoming,
8936           localOwnedBuffs[ind],
8937           sendReqs[3*ind + 1], // Send request for sending the second message
8938           sendReqs[3*ind + 2], // This is for sending the ack
8939           done);MB_CHK_SET_ERR(result, "Failed to resize recv buffer");
8940       if (done) {
8941         Buffer * buff = remoteOwnedBuffs[ind];
8942         buff->reset_ptr(sizeof(int));
8943         /*result = unpack_tags(remoteOwnedBuffs[ind/2]->buff_ptr, dum_vec, true,
8944             buffProcs[ind/2]);*/
8945         // Unpack now the edges and vertex info; compare with the existing vertex positions
8946 
8947         int num_edges;
8948 
8949         UNPACK_INT(buff->buff_ptr, num_edges);
8950         std::vector<EntityHandle> rec_edges;
8951         rec_edges.resize(num_edges);
8952         UNPACK_EH(buff->buff_ptr, &rec_edges[0], num_edges);
8953         for (int i = 0; i < num_edges; i++) {
8954           EntityHandle edge=rec_edges[i];
8955           unsigned int indx = edges.find(edge) - edges.begin();
8956           std::vector<EntityHandle> & intx_nodes = *(extraNodesVec[indx]);
8957           // Now get the number of nodes on this (now local) edge
8958           int nverts;
8959           UNPACK_INT(buff->buff_ptr, nverts);
8960           std::vector<double> pos_from_owner;
8961           pos_from_owner.resize(3*nverts);
8962           UNPACK_DBLS(buff->buff_ptr, &pos_from_owner[0], 3*nverts);
8963           std::vector<double> current_positions(3*intx_nodes.size());
8964           result = mbImpl->get_coords(&intx_nodes[0], intx_nodes.size(), &current_positions[0]);MB_CHK_SET_ERR(result, "Failed to get current positions");
8965           // Now, look at what we have in current pos, compare to pos from owner, and reset
8966           for (int k = 0;  k < (int)intx_nodes.size(); k++) {
8967             double * pk = &current_positions[3*k];
8968             // Take the current pos k, and settle among the ones from owner:
8969             bool found = false;
8970             for (int j = 0; j < nverts && !found; j++) {
8971               double * pj = &pos_from_owner[3*j];
8972               double dist2 = (pk[0] - pj[0])*(pk[0] - pj[0]) + (pk[1] - pj[1])*(pk[1] - pj[1]) +
8973                              (pk[2] - pj[2])*(pk[2] - pj[2]);
8974               if (dist2 < tolerance) { // Note that dist2 is a squared distance, so tolerance acts as a squared threshold
8975                 pk[0] = pj[0]; pk[1] = pj[1]; pk[2] = pj[2]; // Correct it!
8976                 found = true;
8977                 break;
8978               }
8979             }
8980             if (!found) {
8981 #ifndef  NDEBUG
8982               std::cout << " pk:" << pk[0] << " " << pk[1] << " " << pk[2] << " not found \n";
8983 #endif
8984               result = MB_FAILURE;
8985             }
8986           }
8987           // After we are done resetting, we can set the new positions of nodes:
8988           result = mbImpl->set_coords(&intx_nodes[0], (int)intx_nodes.size(), &current_positions[0]);MB_CHK_SET_ERR(result, "Failed to set new current positions");
8989         }
8990       }
8991     }
8992 
8993     // OK, now wait
8994     if (myDebug->get_verbosity() == 5) {
8995       success = MPI_Barrier(procConfig.proc_comm());
8996     } else {
8997       MPI_Status status[3 * MAX_SHARING_PROCS];
8998       success = MPI_Waitall(3 * buffProcs.size(), &sendReqs[0], status);
8999     }
9000     if (MPI_SUCCESS != success) {
9001       MB_SET_ERR(MB_FAILURE, "Failure in waitall in tag exchange");
9002     }
9003 
9004     myDebug->tprintf(1, "Exiting settle_intersection_points\n");
9005 
9006     return MB_SUCCESS;
9007   }
9008 
9009   ErrorCode ParallelComm::delete_entities(Range & to_delete)
9010   {
9011     // Will not look at shared sets yet, but maybe we should
9012     // First, see if any of the entities to delete is shared; then inform the other processors
9013     // about their fate (to be deleted), using a crystal router transfer
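         // The TupleList built below carries one tuple per (entity, sharing proc) pair:
         //   vi  [0] = destination processor rank
         //   vul [0] = the entity's handle on that processor (its remote handle)
         // gs_transfer() routes each tuple to its destination rank; on arrival, the remote
         // handle is simply added to that rank's own to_delete range.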
9014     ErrorCode rval = MB_SUCCESS;
9015     unsigned char pstat;
9016     EntityHandle tmp_handles[MAX_SHARING_PROCS];
9017     int tmp_procs[MAX_SHARING_PROCS];
9018     unsigned int num_ps;
9019     TupleList ents_to_delete;
9020     ents_to_delete.initialize(1, 0, 1, 0, to_delete.size() * (MAX_SHARING_PROCS + 1)); // A little bit of overkill
9021     ents_to_delete.enableWriteAccess();
9022     unsigned int i = 0;
9023     for (Range::iterator it = to_delete.begin(); it != to_delete.end(); ++it) {
9024       EntityHandle eh = *it; // Entity to be deleted
9025 
9026       rval = get_sharing_data(eh, tmp_procs, tmp_handles,
9027                               pstat, num_ps);
9028       if (rval != MB_SUCCESS || num_ps == 0)
9029         continue;
9030       // Add to the tuple list the information to be sent (to the remote procs)
9031       for (unsigned int p = 0; p < num_ps; p++) {
9032         ents_to_delete.vi_wr[i] = tmp_procs[p];
9033         ents_to_delete.vul_wr[i] = (unsigned long)tmp_handles[p];
9034         i++;
9035         ents_to_delete.inc_n();
9036       }
9037     }
9038 
9039     gs_data::crystal_data *cd = this->procConfig.crystal_router();
9040     // All communication happens here; no other MPI calls are needed
9041     // Also, this is a collective call
9042     rval = cd->gs_transfer(1, ents_to_delete, 0);MB_CHK_SET_ERR(rval, "Error in tuple transfer");
9043 
9044     // Add to the range of ents to delete the new ones that were sent from other procs
9045     unsigned int received = ents_to_delete.get_n();
9046     for (i = 0; i < received; i++) {
9047       //int from = ents_to_delete.vi_rd[i];
9048       unsigned long valrec = ents_to_delete.vul_rd[i];
9049       to_delete.insert((EntityHandle)valrec);
9050     }
9051     rval = mbImpl->delete_entities(to_delete);MB_CHK_SET_ERR(rval, "Error in deleting actual entities");
9052 
9053     std::vector<EntityHandle> good_ents;
9054     for (size_t j = 0; j<sharedEnts.size(); j++) {
9055       int index = to_delete.index(sharedEnts[j]);
9056       if (-1 == index)
9057         good_ents.push_back(sharedEnts[j]);
9058     }
9059     sharedEnts = good_ents;
9060 
9061     // What about shared sets? Who is updating them?
9062     return MB_SUCCESS;
9063   }
9064 
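       // Convert a pstatus bitmask into a human-readable, comma-separated list.  For example,
       // a value of (PSTATUS_SHARED | PSTATUS_NOT_OWNED) yields the string "NOT_OWNED, SHARED"
       // (flags are reported in the fixed order used below).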
9065   void ParallelComm::print_pstatus(unsigned char pstat, std::string &ostr)
9066   {
9067     std::ostringstream str;
9068     int num = 0;
9069 #define ppstat(a, b) { if (pstat & a) { if (num) str << ", "; str << b; num++; } }
9070 
9071     ppstat(PSTATUS_NOT_OWNED, "NOT_OWNED");
9072     ppstat(PSTATUS_SHARED, "SHARED");
9073     ppstat(PSTATUS_MULTISHARED, "MULTISHARED");
9074     ppstat(PSTATUS_INTERFACE, "INTERFACE");
9075     ppstat(PSTATUS_GHOST, "GHOST");
9076 
9077     ostr = str.str();
9078   }
9079 
9080   void ParallelComm::print_pstatus(unsigned char pstat)
9081   {
9082     std::string str;
9083     print_pstatus(pstat, str);
9084     std::cout << str.c_str() << std::endl;
9085   }
9086 
9087 } // namespace moab
9088