1 /** \file iMOAB.cpp
2 */
3 
4 #include "moab/MOABConfig.h"
5 #include "moab/Core.hpp"
6 
7 using namespace moab;
8 
9 #ifdef MOAB_HAVE_MPI
10     #include "moab_mpi.h"
11     #include "moab/ParallelComm.hpp"
12     #include "moab/ParCommGraph.hpp"
13 #endif
14 
15 #include <assert.h>
16 #include "moab/iMOAB.h"
17 
18 /*
19  this is needed so far because of direct access to hdf5/mhdf
20   */
21 #ifdef MOAB_HAVE_HDF5
22     #include "mhdf.h"
23     #include <H5Tpublic.h>
24 #endif
25 
26 #include "MBTagConventions.hpp"
27 #include "moab/MeshTopoUtil.hpp"
28 #include "moab/ReadUtilIface.hpp"
29 #include "moab/MergeMesh.hpp"
30 
31 #ifdef MOAB_HAVE_TEMPESTREMAP
32     #include "moab/IntxMesh/IntxUtils.hpp"
33 
34     #include "moab/Remapping/TempestRemapper.hpp"
35     #include "moab/Remapping/TempestOfflineMap.hpp"
36 #endif
37 
38 // C++ includes
39 #include <stdio.h>
40 #include <sstream>
41 #include <iostream>
42 
43 // #define VERBOSE
44 
45 // global variables ; should they be organized in a structure, for easier references?
46 // or how do we keep them global?
47 
48 #ifdef __cplusplus
49 extern "C" {
50 #endif
51 
52 #define CHKERRVAL(ierr) { if ( moab::MB_SUCCESS != ierr ) return 1; }
53 #define CHKIERRVAL(ierr) { if ( 0 != ierr ) return 1; }
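// CHKERRVAL maps a moab::ErrorCode onto the iMOAB convention (return 0 on success,
// a nonzero ErrCode on failure); CHKIERRVAL does the same for plain integer codes.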
54 
55 struct appData
56 {
57     EntityHandle file_set;
58     int   external_id;  // external component id, unique for application
59     Range all_verts;
60     Range local_verts; // it could include shared, but not owned at the interface
61     // these vertices would be all_verts if no ghosting was required
62     Range ghost_vertices; // locally ghosted from other processors
63     Range primary_elems;
64     Range owned_elems;
65     Range ghost_elems;
66     int dimension; // 2 or 3, dimension of primary elements (redundant?)
67     long num_global_elements; // union (global count) of all primary elements; either from hdf5 reading or from a reduce
68     long num_global_vertices; // union (global count) of all nodes, after sharing is resolved; it could be determined from hdf5 reading
69     Range mat_sets;
70     std::map<int, int> matIndex; // map from global block id to index in mat_sets
71     Range neu_sets;
72     Range diri_sets;
73     std::map< std::string, Tag> tagMap;
74     std::vector<Tag>  tagList;
75 
76 #ifdef MOAB_HAVE_MPI
77     std::vector<ParCommGraph*> pgraph; // created in order of other applications that communicate with this one
78     // constructor for this ParCommGraph takes the joint comm and the MPI groups for each application
79     moab::EntityHandle covering_set;
80 #endif
81 
82 #ifdef MOAB_HAVE_TEMPESTREMAP
83 	moab::TempestRemapper* remapper;
84     std::map<std::string,moab::TempestOfflineMap*> weightMaps;
85 	iMOAB_AppID pid_src;
86 	iMOAB_AppID pid_dest;
87 #endif
88 };
89 
90 struct GlobalContext
91 {
92     // are there reasons to have multiple moab inits? Is ref count needed?
93     Interface* MBI;
94     // we should also have the default tags stored, initialized
95     Tag material_tag, neumann_tag, dirichlet_tag, globalID_tag; // material, neumann, dirichlet,  globalID
96     int refCountMB;
97     int iArgc;
98     iMOAB_String* iArgv;
99     int unused_pid;
100 
101     std::map<std::string, int> appIdMap;     // from app string (uppercase) to app id
102     std::map<int, int> appIdCompMap;         // from component id to app id
103 
104 #ifdef MOAB_HAVE_MPI
105     std::vector<ParallelComm*> pcomms; // created in order of applications, one moab::ParallelComm for each
106 #endif
107 
108     std::vector<appData> appDatas; // the same order as pcomms
109 
110     GlobalContext() { MBI = 0; refCountMB = 0; unused_pid = 0; }
111 }  ;
112 
113 static struct GlobalContext context;
114 
115 
116 ErrCode iMOAB_Initialize ( int argc, iMOAB_String* argv )
117 {
118     context.iArgc = argc;
119     context.iArgv = argv; // shallow copy
120 
121     if ( 0 == context.refCountMB )
122     {
123         context.MBI = new ( std::nothrow ) moab::Core;
124         // retrieve the default tags
125         const char* const shared_set_tag_names[] = {MATERIAL_SET_TAG_NAME,
126                                                     NEUMANN_SET_TAG_NAME,
127                                                     DIRICHLET_SET_TAG_NAME,
128                                                     GLOBAL_ID_TAG_NAME
129                                                    };
130         // blocks, visible surfaceBC(neumann), vertexBC (Dirichlet), global id, parallel partition
131         Tag gtags[4];
132 
133         for ( int i = 0; i < 4; i++ )
134         {
135 
136             ErrorCode rval = context.MBI->tag_get_handle ( shared_set_tag_names[i], 1, MB_TYPE_INTEGER,
137                              gtags[i], MB_TAG_ANY );
138             CHKERRVAL(rval);
139         }
140 
141         context.material_tag = gtags[0];
142         context.neumann_tag = gtags[1];
143         context.dirichlet_tag = gtags[2];
144         context.globalID_tag = gtags[3];
145     }
146 
147     context.refCountMB++;
148     return 0;
149 }
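// Illustrative lifecycle sketch (not part of this file; the flow is hypothetical and
// iMOAB_AppID is assumed to be a pointer-to-int, as the dereferences below suggest):
//
//   iMOAB_Initialize ( argc, argv );   // reference counted; once per process is enough
//   // ... register applications, load meshes, define/exchange tags ...
//   iMOAB_Finalize();                  // deletes the moab::Core when the count drops to 0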
150 
151 ErrCode iMOAB_InitializeFortran()
152 {
153     return iMOAB_Initialize ( 0, 0 );
154 }
155 
156 ErrCode iMOAB_Finalize()
157 {
158     context.refCountMB--;
159 
160     if ( 0 == context.refCountMB )
161     { delete context.MBI; }
162 
163     return MB_SUCCESS;
164 }
165 
166 ErrCode iMOAB_RegisterApplication ( const iMOAB_String app_name,
167 #ifdef MOAB_HAVE_MPI
168                                     MPI_Comm* comm,
169 #endif
170                                     int* compid,
171                                     iMOAB_AppID pid )
172 {
173     // will create a parallel comm for this application too, so there will be a
174     // mapping from *pid to file set and to parallel comm instances
175     std::string name ( app_name );
176 
177     if ( context.appIdMap.find ( name ) != context.appIdMap.end() )
178     {
179         std::cout << " application " << name << " already registered \n";
180         return 1;
181     }
182 
183     *pid =  context.unused_pid++;
184     context.appIdMap[name] = *pid;
185     int rankHere=0;
186 #ifdef MOAB_HAVE_MPI
187     MPI_Comm_rank( *comm, &rankHere );
188 #endif
189     if (!rankHere) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
190     if ( *compid <= 0 )
191     {
192         std::cout << " the convention for an external application is to have a positive component id \n";
193         return 1;
194     }
195 
196     if ( context.appIdCompMap.find ( *compid ) != context.appIdCompMap.end() )
197     {
198         std::cout << " external application with comp id " << *compid << " is already registered\n";
199         return 1;
200     }
201 
202     context.appIdCompMap[*compid] = *pid;
203 
204     // now create ParallelComm and a file set for this application
205 #ifdef MOAB_HAVE_MPI
206     if (comm) {
207         ParallelComm* pco = new ParallelComm ( context.MBI, *comm );
208 
209 #ifndef NDEBUG
210         int index = pco->get_id(); // it could be useful to get app id from pcomm instance ...
211         assert ( index == *pid );
212         // here, we assert that the pid is the same as the id of the ParallelComm instance
213         // useful for writing in parallel
214 #endif
215         context.pcomms.push_back ( pco );
216     }
217     else {
218         context.pcomms.push_back ( 0 );
219     }
220 #endif
221 
222     // create now the file set that will be used for loading the model in
223     EntityHandle file_set;
224     ErrorCode rval = context.MBI->create_meshset ( MESHSET_SET, file_set );
225     CHKERRVAL(rval);
226 
227     appData app_data;
228     app_data.file_set = file_set;
229     app_data.external_id = * compid; // will be used mostly for par comm graph
230 
231 #ifdef MOAB_HAVE_MPI
232     app_data.covering_set = file_set;
233 #endif
234 
235 #ifdef MOAB_HAVE_TEMPESTREMAP
236 	app_data.remapper = NULL; // Only allocate as needed
237 #endif
238 
239     context.appDatas.push_back ( app_data ); // appDatas[*pid] will hold the data (including the file set) for this application
240     return 0;
241 }
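// Registration sketch (illustrative only; the name "ATM" and the component id 5 are hypothetical):
//
//   int compid = 5, appID;
//   iMOAB_AppID pid = &appID;
// #ifdef MOAB_HAVE_MPI
//   MPI_Comm comm = MPI_COMM_WORLD;
//   iMOAB_RegisterApplication ( "ATM", &comm, &compid, pid );
// #else
//   iMOAB_RegisterApplication ( "ATM", &compid, pid );
// #endif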
242 
243 ErrCode iMOAB_RegisterFortranApplication ( const iMOAB_String app_name,
244 #ifdef MOAB_HAVE_MPI
245         int* comm,
246 #endif
247         int* compid, iMOAB_AppID pid, int app_name_length )
248 {
249     std::string name ( app_name );
250 
251     if ( ( int ) strlen ( app_name ) > app_name_length )
252     {
253         std::cout << " length of string issue \n";
254         return 1;
255     }
256 
257 #ifdef MOAB_HAVE_MPI
258     MPI_Comm ccomm;
259     if (comm) {
260       // convert from Fortran communicator to a C communicator
261       // see transfer of handles
262       // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
263       ccomm = MPI_Comm_f2c ( ( MPI_Fint ) * comm );
264     }
265 #endif
266 
267     // now call C style registration function:
268     return iMOAB_RegisterApplication ( app_name,
269 #ifdef MOAB_HAVE_MPI
270                                     &ccomm,
271 #endif
272                                     compid,
273                                     pid );
274 }
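// The Fortran variant above only checks the passed string length and converts the Fortran
// integer communicator handle with MPI_Comm_f2c, then delegates to the C registration.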
275 
276 
277 ErrCode iMOAB_DeregisterApplication ( iMOAB_AppID pid )
278 {
279     // the file set and parallel comm are kept in vectors indexed by *pid
280     // assume we did not delete anything yet
281     // *pid will not be reused if we register another application
282 
283     EntityHandle fileSet = context.appDatas[*pid].file_set;
284     // get all entities part of the file set
285     Range fileents;
286     ErrorCode rval = context.MBI->get_entities_by_handle ( fileSet, fileents, /*recursive */true );CHKERRVAL(rval);
287 
288     fileents.insert ( fileSet );
289 
290     rval = context.MBI->get_entities_by_type ( fileSet, MBENTITYSET, fileents );CHKERRVAL(rval); // append all mesh sets
291 
292 #ifdef MOAB_HAVE_TEMPESTREMAP
293   if (context.appDatas[*pid].remapper) delete context.appDatas[*pid].remapper;
294 #endif
295 
296 #ifdef MOAB_HAVE_MPI
297     ParallelComm* pco = context.pcomms[*pid];
298 
299     // we could get the pco also with
300     // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
301     if (pco) delete pco;
302     std::vector<ParCommGraph*>& pargs = context.appDatas[*pid].pgraph;
303 
304     // free the parallel comm graphs associated with this app
305     for ( size_t k = 0; k < pargs.size(); k++ )
306     { delete pargs[k]; }
307 
308 #endif
309 
310     // delete first all except vertices
311     Range vertices = fileents.subset_by_type ( MBVERTEX );
312     Range noverts = subtract ( fileents, vertices );
313 
314     rval = context.MBI->delete_entities ( noverts );CHKERRVAL(rval);
315     // now retrieve connected elements that still exist (maybe in other sets, pids?)
316     Range adj_ents_left;
317     rval = context.MBI->get_adjacencies(vertices, 1, false, adj_ents_left, Interface::UNION);CHKERRVAL(rval);
318     rval = context.MBI->get_adjacencies(vertices, 2, false, adj_ents_left, Interface::UNION);CHKERRVAL(rval);
319     rval = context.MBI->get_adjacencies(vertices, 3, false, adj_ents_left, Interface::UNION);CHKERRVAL(rval);
320 
321     if (!adj_ents_left.empty())
322     {
323       Range conn_verts;
324       rval = context.MBI->get_connectivity(adj_ents_left, conn_verts);CHKERRVAL(rval);
325       vertices = subtract(vertices, conn_verts);
326     }
327 
328     rval = context.MBI->delete_entities ( vertices );CHKERRVAL(rval);
329 
330     std::map<std::string, int>::iterator mit;
331 
332     for ( mit = context.appIdMap.begin(); mit != context.appIdMap.end(); mit++ )
333     {
334         int pidx = mit->second;
335 
336         if ( *pid == pidx )
337         {
338             break;
339         }
340     }
341 
342     context.appIdMap.erase ( mit );
343     std::map<int, int>::iterator mit1;
344 
345     for ( mit1 = context.appIdCompMap.begin(); mit1 != context.appIdCompMap.end(); mit1++ )
346     {
347         int pidx = mit1->second;
348 
349         if ( *pid == pidx )
350         {
351             break;
352         }
353     }
354 
355     context.appIdCompMap.erase ( mit1 );
356 
357     context.unused_pid--;
358     context.appDatas.pop_back();
359 #ifdef MOAB_HAVE_MPI
360     context.pcomms.pop_back();
361 #endif
362     return 0;
363 }
364 
365 ErrCode iMOAB_ReadHeaderInfo ( const iMOAB_String filename, int* num_global_vertices, int* num_global_elements, int* num_dimension, int* num_parts, int filename_length )
366 {
367 #ifdef MOAB_HAVE_HDF5
368     std::string filen ( filename );
369 
370     if ( filename_length < ( int ) strlen ( filename ) )
371     {
372         filen = filen.substr ( 0, filename_length );
373     }
374 
375     *num_global_vertices = 0;
376     int edges = 0;
377     int faces = 0;
378     int regions = 0;
379     *num_global_elements = 0;
380     *num_dimension = 0;
381     *num_parts = 0;
382 
383     mhdf_FileHandle file;
384     mhdf_Status status;
385     unsigned long max_id;
386     struct mhdf_FileDesc* data;
387 
388     file = mhdf_openFile ( filen.c_str(), 0, &max_id, -1, &status );
389 
390     if ( mhdf_isError ( &status ) )
391     {
392         fprintf ( stderr, "%s: %s\n", filename, mhdf_message ( &status ) );
393         return 1;
394     }
395 
396     data = mhdf_getFileSummary ( file, H5T_NATIVE_ULONG, &status, 1 ); // will use extra set info; will get parallel partition tag info too!
397 
398     if ( mhdf_isError ( &status ) )
399     {
400         fprintf ( stderr, "%s: %s\n", filename, mhdf_message ( &status ) );
401         return 1;
402     }
403 
404     *num_dimension = data->nodes.vals_per_ent;
405     *num_global_vertices = ( int ) data->nodes.count;
406 
407     for ( int i = 0; i < data->num_elem_desc; i++ )
408     {
409         struct mhdf_ElemDesc* el_desc = & ( data->elems[i] );
410         struct mhdf_EntDesc* ent_d = & ( el_desc->desc );
411 
412         if ( 0 == strcmp ( el_desc->type, mhdf_EDGE_TYPE_NAME ) ) { edges += ent_d->count; }
413 
414         if ( 0 == strcmp ( el_desc->type, mhdf_TRI_TYPE_NAME ) )  { faces += ent_d->count; }
415 
416         if ( 0 == strcmp ( el_desc->type, mhdf_QUAD_TYPE_NAME ) ) { faces += ent_d->count; }
417 
418         if ( 0 == strcmp ( el_desc->type, mhdf_POLYGON_TYPE_NAME ) ) { faces += ent_d->count; }
419 
420         if ( 0 == strcmp ( el_desc->type, mhdf_TET_TYPE_NAME ) ) { regions += ent_d->count; }
421 
422         if ( 0 == strcmp ( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) ) { regions += ent_d->count; }
423 
424         if ( 0 == strcmp ( el_desc->type, mhdf_PRISM_TYPE_NAME ) ) { regions += ent_d->count; }
425 
426         if ( 0 == strcmp ( el_desc->type, mdhf_KNIFE_TYPE_NAME ) ) { regions += ent_d->count; }
427 
428         if ( 0 == strcmp ( el_desc->type, mdhf_HEX_TYPE_NAME ) ) { regions += ent_d->count; }
429 
430         if ( 0 == strcmp ( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) ) { regions += ent_d->count; }
431 
432         if ( 0 == strcmp ( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) ) { regions += ent_d->count; }
433     }
434 
435     *num_parts = data->numEntSets[0];
436 
437     // is this required?
438     if ( edges > 0 )
439     {
440         *num_dimension = 1; // I don't think it will ever return 1
441         *num_global_elements = edges;
442     }
443 
444     if ( faces > 0 )
445     {
446         *num_dimension = 2;
447         *num_global_elements = faces;
448     }
449 
450     if ( regions > 0 )
451     {
452         *num_dimension = 3;
453         *num_global_elements = regions;
454     }
455 
456     mhdf_closeFile ( file, &status );
457 
458     free ( data );
459 
460 #else
461     std::cout << filename << ": Please reconfigure with HDF5. Cannot retrieve header information for file formats other than a h5m file.\n";
462     *num_global_vertices = *num_global_elements = *num_dimension = *num_parts = 0;
463 #endif
464 
465     return 0;
466 }
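// Header query sketch (illustrative; "mesh.h5m" is a hypothetical h5m file name):
//
//   int nverts, nelems, ndim, nparts;
//   iMOAB_ReadHeaderInfo ( "mesh.h5m", &nverts, &nelems, &ndim, &nparts, (int)strlen("mesh.h5m") );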
467 
468 
469 ErrCode iMOAB_LoadMesh ( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String read_options, int* num_ghost_layers, int filename_length, int read_options_length )
470 {
471     if ( ( int ) strlen ( filename ) > filename_length )
472     {
473         std::cout << " filename length issue\n";
474         return 1;
475     }
476 
477     if ( ( int ) strlen ( read_options ) > read_options_length )
478     {
479         std::cout << " read options length issue\n";
480         return 1;
481     }
482 
483     // make sure we use the file set and pcomm associated with the *pid
484     std::ostringstream newopts;
485     newopts  << read_options;
486 #ifdef MOAB_HAVE_MPI
487     int flagInit;
488     MPI_Initialized( &flagInit );
489 
490     if (flagInit) {
491         int nprocs;
492         MPI_Comm_size( MPI_COMM_WORLD, &nprocs );
493         if (nprocs > 1) {
494             std::string opts ( read_options );
495             std::string pcid ( "PARALLEL_COMM=" );
496             std::size_t found = opts.find ( pcid );
497 
498             if ( found != std::string::npos )
499             {
500                 std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
501                 return 1;
502             }
503 
504             // apply the PARALLEL_COMM option only for h5m files; it does not work for .g files (used in test_remapping)
505             std::string filen(filename);
506             std::string::size_type idx = filen.rfind('.');
507 
508             if(idx != std::string::npos)
509             {
510               std::string extension = filen.substr(idx+1);
511               if (extension == std::string("h5m"))
512                   newopts << ";;PARALLEL_COMM=" << *pid;
513             }
514 
515 
516             if ( *num_ghost_layers >= 1 )
517             {
518               // if we want ghosts, we also want additional (lower-dimensional) entities, requested by the last field (.3)
519               // because the additional entities can be edges and faces that are part of the neumann sets
520               std::string pcid2 ( "PARALLEL_GHOSTS=" );
521               std::size_t found2 = opts.find ( pcid2 );
522 
523               if ( found2 != std::string::npos )
524               {
525                   std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed number of layers \n";
526               }
527               else
528               {
529                 // dimension of primary entities is 3 here, but it could be 2 for climate meshes; we would need to pass
530                 // PARALLEL_GHOSTS explicitly for 2d meshes, for example:  ";PARALLEL_GHOSTS=2.0.1"
531                 newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
532               }
533             }
534         }
535     }
536 
537 #endif
538     ErrorCode rval = context.MBI->load_file ( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );CHKERRVAL(rval);
539 
540 #ifdef VERBOSE
541 
542     // some debugging stuff
543     std::ostringstream outfile;
544 #ifdef MOAB_HAVE_MPI
545     int rank = context.pcomms[*pid]->rank();
546     int nprocs = context.pcomms[*pid]->size();
547     outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
548 #else
549     outfile << "TaskMesh_n1.0.h5m";
550 #endif
551     // the mesh contains ghosts too, but they are not part of mat/neumann set
552     // write in serial the file, to see what tags are missing
553     rval = context.MBI->write_file ( outfile.str().c_str() );CHKERRVAL(rval); // everything on current task, written in serial
554 
555 #endif
556     int rc = iMOAB_UpdateMeshInfo ( pid );
557     return rc;
558 }
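// Typical parallel read options for an h5m file (a sketch; the exact string depends on how
// the file was partitioned):
//
//   "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS"
//
// PARALLEL_COMM is appended automatically above, and PARALLEL_GHOSTS is appended when
// *num_ghost_layers >= 1 and the caller did not already specify it.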
559 
560 
561 ErrCode iMOAB_WriteMesh ( iMOAB_AppID pid, iMOAB_String filename, iMOAB_String write_options, int filename_length, int write_options_length )
562 {
563     // maybe do some processing of strings and lengths
564     if ( ( int ) strlen ( filename ) > filename_length )
565     {
566         std::cout << " file name length issue\n";
567         return 1;
568     }
569 
570     if ( ( int ) strlen ( write_options ) > write_options_length )
571     {
572         std::cout << " write options issue\n";
573         return 1;
574     }
575 
576     std::ostringstream newopts;
577 #ifdef MOAB_HAVE_MPI
578     std::string write_opts ( write_options );
579     std::string pcid ( "PARALLEL_COMM=" );
580     std::size_t found = write_opts.find ( pcid );
581 
582     if ( found != std::string::npos )
583     {
584         std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
585         return 1;
586     }
587 
588     // if write in parallel, add pc option, to be sure about which ParallelComm instance is used
589     std::string pw ( "PARALLEL=WRITE_PART" );
590     found = write_opts.find ( pw );
591 
592     if ( found != std::string::npos )
593     {
594         newopts << "PARALLEL_COMM=" << *pid << ";";
595     }
596 
597 #endif
598     newopts  << write_options;
599     ErrorCode rval = context.MBI->write_file ( filename, 0, newopts.str().c_str(),
600                                                 &context.appDatas[*pid].file_set, 1 );CHKERRVAL(rval);
601 
602     return 0;
603 }
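// Example write options (illustrative): "PARALLEL=WRITE_PART" triggers a parallel h5m write;
// the matching PARALLEL_COMM=<pid> option is prepended automatically above.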
604 
605 
606 ErrCode iMOAB_UpdateMeshInfo ( iMOAB_AppID pid )
607 {
608     // this will include ghost elements info
609     //
610     appData& data = context.appDatas[*pid];
611     EntityHandle fileSet = data.file_set;
612     // first clear all data ranges; this can be called after ghosting
613     data.all_verts.clear();
614     data.primary_elems.clear();
615     data.local_verts.clear();
616     data.ghost_vertices.clear();
617     data.owned_elems.clear();
618     data.ghost_elems.clear();
619     data.mat_sets.clear();
620     data.neu_sets.clear();
621     data.diri_sets.clear();
622 
623     ErrorCode rval = context.MBI->get_entities_by_type ( fileSet, MBVERTEX, data.all_verts, true );CHKERRVAL(rval); // recursive
624 
625     rval = context.MBI->get_entities_by_dimension ( fileSet, 3, data.primary_elems, true );CHKERRVAL(rval); // recursive
626 
627     data.dimension = 3;
628 
629     if ( data.primary_elems.empty() )
630     {
631         context.appDatas[*pid].dimension = 2;
632         rval = context.MBI->get_entities_by_dimension ( fileSet, 2, data.primary_elems, true );CHKERRVAL(rval); // recursive
633 
634         if ( data.primary_elems.empty() )
635         {
636             context.appDatas[*pid].dimension = 1;
637             rval = context.MBI->get_entities_by_dimension ( fileSet, 1, data.primary_elems, true );CHKERRVAL(rval); // recursive
638 
639             if ( data.primary_elems.empty() )
640             { return 1; } // no elements of dimension 1 or 2 or 3
641         }
642     }
643 
644 #ifdef MOAB_HAVE_MPI
645     int flagInit;
646     MPI_Initialized( &flagInit );
647 
648     if (flagInit) {
649         ParallelComm* pco = context.pcomms[*pid];
650 
651         // filter ghost vertices, from local
652         rval = pco -> filter_pstatus ( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );CHKERRVAL(rval);
653 
654         data.ghost_vertices = subtract ( data.all_verts, data.local_verts );
655 
656         // get all blocks, BCs, etc
657 
658         // filter ghost elements, from local
659         rval = pco -> filter_pstatus ( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );CHKERRVAL(rval);
660 
661         data.ghost_elems = subtract ( data.primary_elems, data.owned_elems );
662     }
663     else {
664         data.local_verts = data.all_verts;
665         data.owned_elems = data.primary_elems;
666     }
667 
668 #else
669 
670     data.local_verts = data.all_verts;
671     data.owned_elems = data.primary_elems;
672 
673 #endif
674     rval = context.MBI->get_entities_by_type_and_tag ( fileSet, MBENTITYSET,
675                                                         & ( context.material_tag ), 0, 1,
676                                                         data.mat_sets, Interface::UNION );CHKERRVAL(rval);
677 
678     rval = context.MBI->get_entities_by_type_and_tag ( fileSet, MBENTITYSET,
679                                                         & ( context.neumann_tag ), 0, 1,
680                                                         data.neu_sets, Interface::UNION );CHKERRVAL(rval);
681 
682     rval = context.MBI->get_entities_by_type_and_tag ( fileSet, MBENTITYSET,
683                                                         & ( context.dirichlet_tag ), 0, 1,
684                                                         data.diri_sets, Interface::UNION );CHKERRVAL(rval);
685 
686     return 0;
687 }
688 
689 
690 ErrCode iMOAB_GetMeshInfo ( iMOAB_AppID pid, int* num_visible_vertices, int* num_visible_elements, int* num_visible_blocks, int* num_visible_surfaceBC, int* num_visible_vertexBC )
691 {
692 	ErrorCode rval;
693     // this will include ghost elements
694     appData& data = context.appDatas[*pid];
695     EntityHandle fileSet = data.file_set;
696     // first clear all data ranges; this can be called after ghosting
697 
698 	if (num_visible_elements) {
699 		num_visible_elements[2] = ( int ) data.primary_elems.size();
700 		// separate ghost and local/owned primary elements
701 		num_visible_elements[0] = ( int ) data.owned_elems.size();
702 		num_visible_elements[1] = ( int ) data.ghost_elems.size();
703     }
704     if (num_visible_vertices) {
705 		num_visible_vertices[2] = ( int ) data.all_verts.size();
706 		num_visible_vertices[1] = ( int ) data.ghost_vertices.size();
707 		num_visible_vertices[0] =  num_visible_vertices[2] - num_visible_vertices[1]; // local are those that are not ghosts
708 	}
709 
710 	if (num_visible_blocks) {
711 		rval = context.MBI->get_entities_by_type_and_tag ( fileSet, MBENTITYSET,
712                                                             & ( context.material_tag ), 0, 1,
713                                                             data.mat_sets, Interface::UNION );CHKERRVAL(rval);
714 
715 		num_visible_blocks[2] = data.mat_sets.size();
716 		num_visible_blocks[0] = num_visible_blocks[2];
717 		num_visible_blocks[1] = 0;
718     }
719 
720 	if (num_visible_surfaceBC) {
721 		rval = context.MBI->get_entities_by_type_and_tag ( fileSet, MBENTITYSET,
722                                                             & ( context.neumann_tag ), 0, 1,
723                                                             data.neu_sets, Interface::UNION );CHKERRVAL(rval);
724 
725 		num_visible_surfaceBC[2] = 0;
726 		// count how many faces are in each neu set, and how many regions are
727 		// adjacent to them;
728 		int numNeuSets = ( int ) data.neu_sets.size();
729 
730 		for ( int i = 0; i < numNeuSets; i++ )
731 		{
732 			Range subents;
733 			EntityHandle nset = data.neu_sets[i];
734 			rval = context.MBI->get_entities_by_dimension ( nset, data.dimension - 1, subents );CHKERRVAL(rval);
735 
736 			for ( Range::iterator it = subents.begin(); it != subents.end(); ++it )
737 			{
738 				EntityHandle subent = *it;
739 				Range adjPrimaryEnts;
740 				rval = context.MBI->get_adjacencies ( &subent, 1, data.dimension, false, adjPrimaryEnts );CHKERRVAL(rval);
741 
742 				num_visible_surfaceBC[2] += ( int ) adjPrimaryEnts.size();
743 			}
744 		}
745 
746 		num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
747 		num_visible_surfaceBC[1] = 0; //
748     }
749 
750 	if (num_visible_vertexBC) {
751 		rval = context.MBI->get_entities_by_type_and_tag ( fileSet, MBENTITYSET,
752                                                             & ( context.dirichlet_tag ), 0, 1,
753                                                             data.diri_sets, Interface::UNION );CHKERRVAL(rval);
754 
755 		num_visible_vertexBC[2] = 0;
756 		int numDiriSets = ( int ) data.diri_sets.size();
757 
758 		for ( int i = 0; i < numDiriSets; i++ )
759 		{
760 			Range verts;
761 			EntityHandle diset = data.diri_sets[i];
762 			rval = context.MBI->get_entities_by_dimension ( diset, 0, verts );CHKERRVAL(rval);
763 
764 			num_visible_vertexBC[2] += ( int ) verts.size();
765 		}
766 
767 		num_visible_vertexBC[0] = num_visible_vertexBC[2];
768 		num_visible_vertexBC[1] = 0;
769     }
770 
771     return 0;
772 }
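// Each array filled by iMOAB_GetMeshInfo above has (at least) 3 entries:
//   [0] = owned/local count, [1] = ghost count, [2] = total visible count.
// Caller sketch (illustrative):
//
//   int nverts[3], nelems[3], nblocks[3], nsbc[3], nvbc[3];
//   iMOAB_GetMeshInfo ( pid, nverts, nelems, nblocks, nsbc, nvbc );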
773 
774 ErrCode iMOAB_GetVertexID ( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID )
775 {
776     //
777     Range& verts = context.appDatas[*pid].all_verts;
778 
779     if ( ( int ) verts.size() != *vertices_length )
780     { return 1; } // problem with array length
781 
782     // global id tag is context.globalID_tag
783     ErrorCode rval = context.MBI->tag_get_data ( context.globalID_tag, verts, global_vertex_ID );CHKERRVAL(rval);
784 
785     return 0;
786 }
787 
788 ErrCode iMOAB_GetVertexOwnership ( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID )
789 {
790     Range& verts = context.appDatas[*pid].all_verts;
791     int i = 0;
792 #ifdef MOAB_HAVE_MPI
793     ParallelComm* pco = context.pcomms[*pid];
794 
795     for ( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
796     {
797         ErrorCode rval = pco->get_owner ( *vit, visible_global_rank_ID[i] );CHKERRVAL(rval);
798     }
799 
800     if ( i != *vertices_length )
801     { return 1; } // warning array allocation problem
802 
803 #else
804 
805     /* everything owned by proc 0 */
806     if ( ( int ) verts.size() != *vertices_length )
807     { return 1; } // warning array allocation problem
808 
809     for ( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
810     { visible_global_rank_ID[i] = 0; } // all vertices are owned by processor 0, as this is serial run
811 
812 #endif
813     return 0;
814 }
815 
816 ErrCode iMOAB_GetVisibleVerticesCoordinates ( iMOAB_AppID pid, int* coords_length, double* coordinates )
817 {
818     Range& verts = context.appDatas[*pid].all_verts;
819 
820     // interleaved coordinates, so that means deep copy anyway
821     if ( *coords_length != 3 * ( int ) verts.size() )
822     { return 1; }
823 
824     ErrorCode rval = context.MBI->get_coords ( verts, coordinates );CHKERRVAL(rval);
825 
826     return 0;
827 }
828 
829 ErrCode iMOAB_GetBlockID ( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs )
830 {
831     // local id blocks? they are counted from 0 to number of visible blocks ...
832     // will actually return material set tag value for global
833     Range& matSets = context.appDatas[*pid].mat_sets;
834 
835     if ( *block_length != ( int ) matSets.size() )
836     { return 1; }
837 
838     // return the material set tag value (context.material_tag is the material set tag)
839     ErrorCode rval = context.MBI->tag_get_data ( context.material_tag, matSets, global_block_IDs );CHKERRVAL(rval);
840 
841     // populate map with index
842     std::map <int, int>& matIdx = context.appDatas[*pid].matIndex;
843     for ( unsigned i = 0; i < matSets.size(); i++ )
844     {
845         matIdx[global_block_IDs[i]] = i;
846     }
847 
848     return 0;
849 }
850 
851 ErrCode iMOAB_GetBlockInfo ( iMOAB_AppID pid, iMOAB_GlobalID* global_block_ID,
852                              int* vertices_per_element, int* num_elements_in_block )
853 {
854     std::map<int, int>& matMap = context.appDatas[*pid].matIndex;
855     std::map<int, int>::iterator it = matMap.find ( *global_block_ID );
856 
857     if ( it == matMap.end() )
858     { return 1; } // error in finding block with id
859 
860     int blockIndex = matMap[*global_block_ID];
861     EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
862     Range blo_elems;
863     ErrorCode rval = context.MBI-> get_entities_by_handle ( matMeshSet, blo_elems );
864 
865     if ( MB_SUCCESS != rval ||  blo_elems.empty() )
866     { return 1; }
867 
868     EntityType type = context.MBI->type_from_handle ( blo_elems[0] );
869 
870     if ( !blo_elems.all_of_type ( type ) )
871     { return 1; } // not all of the same type
872 
873     const EntityHandle* conn = NULL;
874     int num_verts = 0;
875     rval = context.MBI->get_connectivity ( blo_elems[0], conn, num_verts );
876 	CHKERRVAL(rval);
877 
878     *vertices_per_element = num_verts;
879     *num_elements_in_block = ( int ) blo_elems.size();
880 
881     return 0;
882 }
883 
884 ErrCode iMOAB_GetVisibleElementsInfo ( iMOAB_AppID pid, int* num_visible_elements,
885                                        iMOAB_GlobalID* element_global_IDs, int* ranks, iMOAB_GlobalID* block_IDs )
886 {
887     appData& data =  context.appDatas[*pid];
888 #ifdef MOAB_HAVE_MPI
889     ParallelComm* pco = context.pcomms[*pid];
890 #endif
891 
892     ErrorCode rval = context.MBI-> tag_get_data ( context.globalID_tag,
893                                                     data.primary_elems,
894                                                     element_global_IDs );CHKERRVAL(rval);
895 
896     int i = 0;
897 
898     for ( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
899     {
900 #ifdef MOAB_HAVE_MPI
901         rval = pco->get_owner ( *eit, ranks[i] );CHKERRVAL(rval);
902 
903 #else
904         /* everything owned by task 0 */
905         ranks[i] = 0;
906 #endif
907     }
908 
909     for ( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
910     {
911         EntityHandle matMeshSet = *mit;
912         Range elems;
913         rval = context.MBI-> get_entities_by_handle ( matMeshSet, elems );CHKERRVAL(rval);
914 
915         int valMatTag;
916         rval = context.MBI->tag_get_data ( context.material_tag, &matMeshSet, 1, &valMatTag );
917         CHKERRVAL(rval);
918 
919         for ( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
920         {
921             EntityHandle eh = *eit;
922             int index = data.primary_elems.index ( eh );
923 
924             if ( -1 == index )
925             { return 1; }
926 
927             if ( -1 >= *num_visible_elements )
928             { return 1; }
929 
930             block_IDs[index] = valMatTag;
931         }
932     }
933 
934     return 0;
935 }
936 
937 ErrCode iMOAB_GetBlockElementConnectivities ( iMOAB_AppID pid, iMOAB_GlobalID* global_block_ID, int* connectivity_length, int* element_connectivity )
938 {
939     appData& data =  context.appDatas[*pid];
940     std::map<int, int>& matMap = data.matIndex;
941     std::map<int, int>::iterator it = matMap.find ( *global_block_ID );
942 
943     if ( it == matMap.end() )
944     { return 1; } // error in finding block with id
945 
946     int blockIndex = matMap[*global_block_ID];
947     EntityHandle matMeshSet = data.mat_sets[blockIndex];
948     std::vector<EntityHandle> elems;
949 
950     ErrorCode rval = context.MBI-> get_entities_by_handle ( matMeshSet, elems );CHKERRVAL(rval);
951 
952     if ( elems.empty() )
953     { return 1; }
954 
955     std::vector<EntityHandle> vconnect;
956     rval = context.MBI->get_connectivity ( &elems[0], elems.size(), vconnect );CHKERRVAL(rval);
957 
958     if ( *connectivity_length != ( int ) vconnect.size() )
959     { return 1; } // mismatched sizes
960 
961     for ( int i = 0; i < *connectivity_length; i++ )
962     {
963         int inx = data.all_verts.index ( vconnect[i] );
964 
965         if ( -1 == inx )
966         { return 1; } // error, vertex not in local range
967 
968         element_connectivity[i] = inx;
969     }
970 
971     return 0;
972 }
973 
974 ErrCode iMOAB_GetElementConnectivity ( iMOAB_AppID pid, iMOAB_LocalID* elem_index, int* connectivity_length, int* element_connectivity )
975 {
976     appData& data =  context.appDatas[*pid];
977     assert ( ( *elem_index >= 0 )  && ( *elem_index < ( int ) data.primary_elems.size() ) );
978 
979     int num_nodes;
980     const EntityHandle* conn;
981 
982     EntityHandle eh = data.primary_elems[*elem_index];
983 
984     ErrorCode rval = context.MBI->get_connectivity ( eh, conn, num_nodes );CHKERRVAL(rval);
985 
986     if ( * connectivity_length < num_nodes )
987     { return 1; } // wrong number of vertices
988 
989     for ( int i = 0; i < num_nodes; i++ )
990     {
991         int index = data.all_verts.index ( conn[i] );
992 
993         if ( -1 == index )
994         { return 1; }
995 
996         element_connectivity[i] = index;
997     }
998 
999     *connectivity_length = num_nodes;
1000 
1001     return 0;
1002 }
1003 
1004 ErrCode iMOAB_GetElementOwnership ( iMOAB_AppID pid, iMOAB_GlobalID* global_block_ID, int* num_elements_in_block, int* element_ownership )
1005 {
1006     std::map<int, int>& matMap = context.appDatas[*pid].matIndex;
1007 
1008     std::map<int, int>::iterator it = matMap.find ( *global_block_ID );
1009 
1010     if ( it == matMap.end() )
1011     { return 1; } // error in finding block with id
1012 
1013     int blockIndex = matMap[*global_block_ID];
1014     EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
1015     Range elems;
1016 
1017     ErrorCode rval = context.MBI-> get_entities_by_handle ( matMeshSet, elems );CHKERRVAL(rval);
1018 
1019     if ( elems.empty() )
1020     { return 1; }
1021 
1022     if ( *num_elements_in_block != ( int ) elems.size() )
1023     { return 1; } // bad memory allocation
1024 
1025     int i = 0;
1026 #ifdef MOAB_HAVE_MPI
1027     ParallelComm* pco = context.pcomms[*pid];
1028 #endif
1029 
1030     for ( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
1031     {
1032 #ifdef MOAB_HAVE_MPI
1033         rval = pco->  get_owner ( *vit, element_ownership[i] );CHKERRVAL(rval);
1034 #else
1035         element_ownership[i] = 0; /* owned by 0 */
1036 #endif
1037     }
1038 
1039     return 0;
1040 }
1041 
1042 ErrCode iMOAB_GetElementID ( iMOAB_AppID pid, iMOAB_GlobalID* global_block_ID, int* num_elements_in_block, iMOAB_GlobalID* global_element_ID, iMOAB_LocalID* local_element_ID )
1043 {
1044     appData& data = context.appDatas[*pid];
1045     std::map<int, int>& matMap = data.matIndex;
1046 
1047     std::map<int, int>::iterator it = matMap.find ( *global_block_ID );
1048 
1049     if ( it == matMap.end() )
1050     { return 1; } // error in finding block with id
1051 
1052     int blockIndex = matMap[*global_block_ID];
1053     EntityHandle matMeshSet = data.mat_sets[blockIndex];
1054     Range elems;
1055     ErrorCode rval = context.MBI-> get_entities_by_handle ( matMeshSet, elems );CHKERRVAL(rval);
1056 
1057     if ( elems.empty() )
1058     { return 1; }
1059 
1060     if ( *num_elements_in_block != ( int ) elems.size() )
1061     { return 1; } // bad memory allocation
1062 
1063     rval = context.MBI->tag_get_data ( context.globalID_tag, elems, global_element_ID );CHKERRVAL(rval);
1064 
1065     // check that elems are among primary_elems in data
1066     for ( int i = 0; i < *num_elements_in_block; i++ )
1067     {
1068         local_element_ID[i] = data.primary_elems.index ( elems[i] );
1069 
1070         if ( -1 == local_element_ID[i] )
1071         { return 1; }// error, not in local primary elements
1072     }
1073 
1074     return 0;
1075 }
1076 
1077 ErrCode iMOAB_GetPointerToSurfaceBC ( iMOAB_AppID pid, int* surface_BC_length, iMOAB_LocalID* local_element_ID, int* reference_surface_ID, int* boundary_condition_value )
1078 {
1079     // we have to fill bc data for the neumann sets
1080     ErrorCode rval;
1081 
1082     // it was counted above, in GetMeshInfo
1083     appData& data = context.appDatas[*pid];
1084     int numNeuSets = ( int ) data.neu_sets.size();
1085 
1086     int index = 0; // index [0, surface_BC_length) for the arrays returned
1087 
1088     for ( int i = 0; i < numNeuSets; i++ )
1089     {
1090         Range subents;
1091         EntityHandle nset = data.neu_sets[i];
1092         rval = context.MBI->get_entities_by_dimension ( nset, data.dimension - 1, subents );CHKERRVAL(rval);
1093 
1094         int neuVal ;
1095         rval = context.MBI->tag_get_data ( context.neumann_tag, &nset, 1, &neuVal );CHKERRVAL(rval);
1096 
1097         for ( Range::iterator it = subents.begin(); it != subents.end(); ++it )
1098         {
1099             EntityHandle subent = *it;
1100             Range adjPrimaryEnts;
1101             rval = context.MBI->get_adjacencies ( &subent, 1, data.dimension, false, adjPrimaryEnts );CHKERRVAL(rval);
1102 
1103             // get global id of the primary ents, and side number of the quad/subentity
1104             // this is moab ordering
1105             for ( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
1106             {
1107                 EntityHandle primaryEnt = *pit;
1108                 // get global id
1109                 /*int globalID;
1110                 rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
1111                 if (MB_SUCCESS!=rval)
1112                   return 1;
1113                 global_element_ID[index] = globalID;*/
1114 
1115                 // get local element id
1116                 local_element_ID[index] = data.primary_elems.index ( primaryEnt );
1117 
1118                 if ( -1 == local_element_ID[index] )
1119                 { return 1; } // did not find the element locally
1120 
1121                 int side_number, sense, offset;
1122                 rval = context.MBI->side_number ( primaryEnt, subent,  side_number, sense, offset );
1123                 CHKERRVAL(rval);
1124 
1125                 reference_surface_ID[index] = side_number + 1; // moab is from 0 to 5, it needs 1 to 6
1126                 boundary_condition_value[index] = neuVal;
1127                 index++;
1128             }
1129         }
1130     }
1131 
1132     if ( index != *surface_BC_length )
1133     { return 1; } // error in array allocations
1134 
1135     return 0;
1136 }
1137 
1138 ErrCode iMOAB_GetPointerToVertexBC ( iMOAB_AppID pid, int* vertex_BC_length,
1139                                      iMOAB_LocalID* local_vertex_ID, int* boundary_condition_value )
1140 {
1141     ErrorCode rval;
1142 
1143     // it was counted above, in GetMeshInfo
1144     appData& data = context.appDatas[*pid];
1145     int numDiriSets = ( int ) data.diri_sets.size();
1146     int index = 0; // index [0, *vertex_BC_length) for the arrays returned
1147 
1148     for ( int i = 0; i < numDiriSets; i++ )
1149     {
1150         Range verts;
1151         EntityHandle diset = data.diri_sets[i];
1152         rval = context.MBI->get_entities_by_dimension ( diset, 0, verts );CHKERRVAL(rval);
1153 
1154         int diriVal;
1155         rval = context.MBI->tag_get_data ( context.dirichlet_tag, &diset, 1, &diriVal );CHKERRVAL(rval);
1156 
1157         for ( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
1158         {
1159             EntityHandle vt = *vit;
1160             /*int vgid;
1161             rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
1162             if (MB_SUCCESS!=rval)
1163               return 1;
1164             global_vertext_ID[index] = vgid;*/
1165             local_vertex_ID[index] = data.all_verts.index ( vt );
1166 
1167             if ( -1 == local_vertex_ID[index] )
1168             { return 1; } // vertex was not found
1169 
1170             boundary_condition_value[index] = diriVal;
1171             index++;
1172         }
1173     }
1174 
1175     if ( *vertex_BC_length != index )
1176     { return 1; } // array allocation issue
1177 
1178     return 0;
1179 }
1180 
1181 ErrCode iMOAB_DefineTagStorage ( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* tag_type, int* components_per_entity, int* tag_index,  int tag_storage_name_length )
1182 {
1183     // see if the tag is already existing, and if yes, check the type, length
1184     if ( *tag_type < 0 || *tag_type > 5 )
1185     { return 1; } // we have 6 types of tags supported so far
1186 
1187     DataType tagDataType;
1188     TagType tagType;
1189     void* defaultVal = NULL;
1190     int* defInt = new int [*components_per_entity];
1191     double* defDouble = new double [*components_per_entity];
1192     EntityHandle* defHandle = new EntityHandle[*components_per_entity];
1193 
1194     for ( int i = 0; i < *components_per_entity; i++ )
1195     {
1196         defInt[i] = 0;
1197         defDouble[i] = -1e+10;
1198         defHandle[i] = ( EntityHandle ) 0;
1199     }
1200 
1201     switch ( *tag_type )
1202     {
1203         case 0: tagDataType = MB_TYPE_INTEGER; tagType = MB_TAG_DENSE; defaultVal = defInt; break;
1204 
1205         case 1: tagDataType = MB_TYPE_DOUBLE;  tagType = MB_TAG_DENSE; defaultVal = defDouble; break;
1206 
1207         case 2: tagDataType = MB_TYPE_HANDLE;  tagType = MB_TAG_DENSE; defaultVal = defHandle; break;
1208 
1209         case 3: tagDataType = MB_TYPE_INTEGER; tagType = MB_TAG_SPARSE; defaultVal = defInt; break;
1210 
1211         case 4: tagDataType = MB_TYPE_DOUBLE;  tagType = MB_TAG_SPARSE; defaultVal = defDouble; break;
1212 
1213         case 5: tagDataType = MB_TYPE_HANDLE;  tagType = MB_TAG_SPARSE; defaultVal = defHandle; break;
1214 
1215         default :
1216         {
1217             delete [] defInt;
1218             delete [] defDouble;
1219             delete [] defHandle;
1220             return 1;
1221         } // error
1222     }
1223 
1224     std::string tag_name ( tag_storage_name );
1225 
1226     if ( tag_storage_name_length < ( int ) strlen ( tag_storage_name ) )
1227     {
1228         tag_name = tag_name.substr ( 0, tag_storage_name_length );
1229     }
1230 
1231     Tag tagHandle;
1232     ErrorCode rval = context.MBI->tag_get_handle ( tag_name.c_str(), *components_per_entity,
1233                      tagDataType,
1234                      tagHandle, tagType, defaultVal );
1235 
1236     if ( MB_TAG_NOT_FOUND == rval )
1237     {
1238 		rval = context.MBI->tag_get_handle ( tag_name.c_str(), *components_per_entity,
1239 						 tagDataType,
1240 						 tagHandle, tagType|MB_TAG_CREAT, defaultVal );
1241     }
1242 
1243     // we don't need default values anymore, avoid leaks
1244     delete [] defInt;
1245     delete [] defDouble;
1246     delete [] defHandle;
1247 
1248     appData& data = context.appDatas[*pid];
1249 
1250     if ( MB_ALREADY_ALLOCATED == rval )
1251     {
1252         std::map<std::string, Tag>& mTags = data.tagMap;
1253         std::map<std::string, Tag>::iterator mit = mTags.find ( tag_name );
1254 
1255         if ( mit == mTags.end() )
1256         {
1257             // add it to the map
1258             mTags[tag_name] = tagHandle;
1259             // push it to the list of tags, too
1260             *tag_index = ( int ) data.tagList.size();
1261             data.tagList.push_back ( tagHandle ) ;
1262         }
1263 
1264         return 0; // OK, we found it, and we have it stored in the map tag
1265     }
1266     else if ( MB_SUCCESS == rval )
1267     {
1268         data.tagMap[tag_name] = tagHandle;
1269         *tag_index = ( int ) data.tagList.size();
1270         data.tagList.push_back ( tagHandle ) ;
1271         return 0;
1272     }
1273     else
1274 	    return 1; // some error, maybe the tag was not created
1275 }
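// tag_type encoding handled above: 0/1/2 = dense int/double/handle, 3/4/5 = sparse
// int/double/handle. Definition sketch (illustrative; "temperature" is a hypothetical tag name):
//
//   int tagType = 1 /* dense double */, numComp = 1, tagIndex;
//   iMOAB_DefineTagStorage ( pid, "temperature", &tagType, &numComp, &tagIndex,
//                            (int)strlen("temperature") );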
1276 
1277 ErrCode iMOAB_SetIntTagStorage ( iMOAB_AppID pid, const iMOAB_String tag_storage_name,
1278                                  int* num_tag_storage_length, int* ent_type, int* tag_storage_data,
1279                                  int tag_storage_name_length )
1280 {
1281     std::string tag_name ( tag_storage_name );
1282 
1283     if ( tag_storage_name_length < ( int ) strlen ( tag_storage_name ) )
1284     {
1285         tag_name = tag_name.substr ( 0, tag_storage_name_length );
1286     }
1287 
1288     appData& data = context.appDatas[*pid];
1289 
1290     if ( data.tagMap.find ( tag_name ) == data.tagMap.end() )
1291     { return 1; } // tag not defined
1292 
1293     Tag tag =  data.tagMap[tag_name];
1294 
1295     int tagLength = 0;
1296     ErrorCode rval = context.MBI->tag_get_length ( tag, tagLength );
1297     CHKERRVAL(rval);
1298 
1299     DataType  dtype;
1300     rval = context.MBI->tag_get_data_type ( tag, dtype );
1301 
1302     if ( MB_SUCCESS != rval || dtype != MB_TYPE_INTEGER )
1303     { return 1; }
1304 
1305     // set it on a subset of entities, based on type and length
1306     Range* ents_to_set;
1307 
1308     if ( *ent_type == 0 ) // vertices
1309     { ents_to_set = &data.all_verts; }
1310     else  // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
1311     { ents_to_set = &data.primary_elems; }
1312 
1313     int nents_to_be_set = *num_tag_storage_length / tagLength;
1314 
1315     if ( nents_to_be_set > ( int ) ents_to_set->size() || nents_to_be_set < 1 )
1316     { return 1; } // too many entities to be set, or too few
1317 
1318     // restrict the range; everything is contiguous; or not?
1319 
1320     Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set - 1 ) );
1321     rval = context.MBI->tag_set_data ( tag, contig_range, tag_storage_data );CHKERRVAL(rval);
1322 
1323     return 0; // no error
1324 }
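// Convention for the Set/Get*TagStorage routines: *num_tag_storage_length is the total
// number of values, i.e. (number of entities) x (components per entity); *ent_type selects
// vertices (0) or primary elements (1).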
1325 
1326 ErrCode iMOAB_GetIntTagStorage ( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* num_tag_storage_length, int* ent_type, int* tag_storage_data, int tag_storage_name_length )
1327 {
1328     ErrorCode rval;
1329     std::string tag_name ( tag_storage_name );
1330 
1331     if ( tag_storage_name_length < ( int ) tag_name.length() )
1332     {
1333         tag_name = tag_name.substr ( 0, tag_storage_name_length );
1334     }
1335 
1336     appData& data = context.appDatas[*pid];
1337 
1338     if ( data.tagMap.find ( tag_name ) == data.tagMap.end() )
1339     { return 1; } // tag not defined
1340 
1341     Tag tag =  data.tagMap[tag_name];
1342 
1343     int tagLength = 0;
1344     rval = context.MBI->tag_get_length ( tag, tagLength );CHKERRVAL(rval);
1345 
1346     DataType  dtype;
1347     rval = context.MBI->tag_get_data_type ( tag, dtype );CHKERRVAL(rval);
1348 
1349     if ( dtype != MB_TYPE_INTEGER )
1350     { return 1; }
1351 
1352     // set it on a subset of entities, based on type and length
1353     Range* ents_to_get;
1354 
1355     if ( *ent_type == 0 ) // vertices
1356     { ents_to_get = &data.all_verts; }
1357     else  // if (*ent_type == 1)
1358     { ents_to_get = &data.primary_elems; }
1359 
1360     int nents_to_get = *num_tag_storage_length / tagLength;
1361 
1362     if ( nents_to_get > ( int ) ents_to_get->size() || nents_to_get < 1 )
1363     { return 1; } // too many entities to get, or too few
1364 
1365     // restrict the range; everything is contiguous; or not?
1366     Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1 ) );
1367 
1368     rval = context.MBI->tag_get_data ( tag, contig_range, tag_storage_data );CHKERRVAL(rval);
1369 
1370     return 0; // no error
1371 }
1372 
1373 ErrCode iMOAB_SetDoubleTagStorage ( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* num_tag_storage_length, int* ent_type, double* tag_storage_data, int tag_storage_name_length )
1374 {
1375     ErrorCode rval;
1376     // exactly the same code as for int tag :) maybe should check the type of tag too
1377     std::string tag_name ( tag_storage_name );
1378 
1379     if ( tag_storage_name_length < ( int ) tag_name.length() )
1380     {
1381         tag_name = tag_name.substr ( 0, tag_storage_name_length );
1382     }
1383 
1384     appData& data = context.appDatas[*pid];
1385 
1386     if ( data.tagMap.find ( tag_name ) == data.tagMap.end() )
1387     { return 1; } // tag not defined
1388 
1389     Tag tag =  data.tagMap[tag_name];
1390 
1391     int tagLength = 0;
1392     rval = context.MBI->tag_get_length ( tag, tagLength );CHKERRVAL(rval);
1393 
1394     DataType  dtype;
1395     rval = context.MBI->tag_get_data_type ( tag, dtype );
1396 
1397     if ( MB_SUCCESS != rval || dtype != MB_TYPE_DOUBLE )
1398     { return 1; }
1399 
1400     // set it on a subset of entities, based on type and length
1401     Range* ents_to_set = NULL;
1402 
1403     if ( * ent_type == 0 ) // vertices
1404     { ents_to_set = &data.all_verts; }
1405     else if ( * ent_type == 1 )
1406     { ents_to_set = &data.primary_elems; }
1407 
1408     int nents_to_be_set = *num_tag_storage_length / tagLength;
1409 
1410     if ( nents_to_be_set > ( int ) ents_to_set->size() || nents_to_be_set < 1 )
1411     { return 1; } // too many entities to be set, or too few
1412 
1413     // restrict the range; everything is contiguous; or not?
1414     Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set - 1 ) );
1415 
1416     rval = context.MBI->tag_set_data ( tag, contig_range, tag_storage_data );CHKERRVAL(rval);
1417 
1418     return 0; // no error
1419 }
1420 
1421 ErrCode iMOAB_GetDoubleTagStorage ( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* num_tag_storage_length, int* ent_type, double* tag_storage_data, int tag_storage_name_length )
1422 {
1423     ErrorCode rval;
1424     // exactly the same code, except tag type check
1425     std::string tag_name ( tag_storage_name );
1426 
1427     if ( tag_storage_name_length < ( int ) tag_name.length() )
1428     {
1429         tag_name = tag_name.substr ( 0, tag_storage_name_length );
1430     }
1431 
1432     appData& data = context.appDatas[*pid];
1433 
1434     if ( data.tagMap.find ( tag_name ) == data.tagMap.end() )
1435     { return 1; } // tag not defined
1436 
1437     Tag tag =  data.tagMap[tag_name];
1438 
1439     int tagLength = 0;
1440     rval = context.MBI->tag_get_length ( tag, tagLength );CHKERRVAL(rval);
1441 
1442     DataType  dtype;
1443     rval = context.MBI->tag_get_data_type ( tag, dtype );CHKERRVAL(rval);
1444 
1445     if ( dtype != MB_TYPE_DOUBLE )
1446     { return 1; }
1447 
1448     // set it on a subset of entities, based on type and length
1449     Range* ents_to_get = NULL;
1450 
1451     if ( * ent_type == 0 ) // vertices
1452     { ents_to_get = &data.all_verts; }
1453     else if ( * ent_type == 1 )
1454     { ents_to_get = &data.primary_elems; }
1455 
1456     int nents_to_get = *num_tag_storage_length / tagLength;
1457 
1458     if ( nents_to_get > ( int ) ents_to_get->size() || nents_to_get < 1 )
1459     { return 1; } // too many entities to get, or too few
1460 
1461     // restrict to the first nents_to_get entities in the range; this assumes the handles are contiguous
1462 
1463     Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1 ) );
1464     rval = context.MBI->tag_get_data ( tag, contig_range, tag_storage_data );CHKERRVAL(rval);
1465 
1466     return 0; // no error
1467 }
1468 
1469 ErrCode iMOAB_SynchronizeTags ( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type )
1470 {
1471 #ifdef MOAB_HAVE_MPI
1472     appData& data = context.appDatas[*pid];
1473     Range ent_exchange;
1474     std::vector<Tag> tags;
1475 
1476     for ( int i = 0; i < * num_tag; i++ )
1477     {
1478         if ( tag_indices[i] < 0 || tag_indices[i] >= ( int ) data.tagList.size() )
1479         { return 1 ; } // error in tag index
1480 
1481         tags.push_back ( data.tagList[tag_indices[i]] );
1482     }
1483 
1484     if ( * ent_type == 0 )
1485     { ent_exchange = data.all_verts; }
1486     else if ( *ent_type == 1 )
1487     { ent_exchange = data.primary_elems; }
1488     else
1489     { return 1; } // unexpected type
1490 
1491     ParallelComm* pco = context.pcomms[*pid];
1492 
1493     ErrorCode rval = pco->exchange_tags ( tags, tags, ent_exchange );
1494 	CHKERRVAL(rval);
1495 
1496 #else
1497     /* do nothing if serial */
1498     // silence unused-parameter warnings; there is nothing to synchronize in a serial build
1499     (void) pid; (void) num_tag; (void) tag_indices; (void) ent_type;
1501 #endif
1502 
1503     return 0;
1504 }
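// A minimal usage sketch (not compiled): it assumes `appID` is a registered application with
// at least two tags already defined; the indices refer to the order in which the tags were
// defined, i.e. their position in the application's tag list.
#if 0
    int numTags = 2;
    int tagIndices[2] = { 0, 1 };   // first two defined tags
    int entType = 0;                // synchronize over shared vertices
    ErrCode ierr = iMOAB_SynchronizeTags ( &appID, &numTags, tagIndices, &entType );
#endif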
1505 
1506 ErrCode iMOAB_ReduceTagsMax ( iMOAB_AppID pid, int* tag_index, int* ent_type )
1507 {
1508 
1509 #ifdef MOAB_HAVE_MPI
1510     appData& data = context.appDatas[*pid];
1511     Range ent_exchange;
1512 
1513 
1514 	if ( *tag_index < 0 || *tag_index >= ( int ) data.tagList.size() )
1515         { return 1 ; } // error in tag index
1516 
1517 	Tag tagh = data.tagList[*tag_index];
1518 
1519 
1520     if ( * ent_type == 0 )
1521     { ent_exchange = data.all_verts; }
1522     else if ( *ent_type == 1 )
1523     { ent_exchange = data.primary_elems; }
1524     else
1525     { return 1; } // unexpected type
1526 
1527     ParallelComm* pco = context.pcomms[*pid];
1528     // we could do different MPI_Op; do not bother now, we will call from fortran
1529     ErrorCode rval = pco->reduce_tags ( tagh, MPI_MAX, ent_exchange );
1530 	CHKERRVAL(rval);
1531 
1532 #else
1533     /* do nothing if serial */
1534     // silence unused-parameter warnings; there is nothing to reduce in a serial build
1535     (void) pid; (void) tag_index; (void) ent_type;
1537 #endif
1538     return 0;
1539 }
1540 
1541 
1542 ErrCode iMOAB_GetNeighborElements ( iMOAB_AppID pid, iMOAB_LocalID* local_index, int* num_adjacent_elements, iMOAB_LocalID* adjacent_element_IDs )
1543 {
1544     ErrorCode rval;
1545 
1546     // one neighbor for each subentity of dimension-1
1547     MeshTopoUtil mtu ( context.MBI );
1548     appData& data = context.appDatas[*pid];
1549     EntityHandle eh = data.primary_elems[*local_index];
1550     Range adjs;
1551     rval = mtu.get_bridge_adjacencies ( eh, data.dimension - 1, data.dimension, adjs );CHKERRVAL(rval);
1552 
1553     if ( * num_adjacent_elements < ( int ) adjs.size() )
1554     { return 1; } // adjacent_element_IDs array is not dimensioned large enough
1555 
1556     *num_adjacent_elements = ( int ) adjs.size();
1557 
1558     for ( int i = 0; i < * num_adjacent_elements; i++ )
1559     {
1560         adjacent_element_IDs[i] = data.primary_elems.index ( adjs[i] );
1561     }
1562 
1563     return 0;
1564 }
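// A minimal usage sketch (not compiled), assuming `appID` is a registered application with a
// loaded mesh: query the dimension-1 bridge neighbors of the first locally indexed element.
#if 0
    iMOAB_LocalID localElem = 0;
    int numAdjacent = 6;            // capacity of the output array (upper bound on neighbors)
    std::vector<iMOAB_LocalID> adjacentIDs ( numAdjacent );
    ErrCode ierr = iMOAB_GetNeighborElements ( &appID, &localElem, &numAdjacent, &adjacentIDs[0] );
    // on return, numAdjacent holds the actual number of neighbors found
#endif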
1565 
1566 #if 0
1567 
1568 ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs )
1569 {
1570     return 0;
1571 }
1572 
1573 #endif
1574 
1575 
1576 ErrCode iMOAB_CreateVertices ( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
1577 {
1578     ErrorCode rval;
1579     appData& data = context.appDatas[*pid];
1580 
1581     if ( !data.local_verts.empty() ) // we should have no vertices in the app
1582     { return 1; }
1583 
1584     int nverts = *coords_len / *dim;
1585 
1586     rval = context.MBI->create_vertices ( coordinates, nverts, data.local_verts );CHKERRVAL(rval);
1587 
1588     rval = context.MBI->add_entities ( data.file_set, data.local_verts );CHKERRVAL(rval);
1589 
1590     // also add the vertices to the all_verts range
1591     data.all_verts.merge ( data.local_verts );
1592     return 0;
1593 }
1594 
1595 
1596 ErrCode iMOAB_CreateElements ( iMOAB_AppID pid, int* num_elem, int* type,  int* num_nodes_per_element,  int* connectivity,
1597                                int* block_ID )
1598 {
1599     // Create elements
1600     appData& data = context.appDatas[*pid];
1601 
1602     ReadUtilIface* read_iface;
1603     ErrorCode rval = context.MBI->query_interface ( read_iface );CHKERRVAL(rval);
1604 
1605     EntityType mbtype = ( EntityType ) ( *type );
1606     EntityHandle actual_start_handle;
1607     EntityHandle* array = NULL;
1608     rval = read_iface->get_element_connect ( *num_elem,
1609             *num_nodes_per_element,
1610             mbtype,
1611             1,
1612             actual_start_handle,
1613             array );CHKERRVAL(rval);
1614 
1615     // fill up with actual connectivity from input; assume the vertices are in order, and start vertex is
1616     // the first in the current data vertex range
1617     EntityHandle firstVertex = data.local_verts[0];
1618 
1619     for ( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
1620     { array[j] = connectivity[j] + firstVertex - 1; } // assumes connectivity uses 1 based array (from fortran, mostly)
1621 
1622     Range new_elems ( actual_start_handle, actual_start_handle + *num_elem - 1 );
1623 
1624     rval = context.MBI->add_entities ( data.file_set, new_elems );CHKERRVAL(rval);
1625 
1626     data.primary_elems.merge ( new_elems );
1627 
1628     // add to adjacency
1629     rval  = read_iface->update_adjacencies(actual_start_handle,
1630         *num_elem,
1631         *num_nodes_per_element,
1632         array); CHKERRVAL(rval);
1633     // organize all new elements in a block with the given block ID; if the block set does not exist, create a
1634     // new mesh set for it
1635     Range sets;
1636     int set_no = *block_ID;
1637     const void* setno_ptr = &set_no;
1638     rval = context.MBI->get_entities_by_type_and_tag ( data.file_set, MBENTITYSET,
1639             &context.material_tag, &setno_ptr, 1, sets );
1640     EntityHandle block_set;
1641 
1642     if ( MB_FAILURE == rval || sets.empty() )
1643     {
1644         // create a new set, with this block ID
1645         rval = context.MBI->create_meshset ( MESHSET_SET, block_set );CHKERRVAL(rval);
1646 
1647         rval = context.MBI->tag_set_data ( context.material_tag, &block_set, 1, &set_no );CHKERRVAL(rval);
1648 
1649         // add the material set to file set
1650         rval = context.MBI->add_entities ( data.file_set, &block_set, 1 );CHKERRVAL(rval);
1651     }
1652     else
1653     { block_set = sets[0]; } // first set is the one we want
1654 
1655     // add the new elements to the block set
1656     rval = context.MBI->add_entities ( block_set, new_elems );CHKERRVAL(rval);
1657 
1658     return 0;
1659 }
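// A minimal usage sketch (not compiled): create a single quad on a registered application
// `appID`, illustrating the interleaved coordinates and the 1-based connectivity expected here.
#if 0
    double coords[12] = { 0., 0., 0.,   1., 0., 0.,   1., 1., 0.,   0., 1., 0. };
    int coordsLen = 12, dim = 3;
    ErrCode ierr = iMOAB_CreateVertices ( &appID, &coordsLen, &dim, coords );

    int numElem = 1, elemType = (int) moab::MBQUAD, nodesPerElem = 4, blockID = 100;
    int conn[4] = { 1, 2, 3, 4 };   // 1-based, relative to the vertices just created
    ierr = iMOAB_CreateElements ( &appID, &numElem, &elemType, &nodesPerElem, conn, &blockID );
#endif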
1660 
1661 
1662 // this makes sense only for parallel runs
1663 ErrCode iMOAB_ResolveSharedEntities (  iMOAB_AppID pid, int* num_verts, int* marker )
1664 {
1665 #ifdef MOAB_HAVE_MPI
1666     appData& data = context.appDatas[*pid];
1667     ParallelComm* pco = context.pcomms[*pid];
1668 
1669     // create an integer tag used for resolving; it may need to become a 64-bit tag in the future
1670     // (for meshes with more than 2 billion vertices)
1671     int dum_id = 0;
1672     Tag stag;
1673     ErrorCode rval = context.MBI->tag_get_handle ( "__sharedmarker", 1,  MB_TYPE_INTEGER, stag,
1674                      MB_TAG_CREAT | MB_TAG_DENSE, &dum_id );CHKERRVAL(rval);
1675 
1676     if ( *num_verts > ( int ) data.local_verts.size() )
1677     { return 1; } // more markers passed in than local vertices
1678 
1679     rval = context.MBI->tag_set_data ( stag, data.local_verts, ( void* ) marker );CHKERRVAL(rval); // assumes an integer marker array
1680     EntityHandle cset = data.file_set;
1681     rval = pco->resolve_shared_ents ( cset, -1, -1, &stag );CHKERRVAL(rval);
1682 
1683     rval = context.MBI->tag_delete(stag); CHKERRVAL(rval);
1684     // provide partition tag equal to rank
1685     Tag part_tag;
1686     dum_id = -1;
1687     rval = context.MBI->tag_get_handle ( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER,
1688                                          part_tag, MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );CHKERRVAL(rval);
1689 
1690     int rank = pco->rank();
1691     rval = context.MBI->tag_set_data ( part_tag, &cset, 1, &rank );CHKERRVAL(rval);
1692 
1693 #endif
1694     return 0;
1695 }
1696 
1697 
1698 // this assumes it has not been called before
1699 ErrCode iMOAB_DetermineGhostEntities (  iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim )
1700 {
1701     if ( *num_ghost_layers <= 0 )
1702     { return 0; } // nothing to do
1703 
1704 #ifdef MOAB_HAVE_MPI
1705     appData& data = context.appDatas[*pid];
1706     ParallelComm* pco = context.pcomms[*pid];
1707 
1708     int addl_ents = 0; //maybe we should be passing this too; most of the time we do not need additional ents
1709     ErrorCode rval = pco->exchange_ghost_cells ( *ghost_dim, *bridge_dim,
1710                      *num_ghost_layers, addl_ents, true, true, &data.file_set ); // collective call
1711 
1712     if ( rval != MB_SUCCESS )
1713     { return 1; }
1714 
1715     // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the file set
1716     int rc = iMOAB_UpdateMeshInfo ( pid );
1717     return rc;
1718 #endif
1719     return 0;
1720 }
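// A minimal parallel usage sketch (not compiled): resolve shared vertices using their global
// IDs as markers, then request one layer of ghost cells; `appID` and `globalVertexIds` are
// assumed to be set up by the caller.
#if 0
    int numVerts = (int) globalVertexIds.size();
    ErrCode ierr = iMOAB_ResolveSharedEntities ( &appID, &numVerts, &globalVertexIds[0] );

    int ghostDim = 2, bridgeDim = 0, numLayers = 1;   // 2d cells ghosted through shared vertices
    ierr = iMOAB_DetermineGhostEntities ( &appID, &ghostDim, &numLayers, &bridgeDim );
#endif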
1721 
1722 
1723 ErrCode iMOAB_SetGlobalInfo ( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
1724 {
1725     appData& data = context.appDatas[*pid];
1726     data.num_global_vertices = *num_global_verts;
1727     data.num_global_elements = *num_global_elems;
1728     return 0;
1729 }
1730 
1731 
1732 ErrCode iMOAB_GetGlobalInfo ( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
1733 {
1734     appData& data = context.appDatas[*pid];
1735 
1736     if ( NULL != num_global_verts ) { *num_global_verts = data.num_global_vertices; }
1737 
1738     if ( NULL != num_global_elems ) { *num_global_elems = data.num_global_elements; }
1739 
1740     return 0;
1741 }
1742 
1743 
1744 #ifdef MOAB_HAVE_MPI
1745 
1746 ErrCode iMOAB_SendMesh ( iMOAB_AppID pid, MPI_Comm* global, MPI_Group* receivingGroup, int* rcompid, int * method )
1747 {
1748     int ierr=0;
1749     ErrorCode rval=MB_SUCCESS;
1750     //appData & data = context.appDatas[*pid];
1751     ParallelComm* pco = context.pcomms[*pid];
1752 
1753     MPI_Comm sender = pco->comm(); // the sender comm is obtained from parallel comm in moab;
1754     // no need to pass it along
1755     // first see what are the processors in each group; get the sender group too, from the sender communicator
1756     MPI_Group senderGroup;
1757     ierr = MPI_Comm_group ( sender, &senderGroup );
1758     if ( ierr != 0 ) return 1;
1759 
1760     // instantiate the par comm graph
1761     // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1, int coid2)
1762     ParCommGraph* cgraph = new ParCommGraph ( *global, senderGroup, *receivingGroup, context.appDatas[*pid].external_id, *rcompid );
1763     // we should search if we have another pcomm with the same comp ids in the list already
1764     // sort of check existing comm graphs in the list context.appDatas[*pid].pgraph
1765     context.appDatas[*pid].pgraph.push_back ( cgraph );
1766 
1767     int sender_rank = -1;
1768     MPI_Comm_rank ( sender, &sender_rank );
1769 
1770     // decide how to distribute elements to each processor
1771     // now, get the entities on local processor, and pack them into a buffer for various processors
1772     // we will do trivial partition: first get the total number of elements from "sender"
1773     std::vector<int> number_elems_per_part;
1774     // how to distribute local elements to receiving tasks?
1775     // trivial partition: first compute the total number of elements that need to be sent
1776     Range& owned = context.appDatas[*pid].owned_elems;
1777 
1778     if (*method == 0) // trivial partitioning, old method
1779     {
1780       int local_owned_elem = ( int ) owned.size();
1781       int size = pco->size();
1782       int rank = pco->rank();
1783       number_elems_per_part.resize ( size ); //
1784       number_elems_per_part[rank] = local_owned_elem;
1785   #if (MPI_VERSION >= 2)
1786       // Use "in place" option
1787       ierr = MPI_Allgather ( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
1788                              &number_elems_per_part[0], 1, MPI_INT,
1789                              sender );
1790   #else
1791       {
1792           std::vector<int> all_tmp ( size );
1793           ierr = MPI_Allgather ( &number_elems_per_part[rank], 1, MPI_INT,
1794                                  &all_tmp[0], 1, MPI_INT,
1795                                  sender );
1796           number_elems_per_part = all_tmp;
1797       }
1798   #endif
1799 
1800       if ( ierr != 0 )
1801       { return 1; }
1802 
1803       // every sender computes the trivial partition; it is cheap, and the graph needs to be sent to the receivers anyway
1804       rval = cgraph->compute_trivial_partition ( number_elems_per_part );
1805 
1806       if ( MB_SUCCESS != rval )
1807       { return 1; }
1808 
1809       rval = cgraph->send_graph ( *global );
1810 
1811       if ( MB_SUCCESS != rval )
1812       { return 1; }
1813     }
1814     else // *method != 0, so it is either graph or geometric, parallel
1815     {
1816       // owned are the primary elements on this app
1817         rval = cgraph->compute_partition(pco, owned, *method);
1818         if ( rval != MB_SUCCESS )
1819         { return 1; }
1820         // send the graph to the receiver side, with non-blocking sends
1821         rval = cgraph->send_graph_partition (pco, *global );
1822         if ( MB_SUCCESS != rval )
1823         { return 1; }
1824     }
1825     // pco is needed to pack, not for communication
1826     rval = cgraph->send_mesh_parts ( *global, pco, owned );
1827 
1828     if ( rval != MB_SUCCESS ) { return 1; }
1829 
1830     // mark for deletion
1831     MPI_Group_free(&senderGroup);
1832     return 0;
1833 }
1834 
1835 
1836 ErrCode iMOAB_ReceiveMesh ( iMOAB_AppID pid, MPI_Comm* global, MPI_Group* sendingGroup,
1837                             int* scompid )
1838 {
1839     appData& data = context.appDatas[*pid];
1840     ParallelComm* pco = context.pcomms[*pid];
1841     MPI_Comm receive = pco->comm();
1842     EntityHandle local_set = data.file_set;
1843     ErrorCode rval;
1844 
1845     // first see what are the processors in each group; get the sender group too, from the sender communicator
1846     MPI_Group receiverGroup;
1847     int ierr = MPI_Comm_group ( receive, &receiverGroup );CHKIERRVAL(ierr);
1848 
1849     // instantiate the par comm graph
1850     ParCommGraph* cgraph = new ParCommGraph ( *global, *sendingGroup, receiverGroup, *scompid, context.appDatas[*pid].external_id );
1851     // TODO we should search if we have another pcomm with the same comp ids in the list already
1852     // sort of check existing comm graphs in the list context.appDatas[*pid].pgraph
1853     context.appDatas[*pid].pgraph.push_back ( cgraph );
1854 
1855     int receiver_rank = -1;
1856     MPI_Comm_rank ( receive, &receiver_rank );
1857 
1858     // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
1859     // knows what data to expect
1860     std::vector<int> pack_array;
1861     rval = cgraph->receive_comm_graph ( *global, pco, pack_array );CHKERRVAL(rval);
1862 
1863     // determine which senders correspond to the current receiver
1864     int current_receiver = cgraph->receiver ( receiver_rank );
1865 
1866     std::vector<int> senders_local;
1867     size_t n = 0;
1868 
1869     while ( n < pack_array.size() )
1870     {
1871         if ( current_receiver == pack_array[n] )
1872         {
1873             for ( int j = 0; j < pack_array[n + 1]; j++ )
1874             { senders_local.push_back ( pack_array[n + 2 + j] ); }
1875 
1876             break;
1877         }
1878 
1879         n = n + 2 + pack_array[n + 1];
1880     }
1881 
1882 #ifdef VERBOSE
1883     std::cout << " receiver " << current_receiver << " at rank " <<
1884                receiver_rank << " will receive from " << senders_local.size() << " tasks: ";
1885 
1886     for ( int k = 0; k < ( int ) senders_local.size(); k++ )
1887     { std::cout << " " << senders_local[k]; }
1888 
1889     std::cout << "\n";
1890 #endif
1891 
1892     if (senders_local.empty())
1893     {
1894       std::cout <<" we do not have any senders for receiver rank " << receiver_rank << "\n";
1895     }
1896     rval = cgraph->receive_mesh ( *global, pco, local_set, senders_local );CHKERRVAL(rval);
1897 
1898     // after we are done, we could merge vertices that come from different senders, but
1899     // have the same global id
1900     Tag idtag;
1901     rval = context.MBI->tag_get_handle ( "GLOBAL_ID", idtag );CHKERRVAL(rval);
1902 
1903     if ( ( int ) senders_local.size() >= 2 ) // need to remove duplicate vertices
1904         // that might come from different senders
1905     {
1906         Range local_ents;
1907         rval = context.MBI->get_entities_by_handle ( local_set, local_ents );CHKERRVAL(rval);
1908 
1909         Range local_verts = local_ents.subset_by_type ( MBVERTEX );
1910         Range local_elems = subtract ( local_ents, local_verts );
1911 
1912         // remove from local set the vertices
1913         rval = context.MBI->remove_entities ( local_set, local_verts );CHKERRVAL(rval);
1914 
1915 #ifdef VERBOSE
1916         std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
1917 #endif
1918         MergeMesh mm ( context.MBI );
1919 
1920         rval = mm.merge_using_integer_tag ( local_verts, idtag );CHKERRVAL(rval);
1921 
1922         Range new_verts; // vertices used by the local elements, collected after the merge
1923         rval = context.MBI->get_connectivity ( local_elems, new_verts );CHKERRVAL(rval);
1924 
1925 #ifdef VERBOSE
1926         std::cout << "after merging: new verts: " << new_verts.size() << "\n";
1927 #endif
1928         rval = context.MBI->add_entities ( local_set, new_verts );CHKERRVAL(rval);
1929     }
1930 
1931     // still need to resolve shared entities (in this case, vertices )
1932     rval = pco->resolve_shared_ents ( local_set, -1, -1, &idtag );
1933 
1934     if ( rval != MB_SUCCESS ) { return 1; }
1935 
1936     // set the parallel partition tag
1937     Tag part_tag;
1938 	int dum_id = -1;
1939 	rval = context.MBI->tag_get_handle ( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER,
1940 										 part_tag, MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );CHKERRVAL(rval);
1941 
1942 	int rank = pco->rank();
1943 	rval = context.MBI->tag_set_data ( part_tag, &local_set, 1, &rank );CHKERRVAL(rval);
1944 
1945     // populate the mesh with current data info
1946     ierr = iMOAB_UpdateMeshInfo ( pid );CHKIERRVAL(ierr);
1947 
1948     // mark for deletion
1949     MPI_Group_free(&receiverGroup);
1950 
1951     return 0;
1952 }
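// A minimal migration sketch (not compiled): the sender and receiver applications live on
// different groups inside a joint communicator; all names below (srcAppID, tgtAppID,
// jointComm, receiverGroup, senderGroup, rcvCompID, srcCompID) are assumptions of the caller.
#if 0
    // on the sending component tasks (method 0 = trivial partition)
    int method = 0;
    ErrCode ierr = iMOAB_SendMesh ( &srcAppID, &jointComm, &receiverGroup, &rcvCompID, &method );

    // on the receiving component tasks
    ierr = iMOAB_ReceiveMesh ( &tgtAppID, &jointComm, &senderGroup, &srcCompID );

    // the sender should eventually release its non-blocking buffers with iMOAB_FreeSenderBuffers
#endif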
1953 
1954 ErrCode FindParCommGraph(iMOAB_AppID pid, int *scompid, int *rcompid, ParCommGraph *& cgraph, int * sense)
1955 {
1956   //appData& data = context.appDatas[*pid];
1957   cgraph = NULL;
1958   //ParallelComm* pco = context.pcomms[*pid];
1959   std::vector<ParCommGraph*> & vpg = context.appDatas[*pid].pgraph;
1960   size_t i = 0;
1961   *sense = 0;
1962   for (i=0; i<vpg.size(); i++)
1963   {
1964     ParCommGraph * pg=vpg[i];
1965     if ( (pg-> get_component_id1() == *scompid )&& (pg-> get_component_id2() == *rcompid ))
1966     {
1967       cgraph = pg;
1968       *sense = 1;
1969       break;
1970     }
1971     if ( (pg-> get_component_id2() == *scompid )&& (pg-> get_component_id1() == *rcompid ))
1972     {
1973       cgraph = pg;
1974       *sense = -1;
1975       break;
1976     }
1977   }
1978   if ( i< vpg.size() && NULL!=cgraph )
1979     return 0;
1980 
1981   return 1; // error, we did not find cgraph
1982 }
1983 
1984 void split_tag_names(std::string input_names, std::string & separator, std::vector<std::string> & list_tag_names)
1985 {
1986   size_t pos = 0;
1987   std::string token;
1988   while ((pos = input_names.find(separator)) != std::string::npos) {
1989       token = input_names.substr(0, pos);
1990       list_tag_names.push_back(token);
1991       //std::cout << token << std::endl;
1992       input_names.erase(0, pos + separator.length());
1993   }
1994   if (!input_names.empty())
1995   {
1996     // handle a leftover name when the string does not end with the separator
1997     list_tag_names.push_back(input_names);
1998   }
1999   return ;
2000 }
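// A small illustration of the helper above (not compiled); the tag names are hypothetical.
#if 0
    std::vector<std::string> names;
    std::string sep ( ";" );
    split_tag_names ( "a2oTbot;a2oUbot;a2oVbot", sep, names );
    // names now contains { "a2oTbot", "a2oUbot", "a2oVbot" }
#endif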
2001 
2002 ErrCode iMOAB_SendElementTag(iMOAB_AppID pid, int* scompid, int* rcompid, const iMOAB_String tag_storage_name,
2003     MPI_Comm* join, int tag_storage_name_length)
2004 {
2005   // first, based on the scompid and rcompid, find the parCommGraph corresponding to this exchange
2006   // instantiate the par comm graph
2007   // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1, int coid2)
2008   ParCommGraph* cgraph = NULL;
2009   int sense  = 0;
2010   int ierr = FindParCommGraph(pid, scompid, rcompid, cgraph, &sense);CHKIERRVAL(ierr);
2011   if ( NULL == cgraph ) { return 1; }
2012 
2013   ParallelComm* pco = context.pcomms[*pid];
2014   Range& owned = context.appDatas[*pid].owned_elems;
2015 
2016   std::string tag_name ( tag_storage_name );
2017 
2018   if ( tag_storage_name_length < ( int ) strlen ( tag_storage_name ) )
2019   {
2020       tag_name = tag_name.substr ( 0, tag_storage_name_length );
2021   }
2022   // we assume the tags are already defined, so they can be retrieved
2023   //   simply by name
2024   // we assume that there are separators ";" between the tag names
2025   std::vector<std::string> tagNames;
2026   std::vector<Tag> tagHandles;
2027   std::string separator(";");
2028   split_tag_names( tag_name, separator, tagNames);
2029   ErrorCode rval ;
2030   for (size_t i=0; i < tagNames.size(); i++ )
2031   {
2032     Tag tagHandle;
2033     rval = context.MBI->tag_get_handle ( tagNames[i].c_str(), tagHandle);
2034     if ( MB_SUCCESS != rval || NULL == tagHandle) { return 1; }
2035     tagHandles.push_back(tagHandle);
2036   }
2037 
2038   // pco is needed to pack, and for moab instance, not for communication!
2039   // still use nonblocking communication, over the joint comm
2040   rval = cgraph->send_tag_values ( *join, pco, owned, tagHandles );
2041 
2042   if ( MB_SUCCESS != rval ) { return 1; }
2043   // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
2044 
2045   return 0;
2046 }
2047 
2048 ErrCode iMOAB_ReceiveElementTag(iMOAB_AppID pid, int* scompid, int* rcompid, const iMOAB_String tag_storage_name,
2049     MPI_Comm* join, int tag_storage_name_length)
2050 {
2051   appData& data = context.appDatas[*pid];
2052   // first, based on the scompid and rcompid, find the parCommGraph corresponding to this exchange
2053   // instantiate the par comm graph
2054   // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1, int coid2)
2055   ParCommGraph* cgraph = NULL;
2056   int sense  = 0;
2057   int ierr = FindParCommGraph(pid, scompid, rcompid, cgraph, &sense);CHKIERRVAL(ierr);
2058   if ( NULL == cgraph ) { return 1; }
2059 
2060   ParallelComm* pco = context.pcomms[*pid];
2061   Range& owned = context.appDatas[*pid].owned_elems;
2062 
2063   // how do I know if this receiver already participated in an intersection driven by coupler?
2064   // also, what if this was the "source" mesh in intx?
2065   // in that case, the elements might have been instantiated in the coverage set locally, the "owned"
2066   // range can be different
2067   // the elements are now in tempestRemap coverage_set
2068   /*
2069    * data_intx.remapper exists though only on the intersection application
2070    *  how do we get from here ( we know the pid that receives, and the commgraph used by migrate mesh )
2071    */
2072 
2073   std::string tag_name ( tag_storage_name );
2074 
2075   if ( tag_storage_name_length < ( int ) strlen ( tag_storage_name ) )
2076   {
2077       tag_name = tag_name.substr ( 0, tag_storage_name_length );
2078   }
2079 
2080   // we assume that there are separators ";" between the tag names
2081   std::vector<std::string> tagNames;
2082   std::vector<Tag> tagHandles;
2083   std::string separator(";");
2084   split_tag_names( tag_name, separator, tagNames);
2085   ErrorCode rval ;
2086   for (size_t i=0; i < tagNames.size(); i++ )
2087   {
2088     Tag tagHandle;
2089     rval = context.MBI->tag_get_handle ( tagNames[i].c_str(), tagHandle);
2090     if ( MB_SUCCESS != rval || NULL == tagHandle) { return 1; }
2091     tagHandles.push_back(tagHandle);
2092   }
2093 
2094   if ( data.file_set != data.covering_set) // coverage mesh is different from original mesh, it means we are on a source mesh, after intx
2095   {
2096     rval = context.MBI->get_entities_by_dimension(data.covering_set, 2, owned); if ( MB_SUCCESS != rval ) { return 1; }
2097   }
2098   // pco is needed to pack, and for moab instance, not for communication!
2099   // still use nonblocking communication
2100   rval = cgraph->receive_tag_values ( *join, pco, owned, tagHandles );
2101 
2102   if ( data.file_set != data.covering_set) // coverage mesh is different from original mesh, it means we are on a source mesh, after intx
2103   {
2104 #ifdef VERBOSE
2105     std::ostringstream outfile;
2106     int rank = pco->rank();
2107     outfile << "CovMeshWithTag_0" << rank << ".h5m";
2108     rval = context.MBI->write_file ( outfile.str().c_str(), 0, 0, &(data.covering_set), 1 );CHKERRVAL(rval); // coverage mesh
2109 #endif
2110   }
2111 
2112   if ( MB_SUCCESS != rval ) { return 1; }
2113   // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
2114 
2115   return 0;
2116 }
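// A minimal tag-migration sketch (not compiled): send two element tags from the source
// component to the target component over the joint communicator, then release the send
// buffers; the application handles, component ids and tag names are assumptions.
#if 0
    char tags[] = "a2oTbot;a2oUbot";
    // on the sending component tasks
    ErrCode ierr = iMOAB_SendElementTag ( &srcAppID, &srcCompID, &tgtCompID, tags,
                                          &jointComm, (int) strlen ( tags ) );
    // on the receiving component tasks
    ierr = iMOAB_ReceiveElementTag ( &tgtAppID, &srcCompID, &tgtCompID, tags,
                                     &jointComm, (int) strlen ( tags ) );
    // once the receives complete, the sender can release its non-blocking send buffers
    ierr = iMOAB_FreeSenderBuffers ( &srcAppID, &jointComm, &tgtCompID );
#endif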
2117 
2118 
2119 ErrCode iMOAB_FreeSenderBuffers ( iMOAB_AppID pid, MPI_Comm* join, int* rcompid )
2120 {
2121     // need first to find the pgraph that holds the information we need
2122     // this will be called on sender side only
2123     appData& data = context.appDatas[*pid];
2124     std::vector<ParCommGraph*>& pgrs = context.appDatas[*pid].pgraph;
2125     int ext_id = data.external_id;
2126     ParCommGraph* pg = NULL;
2127 
2128     for ( size_t i = 0; i < pgrs.size(); i++ )
2129     {
2130         if (  ( pgrs[i]->get_component_id2() == *rcompid  && ext_id ==  pgrs[i]->get_component_id1() ) ||
2131             ( pgrs[i]->get_component_id2() == ext_id  && *rcompid ==  pgrs[i]->get_component_id1())) // sense -1
2132         {
2133             pg =  pgrs[i];
2134             break;
2135         }
2136     }
2137 
2138     // if not found, problem
2139     if ( pg == NULL )
2140     { return 1; } // cannot find the graph
2141 
2142     pg->release_send_buffers ( *join );
2143     return 0;
2144 }
2145 
2146 
2147 // this call must be collective on the joint communicator
2148 //  the intersection tasks on the coupler need to send to the component tasks the list of
2149 //  element IDs that are relevant: those that intersected some of the target elements
2150 //  (the target elements themselves are not needed here)
2151 ErrCode iMOAB_CoverageGraph ( MPI_Comm * join, iMOAB_AppID pid_src,
2152                               int* scompid, iMOAB_AppID pid_migr,
2153                               int* migrcomp, iMOAB_AppID pid_intx )
2154 {
2155     // first, based on the scompid and migrcomp, find the parCommGraph corresponding to this exchange
2156     ErrorCode rval;
2157     std::vector<int> srcSenders;
2158     std::vector<int> receivers;
2159     ParCommGraph* sendGraph = NULL;
2160     int sense = 0;
2161     int ierr;
2162     if (*pid_src>=0)
2163     {
2164       ierr = FindParCommGraph(pid_src, scompid, migrcomp, sendGraph, &sense);
2165       if ( 0 != ierr || NULL == sendGraph || sense != 1) {
2166         std::cout<<" probably not on component source PEs \n";
2167       }
2168       else
2169       {
2170         // report the sender and receiver tasks in the joint comm
2171         srcSenders = sendGraph->senders();
2172         receivers = sendGraph->receivers();
2173 #ifdef VERBOSE
2174         std::cout << "senders: " << srcSenders.size() << " first sender: "<< srcSenders[0] << std::endl;
2175 #endif
2176       }
2177     }
2178     ParCommGraph * recvGraph = NULL; // will be non null on receiver tasks (intx tasks)
2179     int senseRec = 0;
2180     if (*pid_migr>=0)
2181     {
2182       ierr = FindParCommGraph(pid_migr, scompid, migrcomp, recvGraph, &senseRec);
2183       if ( 0 != ierr || NULL == recvGraph ) {
2184         std::cout << " not on receive PEs for migrated mesh \n";
2185       }
2186       else
2187       {
2188         // report the sender and receiver tasks in the joint comm, from migrated mesh pt of view
2189         srcSenders = recvGraph->senders();
2190         receivers = recvGraph->receivers();
2191 #ifdef VERBOSE
2192         std::cout << "receivers: " << receivers.size() << " first receiver: "<< receivers[0] << std::endl;
2193 #endif
2194       }
2195     }
2196 
2197     // loop over pid_intx elements, to see what original processors in joint comm have sent the coverage mesh
2198     // if we are on intx tasks, send coverage info towards original component tasks, about needed cells
2199     TupleList TLcovIDs;
2200     TLcovIDs.initialize ( 2, 0, 0, 0, 0 ); // to proc, GLOBAL ID; estimate about 100 IDs to be sent
2201     // will push_back a new tuple, if needed
2202     TLcovIDs.enableWriteAccess();
2203     // the crystal router will send ID cell to the original source, on the component task
2204     // if we are on intx tasks, loop over all intx elements
2205 
2206     int currentRankInJointComm = -1;
2207     ierr = MPI_Comm_rank ( *join, &currentRankInJointComm );CHKIERRVAL(ierr);
2208 
2209     // if currentRankInJointComm is in receivers list, it means that we are on intx tasks too, we need to
2210     // send information towards component tasks
2211     if ( find ( receivers.begin(), receivers.end(), currentRankInJointComm ) != receivers.end() ) // we are on receivers tasks, we can request intx info
2212     {
2213         // find the pcomm for the intx pid
2214         if ( *pid_intx >= ( int ) context.appDatas.size() )
2215             return 1;
2216 
2217         appData& dataIntx = context.appDatas[*pid_intx];
2218         Tag parentTag ;
2219         rval = context.MBI->tag_get_handle ( "BlueParent", parentTag ); CHKERRVAL ( rval ); // id of the blue, source element
2220         if ( !parentTag )
2221             return 1; // fatal error, abort
2222         Tag orgSendProcTag ;
2223         rval = context.MBI->tag_get_handle ( "orig_sending_processor", orgSendProcTag ); CHKERRVAL ( rval );
2224         if ( !orgSendProcTag )
2225             return 1; // fatal error, abort
2226         // find the file set, red parents for intx cells, and put them in tuples
2227         EntityHandle intxSet = dataIntx.file_set;
2228         // get all entities from the set, and look at their RedParent
2229         Range cells;
2230         rval = context.MBI->get_entities_by_dimension ( intxSet, 2, cells ); CHKERRVAL ( rval );
2231 
2232         std::map<int, std::set<int> > idsFromProcs; // send that info back to enhance parCommGraph cache
2233         for ( Range::iterator it = cells.begin(); it != cells.end(); it++ )
2234         {
2235             EntityHandle intx_cell = *it;
2236             int gidCell, origProc; // look at receivers
2237 
2238             rval = context.MBI->tag_get_data ( parentTag, &intx_cell, 1, &gidCell ); CHKERRVAL ( rval );
2239             rval = context.MBI->tag_get_data ( orgSendProcTag, &intx_cell, 1, &origProc ); CHKERRVAL ( rval );
2240             // the overlap set may have been augmented with ghost cells; for those, the orig_sending_processor
2241             // tag is not set and defaults to -1
2242             if (origProc<0)
2243               continue;
2244 
2245 
2246             std::set<int> &setInts = idsFromProcs[origProc];
2247             setInts.insert ( gidCell );
2248             //std::cout << origProc << " id:" << gidCell << " size: " << setInts.size() << std::endl;
2249         }
2250 #ifdef VERBOSE
2251         std::ofstream dbfile;
2252         std::stringstream outf;
2253         outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
2254         dbfile.open (outf.str().c_str());
2255         dbfile << "Writing this to a file.\n";
2256 
2257         dbfile << " map size:" << idsFromProcs.size() << std::endl; // on the receiver side, these show how much data to receive
2258         // from the sender (how many ids, and how much tag data later; we need to size up the receiver buffer)
2259         // arrange in tuples , use map iterators to send the ids
2260         for (std::map<int, std::set<int> >::iterator mt=idsFromProcs.begin(); mt!=idsFromProcs.end(); mt++)
2261           {
2262 
2263             std::set<int> & setIds = mt->second;
2264             dbfile << "from id: " << mt->first <<  " receive " << setIds.size() << " cells \n";
2265             int counter = 0;
2266             for ( std::set<int>::iterator st= setIds.begin(); st!=setIds.end(); st++)
2267             {
2268               int valueID = *st;
2269               dbfile << " " << valueID;
2270               counter ++;
2271               if (counter%10 == 0)
2272                 dbfile<<"\n";
2273 
2274             }
2275             dbfile<<"\n";
2276           }
2277         dbfile.close();
2278 #endif
2279         if ( NULL != recvGraph )
2280             recvGraph->SetReceivingAfterCoverage ( idsFromProcs );
2281         for ( std::map<int, std::set<int> >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
2282         {
2283             int procToSendTo = mit->first;
2284             std::set<int> & idSet = mit->second;
2285             for ( std::set<int>::iterator sit = idSet.begin(); sit != idSet.end(); sit++ )
2286             {
2287                 int n = TLcovIDs.get_n();
2288                 TLcovIDs.reserve();
2289                 TLcovIDs.vi_wr[2 * n] = procToSendTo; // send to processor
2290                 TLcovIDs.vi_wr[2 * n + 1] = *sit; // global id needs index in the local_verts range
2291             }
2292         }
2293     }
2294 
2295     ProcConfig pc ( *join ); // proc config does the crystal router
2296     pc.crystal_router()->gs_transfer ( 1, TLcovIDs, 0 ); // communication towards component tasks, with what ids are needed
2297     // for each task from receiver
2298 
2299     // a test to know if we are on the sender tasks (original component, in this case, atmosphere)
2300     if ( NULL != sendGraph )
2301     {
2302         // collect TLcovIDs tuple, will set in a local map/set, the ids that are sent to each receiver task
2303         rval = sendGraph->settle_send_graph ( TLcovIDs ); CHKERRVAL ( rval );
2304     }
2305     return 0;// success
2306 }
2307 
2308 #endif // MOAB_HAVE_MPI
2309 
2310 #ifdef MOAB_HAVE_TEMPESTREMAP
2311 
2312 #define USE_API
2313 
2314 static ErrCode ComputeSphereRadius ( iMOAB_AppID pid, double* radius)
2315 {
2316     ErrorCode rval;
2317     CartVect pos;
2318 
2319     Range& verts = context.appDatas[*pid].all_verts;
2320     moab::EntityHandle firstVertex = (verts[0]);
2321 
2322     // coordinate data
2323     rval = context.MBI->get_coords ( &(firstVertex), 1, (double*) &(pos[0]) );CHKERRVAL(rval);
2324 
2325     // compute the distance from origin
2326     // TODO: we could do this in a loop to verify if the pid represents a spherical mesh
2327     *radius = pos.length();
2328     return 0;
2329 }
2330 
2331 ErrCode iMOAB_ComputeMeshIntersectionOnSphere ( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx)
2332 {
2333     ErrorCode rval;
2334 
2335     double radius_source=1.0;
2336     double radius_target=1.0;
2337     const double epsrel=1e-15;
2338     const double boxeps=1.e-8;
2339 
2340     // Get the source and target data and pcomm objects
2341     appData& data_src = context.appDatas[*pid_src];
2342     appData& data_tgt = context.appDatas[*pid_tgt];
2343 	appData& data_intx = context.appDatas[*pid_intx];
2344 #ifdef MOAB_HAVE_MPI
2345     ParallelComm* pco_intx = context.pcomms[*pid_intx];
2346 #endif
2347 
2348 	//  Sanity check: Check that the source and target meshes belong to the same pes.
2349     //  assert(pco_src->get_id() == pco_tgt->get_id());
2350     //  assert(pco_src->get_id() == pco_intx->get_id());
2351 
2352     // Mesh intersection has already been computed; Return early.
2353     if(data_intx.remapper != NULL) return 0;
2354 
2355 #ifdef MOAB_HAVE_MPI
2356     if (pco_intx) {
2357         rval = pco_intx->check_all_shared_handles();CHKERRVAL(rval);
2358     }
2359 #endif
2360 
2361     // Rescale the radius of both to compute the intersection
2362     ComputeSphereRadius(pid_src, &radius_source);
2363     ComputeSphereRadius(pid_tgt, &radius_target);
2364 #ifdef VERBOSE
2365     // std::cout << "Radius of spheres: source = " << radius_source << " and target = " << radius_target << "\n";
2366 #endif
2367 	// validate and fix the source and target meshes (degenerate quads, orientation), and report their sizes when verbose
2368 	{
2369 		moab::Range rintxverts, rintxelems;
2370 		rval = context.MBI->get_entities_by_dimension ( data_src.file_set, 0, rintxverts );CHKERRVAL(rval);
2371 		rval = context.MBI->get_entities_by_dimension ( data_src.file_set, 2, rintxelems );CHKERRVAL(rval);
2372 		rval = fix_degenerate_quads ( context.MBI, data_src.file_set );CHKERRVAL(rval);
2373 		rval = positive_orientation ( context.MBI, data_src.file_set, radius_source );CHKERRVAL(rval);
2374 		ErrCode ierr = iMOAB_UpdateMeshInfo(pid_src); CHKIERRVAL(ierr);
2375 #ifdef VERBOSE
2376  		std::cout << "The source set contains " << rintxverts.size() << " vertices and " << rintxelems.size() << " elements \n";
2377 #endif
2378 
2379 		moab::Range bintxverts, bintxelems;
2380 		rval = context.MBI->get_entities_by_dimension ( data_tgt.file_set, 0, bintxverts );CHKERRVAL(rval);
2381 		rval = context.MBI->get_entities_by_dimension ( data_tgt.file_set, 2, bintxelems );CHKERRVAL(rval);
2382 		rval = fix_degenerate_quads ( context.MBI, data_tgt.file_set );CHKERRVAL(rval);
2383 		rval = positive_orientation ( context.MBI, data_tgt.file_set, radius_target );CHKERRVAL(rval);
2384 		ierr = iMOAB_UpdateMeshInfo(pid_tgt); CHKIERRVAL(ierr);
2385 #ifdef VERBOSE
2386  		std::cout << "The target set contains " << bintxverts.size() << " vertices and " << bintxelems.size() << " elements \n";
2387 #endif
2388 	}
2389 
2390 	// set the context for the source and destination applications
2391 	data_intx.pid_src = pid_src;
2392 	data_intx.pid_dest = pid_tgt;
2393 
2394 	// Now allocate and initialize the remapper object
2395 #ifdef MOAB_HAVE_MPI
2396     data_intx.remapper = new moab::TempestRemapper ( context.MBI, pco_intx );
2397 #else
2398     data_intx.remapper = new moab::TempestRemapper ( context.MBI );
2399 #endif
2400     data_intx.remapper->meshValidate = true;
2401     data_intx.remapper->constructEdgeMap = true;
2402 
2403     // Do not create new filesets; Use the sets from our respective applications
2404     data_intx.remapper->initialize(false);
2405     data_intx.remapper->GetMeshSet ( moab::Remapper::SourceMesh ) = data_src.file_set;
2406     data_intx.remapper->GetMeshSet ( moab::Remapper::TargetMesh ) = data_tgt.file_set;
2407     data_intx.remapper->GetMeshSet ( moab::Remapper::IntersectedMesh ) = data_intx.file_set;
2408 
2409     /* Make sure that the radii of the source and target meshes match. If not, rescale now and unscale later. */
2410     bool radii_scaled = false;
2411     if (fabs(radius_source - radius_target) > 1e-10) { /* the radii are different */
2412         radii_scaled = true;
2413         rval = ScaleToRadius(context.MBI, data_src.file_set, 1.0);CHKERRVAL(rval);
2414         rval = ScaleToRadius(context.MBI, data_tgt.file_set, 1.0);CHKERRVAL(rval);
2415     }
2416 
2417     rval = data_intx.remapper->ConvertMeshToTempest ( moab::Remapper::SourceMesh );CHKERRVAL(rval);
2418     rval = data_intx.remapper->ConvertMeshToTempest ( moab::Remapper::TargetMesh );CHKERRVAL(rval);
2419 
2420 	// Compute intersections with MOAB
2421 	rval = data_intx.remapper->ComputeOverlapMesh ( epsrel, 1.0, 1.0, boxeps, false );CHKERRVAL(rval);
2422     // rval = data_intx.remapper->ConvertMeshToTempest ( moab::Remapper::IntersectedMesh );CHKERRVAL(rval);
2423 
2424 #ifdef MOAB_HAVE_MPI
2425 	data_src.covering_set = data_intx.remapper->GetCoveringSet();
2426 #endif
2427 
2428     // if (radii_scaled) { /* the radii are different, so lets rescale back */
2429     //     rval = ScaleToRadius(context.MBI, data_src.file_set, radius_source);CHKERRVAL(rval);
2430     //     rval = ScaleToRadius(context.MBI, data_tgt.file_set, radius_target);CHKERRVAL(rval);
2431     // }
2432 
2433     return 0;
2434 }
2435 
2436 
2437 ErrCode iMOAB_ComputeScalarProjectionWeights ( iMOAB_AppID pid_intx,
2438                                                const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
2439                                                const iMOAB_String disc_method_source, int* disc_order_source,
2440                                                const iMOAB_String disc_method_target, int* disc_order_target,
2441                                                int* fMonotoneTypeID, int* fVolumetric, int* fNoConservation,
2442                                                int* fValidate,
2443                                                const iMOAB_String source_solution_tag_dof_name,
2444                                                const iMOAB_String target_solution_tag_dof_name,
2445                                                int solution_weights_identifier_length,
2446                                                int disc_method_source_length,
2447                                                int disc_method_target_length,
2448                                                int source_solution_tag_dof_name_length,
2449                                                int target_solution_tag_dof_name_length)
2450 {
2451 	moab::ErrorCode rval;
2452 
2453 	assert(disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0);
2454 	assert(disc_method_source_length > 0 && disc_method_target_length > 0);
2455     assert(solution_weights_identifier_length > 0 && source_solution_tag_dof_name_length > 0 && target_solution_tag_dof_name_length > 0);
2456 
2457     // Get the source and target data and pcomm objects
2458 	appData& data_intx = context.appDatas[*pid_intx];
2459 #ifdef MOAB_HAVE_MPI
2460     ParallelComm* pco_intx = context.pcomms[*pid_intx];
2461 #endif
2462 
2463     bool is_parallel = false, is_root = true;
2464     int rank=0;
2465 #ifdef MOAB_HAVE_MPI
2466     int flagInit;
2467     MPI_Initialized( &flagInit );
2468     if (flagInit) {
2469         is_parallel = true;
2470         assert(pco_intx != NULL);
2471         rank = pco_intx->rank();
2472         is_root = (rank == 0);
2473     }
2474 #endif
2475 
2476 	// Setup computation of weights
2477     // Set the context for the OfflineMap computation
2478     data_intx.weightMaps[std::string(solution_weights_identifier)] = new moab::TempestOfflineMap ( data_intx.remapper );
2479 
2480 	// Call to generate an offline map with the tempest meshes
2481     moab::TempestOfflineMap* weightMap = data_intx.weightMaps[std::string(solution_weights_identifier)];
2482     assert(weightMap != NULL);
2483 
2484     // Now let us compute the local-global mapping and store it in the context
2485     // We need this mapping when computing matvec products and to do reductions in parallel
2486 	// Additionally, the call below will also compute weights with TempestRemap
2487 	rval = weightMap->GenerateOfflineMap ( std::string(disc_method_source), std::string(disc_method_target),        // std::string strInputType, std::string strOutputType,
2488 										   (*disc_order_source), (*disc_order_target),    // const int nPin, const int nPout,
2489                                            true, (fMonotoneTypeID ? *fMonotoneTypeID : 0),            // bool fBubble=false, int fMonotoneTypeID=0,
2490 										   (fVolumetric ? *fVolumetric > 0 : false),  // bool fVolumetric=false,
2491                                            (fNoConservation ? *fNoConservation > 0 : false), // bool fNoConservation=false,
2492                                            false, // bool fNoCheck=false,
2493                                            source_solution_tag_dof_name, target_solution_tag_dof_name,
2494 										   "", //"",   // std::string strVariables="", std::string strOutputMap="",
2495 										   "", "",   // std::string strInputData="", std::string strOutputData="",
2496 										   "", false,  // std::string strNColName="", bool fOutputDouble=false,
2497 										   "", false, 0.0,   // std::string strPreserveVariables="", bool fPreserveAll=false, double dFillValueOverride=0.0,
2498 										   false, false   // bool fInputConcave = false, bool fOutputConcave = false
2499 										 );CHKERRVAL(rval);
2500 
2501     // Mapping computation done
2502 	if (fValidate && *fValidate)
2503 	{
2504 		const double radius = 1.0 /*2.0*acos(-1.0)*/;
2505 		double local_areas[3], global_areas[3]; // Array for Initial area, and through Method 1 and Method 2
2506 		local_areas[0] = area_on_sphere_lHuiller ( context.MBI, context.appDatas[*(context.appDatas[*pid_intx].pid_dest)].file_set, radius );
2507 		local_areas[1] = area_on_sphere_lHuiller ( context.MBI, context.appDatas[*pid_intx].file_set, radius );
2508 		local_areas[2] = area_on_sphere ( context.MBI, context.appDatas[*pid_intx].file_set, radius );
2509 
2510 #ifdef MOAB_HAVE_MPI
2511 		if (is_parallel)
2512             MPI_Allreduce ( &local_areas[0], &global_areas[0], 3, MPI_DOUBLE, MPI_SUM, pco_intx->comm() );
2513         else {
2514             global_areas[0] = local_areas[0];
2515             global_areas[1] = local_areas[1];
2516             global_areas[2] = local_areas[2];
2517         }
2518 #else
2519         global_areas[0] = local_areas[0];
2520         global_areas[1] = local_areas[1];
2521         global_areas[2] = local_areas[2];
2522 #endif
2523 
2524 		if ( is_root )
2525 		{
2526 			printf ( "initial area: %12.10f\n", global_areas[0] );
2527 			printf ( "area with l'Huiller: %12.10f with Girard: %12.10f\n", global_areas[1], global_areas[2] );
2528 			printf ( "  relative difference areas = %12.10e\n", fabs ( global_areas[1] - global_areas[2] ) / global_areas[1] );
2529 			printf ( "  relative error = %12.10e\n", fabs ( global_areas[1] - global_areas[0] ) / global_areas[1] );
2530 		}
2531 	}
2532 
2533 	return 0;
2534 }
2535 
2536 
2537 ErrCode iMOAB_ApplyScalarProjectionWeights (   iMOAB_AppID pid_intersection,
2538                                                const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
2539                                                const iMOAB_String source_solution_tag_name,
2540                                                const iMOAB_String target_solution_tag_name,
2541                                                int solution_weights_identifier_length,
2542                                                int source_solution_tag_name_length,
2543                                                int target_solution_tag_name_length )
2544 {
2545     moab::ErrorCode rval;
2546 
2547     assert(solution_weights_identifier_length > 0 && source_solution_tag_name_length > 0 && target_solution_tag_name_length > 0);
2548 
2549     // Get the source and target data and pcomm objects
2550     appData& data_intx = context.appDatas[*pid_intersection];
2551 
2552     // Now allocate and initialize the remapper object
2553     moab::TempestRemapper* remapper = data_intx.remapper;
2554     moab::TempestOfflineMap* weightMap = data_intx.weightMaps[std::string(solution_weights_identifier)];
2555 
2556 
2557     // we assume that there are separators ";" between the tag names
2558     std::vector<std::string> srcNames;
2559     std::vector<std::string> tgtNames;
2560     std::vector<Tag> srcTagHandles;
2561     std::vector<Tag> tgtTagHandles;
2562     std::string separator(";");
2563     std::string src_name(source_solution_tag_name);
2564     std::string tgt_name(target_solution_tag_name);
2565     split_tag_names( src_name, separator, srcNames);
2566     split_tag_names( tgt_name, separator, tgtNames);
2567     if (srcNames.size() != tgtNames.size())
2568     {
2569       std::cout <<" error in parsing source and target tag names. \n";
2570       return 1;
2571     }
2572 
2573     for (size_t i=0; i < srcNames.size(); i++ )
2574     {
2575       Tag tagHandle;
2576       rval = context.MBI->tag_get_handle ( srcNames[i].c_str(), tagHandle);
2577       if ( MB_SUCCESS != rval || NULL == tagHandle) { return 1; }
2578       srcTagHandles.push_back(tagHandle);
2579       rval = context.MBI->tag_get_handle ( tgtNames[i].c_str(), tagHandle);
2580       if ( MB_SUCCESS != rval || NULL == tagHandle) { return 1; }
2581       tgtTagHandles.push_back(tagHandle);
2582     }
2583 
2584     moab::Range& covSrcEnts = remapper->GetMeshEntities(moab::Remapper::CoveringMesh);
2585     moab::Range& tgtEnts = remapper->GetMeshEntities(moab::Remapper::TargetMesh);
2586 
2587     std::vector<double> solSTagVals(covSrcEnts.size()*weightMap->GetSourceNDofsPerElement()*weightMap->GetSourceNDofsPerElement(), -1.0);
2588     std::vector<double> solTTagVals(tgtEnts.size()*weightMap->GetDestinationNDofsPerElement()*weightMap->GetDestinationNDofsPerElement(), -1.0);
2589 
2590     for (size_t i=0; i < srcTagHandles.size(); i++ )
2591     {
2592       // The tag data is np*np*n_el_src
2593       Tag ssolnTag = srcTagHandles[i];
2594       Tag tsolnTag = tgtTagHandles[i];
2595       rval = context.MBI->tag_get_data (ssolnTag , covSrcEnts, &solSTagVals[0] );CHKERRVAL(rval);
2596 
2597       // Compute the application of weights on the source solution data and store it in the destination solution vector data
2598       // Optionally, can also perform the transpose application of the weight matrix. Set the 3rd argument to true if this is needed
2599       rval = weightMap->ApplyWeights(solSTagVals, solTTagVals, false);CHKERRVAL(rval);
2600 
2601       // The tag data is np*np*n_el_dest
2602       rval = context.MBI->tag_set_data (tsolnTag , tgtEnts, &solTTagVals[0] );CHKERRVAL(rval);
2603 
2604 #ifdef VERBOSE
2605       ParallelComm* pco_intx = context.pcomms[*pid_intersection];
2606 
2607       {
2608           std::stringstream sstr;
2609           sstr << "covsrcTagData_" << i << "_" << pco_intx->rank() << ".txt";
2610           std::ofstream output_file ( sstr.str().c_str() );
2611           for (unsigned i=0; i < covSrcEnts.size(); ++i) {
2612               EntityHandle elem = covSrcEnts[i];
2613               std::vector<double> locsolSTagVals(16);
2614               rval = context.MBI->tag_get_data ( ssolnTag, &elem, 1, &locsolSTagVals[0] );CHKERRVAL(rval);
2615               output_file << "\n" << remapper->GetGlobalID(Remapper::CoveringMesh, i) << "-- \n\t";
2616               for (unsigned j=0; j < 16; ++j)
2617                   output_file << locsolSTagVals[j] << " ";
2618           }
2619           output_file.flush(); // required here
2620           output_file.close();
2621       }
2622       {
2623           std::stringstream sstr;
2624           sstr << "outputSrcDest_" << i << "_"<< pco_intx->rank() << ".h5m";
2625           EntityHandle sets[2] = {context.appDatas[*data_intx.pid_src].file_set, context.appDatas[*data_intx.pid_dest].file_set};
2626           rval = context.MBI->write_file ( sstr.str().c_str(), NULL, "", sets, 2 ); MB_CHK_ERR ( rval );
2627       }
2628       {
2629           std::stringstream sstr;
2630           sstr << "outputCovSrcDest_" << i << "_" << pco_intx->rank() << ".h5m";
2631           // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
2632           EntityHandle sets[2] = {data_intx.covering_set, context.appDatas[*data_intx.pid_dest].file_set};
2633           rval = context.MBI->write_file ( sstr.str().c_str(), NULL, "", sets, 2 ); MB_CHK_ERR ( rval );
2634       }
2635       {
2636           std::stringstream sstr;
2637           sstr << "colvector_" << i << "_" << pco_intx->rank() << ".txt";
2638           std::ofstream output_file ( sstr.str().c_str() );
2639           for (unsigned i = 0; i < solSTagVals.size(); ++i)
2640               output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " " << solSTagVals[i] << "\n";
2641           output_file.flush(); // required here
2642           output_file.close();
2643       }
2644 #endif
2645     }
2646     return 0;
2647 }
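// A minimal remapping pipeline sketch (not compiled): pidSrc, pidTgt and pidIntx are three
// applications registered on the coupler tasks; the discretization methods, dof tag names and
// solution tag names below are assumptions chosen for illustration.
#if 0
    char weightsId[] = "scalar";
    char methodSrc[] = "cgll", methodTgt[] = "fv";
    char dofSrc[] = "GLOBAL_DOFS", dofTgt[] = "GLOBAL_ID";
    char tagSrc[] = "Temperature_src", tagTgt[] = "Temperature_tgt";
    int orderSrc = 4, orderTgt = 1, monotone = 0, volumetric = 0, noConserve = 0, validate = 1;

    ErrCode ierr = iMOAB_ComputeMeshIntersectionOnSphere ( &pidSrc, &pidTgt, &pidIntx );

    ierr = iMOAB_ComputeScalarProjectionWeights ( &pidIntx, weightsId,
                                                  methodSrc, &orderSrc, methodTgt, &orderTgt,
                                                  &monotone, &volumetric, &noConserve, &validate,
                                                  dofSrc, dofTgt,
                                                  (int) strlen ( weightsId ),
                                                  (int) strlen ( methodSrc ), (int) strlen ( methodTgt ),
                                                  (int) strlen ( dofSrc ), (int) strlen ( dofTgt ) );

    ierr = iMOAB_ApplyScalarProjectionWeights ( &pidIntx, weightsId, tagSrc, tagTgt,
                                                (int) strlen ( weightsId ),
                                                (int) strlen ( tagSrc ), (int) strlen ( tagTgt ) );
#endif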
2648 
2649 
2650 #endif // MOAB_HAVE_TEMPESTREMAP
2651 
2652 #ifdef __cplusplus
2653 }
2654 #endif
2655