#include "NCHelperGCRM.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/FileOptions.hpp"
#include "moab/SpectralMeshTool.hpp"
#include "MBTagConventions.hpp"

#ifdef MOAB_HAVE_ZOLTAN
#include "moab/ZoltanPartitioner.hpp"
#endif

#include <cmath>

namespace moab {

// GCRM cells are either pentagons or hexagons, and pentagons are always padded to hexagons
const int EDGES_PER_CELL = 6;
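// A padded pentagon simply repeats one of its vertices, so every cell can be stored with a
// fixed 6-entry connectivity (see the padding-correction loops below)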

NCHelperGCRM::NCHelperGCRM(ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet)
: UcdNCHelper(readNC, fileId, opts, fileSet)
, createGatherSet(false)
{
  // Ignore variables containing topological information
  ignoredVarNames.insert("grid");
  ignoredVarNames.insert("cell_corners");
  ignoredVarNames.insert("cell_edges");
  ignoredVarNames.insert("edge_corners");
  ignoredVarNames.insert("cell_neighbors");
}

bool NCHelperGCRM::can_read_file(ReadNC* readNC)
{
  std::vector<std::string>& dimNames = readNC->dimNames;

  // If dimension name "cells" exists then it should be the GCRM grid
  if (std::find(dimNames.begin(), dimNames.end(), std::string("cells")) != dimNames.end())
    return true;

  return false;
}

ErrorCode NCHelperGCRM::init_mesh_vals()
{
  std::vector<std::string>& dimNames = _readNC->dimNames;
  std::vector<int>& dimLens = _readNC->dimLens;
  std::map<std::string, ReadNC::VarData>& varInfo = _readNC->varInfo;

  ErrorCode rval;
  unsigned int idx;
  std::vector<std::string>::iterator vit;

  // Look for time dimension
  if ((vit = std::find(dimNames.begin(), dimNames.end(), "Time")) != dimNames.end())
    idx = vit - dimNames.begin();
  else if ((vit = std::find(dimNames.begin(), dimNames.end(), "time")) != dimNames.end())
    idx = vit - dimNames.begin();
  else {
    MB_SET_ERR(MB_FAILURE, "Couldn't find 'Time' or 'time' dimension");
  }
  tDim = idx;
  nTimeSteps = dimLens[idx];

  // Get number of cells
  if ((vit = std::find(dimNames.begin(), dimNames.end(), "cells")) != dimNames.end())
    idx = vit - dimNames.begin();
  else {
    MB_SET_ERR(MB_FAILURE, "Couldn't find 'cells' dimension");
  }
  cDim = idx;
  nCells = dimLens[idx];

  // Get number of edges
  if ((vit = std::find(dimNames.begin(), dimNames.end(), "edges")) != dimNames.end())
    idx = vit - dimNames.begin();
  else {
    MB_SET_ERR(MB_FAILURE, "Couldn't find 'edges' dimension");
  }
  eDim = idx;
  nEdges = dimLens[idx];

  // Get number of vertices
  vDim = -1;
  if ((vit = std::find(dimNames.begin(), dimNames.end(), "corners")) != dimNames.end())
    idx = vit - dimNames.begin();
  else {
    MB_SET_ERR(MB_FAILURE, "Couldn't find 'corners' dimension");
  }
  vDim = idx;
  nVertices = dimLens[idx];

  // Get number of layers
  if ((vit = std::find(dimNames.begin(), dimNames.end(), "layers")) != dimNames.end())
    idx = vit - dimNames.begin();
  else {
    MB_SET_ERR(MB_FAILURE, "Couldn't find 'layers' dimension");
  }
  levDim = idx;
  nLevels = dimLens[idx];

  // Dimension indices for other optional levels
  std::vector<unsigned int> opt_lev_dims;

  // Get index of interface levels
  if ((vit = std::find(dimNames.begin(), dimNames.end(), "interfaces")) != dimNames.end()) {
    idx = vit - dimNames.begin();
    opt_lev_dims.push_back(idx);
  }

  std::map<std::string, ReadNC::VarData>::iterator vmit;

  // Store time coordinate values in tVals
  if (nTimeSteps > 0) {
    if ((vmit = varInfo.find("time")) != varInfo.end()) {
      rval = read_coordinate("time", 0, nTimeSteps - 1, tVals);MB_CHK_SET_ERR(rval, "Trouble reading 'time' variable");
    }
    else if ((vmit = varInfo.find("t")) != varInfo.end()) {
      rval = read_coordinate("t", 0, nTimeSteps - 1, tVals);MB_CHK_SET_ERR(rval, "Trouble reading 't' variable");
    }
    else {
      // If expected time variable is not available, set dummy time coordinate values to tVals
      for (int t = 0; t < nTimeSteps; t++)
        tVals.push_back((double)t);
    }
  }

  // For each variable, determine the entity location type and number of levels
  for (vmit = varInfo.begin(); vmit != varInfo.end(); ++vmit) {
    ReadNC::VarData& vd = (*vmit).second;

    // Default entLoc is ENTLOCSET
    if (std::find(vd.varDims.begin(), vd.varDims.end(), tDim) != vd.varDims.end()) {
      if (std::find(vd.varDims.begin(), vd.varDims.end(), vDim) != vd.varDims.end())
        vd.entLoc = ReadNC::ENTLOCVERT;
      else if (std::find(vd.varDims.begin(), vd.varDims.end(), eDim) != vd.varDims.end())
        vd.entLoc = ReadNC::ENTLOCEDGE;
      else if (std::find(vd.varDims.begin(), vd.varDims.end(), cDim) != vd.varDims.end())
        vd.entLoc = ReadNC::ENTLOCFACE;
    }

    // Default numLev is 0
    if (std::find(vd.varDims.begin(), vd.varDims.end(), levDim) != vd.varDims.end())
      vd.numLev = nLevels;
    else {
      // If layers dimension is not found, try other optional levels such as interfaces
      for (unsigned int i = 0; i < opt_lev_dims.size(); i++) {
        if (std::find(vd.varDims.begin(), vd.varDims.end(), opt_lev_dims[i]) != vd.varDims.end()) {
          vd.numLev = dimLens[opt_lev_dims[i]];
          break;
        }
      }
    }
  }

  // Hack: create dummy variables for dimensions (like cells) with no corresponding coordinate variables
  rval = create_dummy_variables();MB_CHK_SET_ERR(rval, "Failed to create dummy variables");

  return MB_SUCCESS;
}

// When the noMesh option is used on this read, the old ReadNC class instance for the last read can go out
// of scope (and be deleted). The old instance initialized some variables properly when the mesh was
// created, but those values are now lost. The new instance (which does not create the mesh when noMesh
// is set) has to restore them based on the existing mesh from the last read.
ErrorCode NCHelperGCRM::check_existing_mesh()
{
  Interface*& mbImpl = _readNC->mbImpl;
  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
  bool& noMesh = _readNC->noMesh;

  if (noMesh) {
    ErrorCode rval;

    if (localGidVerts.empty()) {
      // Get all vertices from current file set (it is the input set in no_mesh scenario)
      Range local_verts;
      rval = mbImpl->get_entities_by_dimension(_fileSet, 0, local_verts);MB_CHK_SET_ERR(rval, "Trouble getting local vertices in current file set");

      if (!local_verts.empty()) {
        std::vector<int> gids(local_verts.size());

        // !IMPORTANT : this has to be the GLOBAL_ID tag
        rval = mbImpl->tag_get_data(mGlobalIdTag, local_verts, &gids[0]);MB_CHK_SET_ERR(rval, "Trouble getting local gid values of vertices");

        // Restore localGidVerts
        std::copy(gids.rbegin(), gids.rend(), range_inserter(localGidVerts));
        nLocalVertices = localGidVerts.size();
      }
    }

    if (localGidEdges.empty()) {
      // Get all edges from current file set (it is the input set in no_mesh scenario)
      Range local_edges;
      rval = mbImpl->get_entities_by_dimension(_fileSet, 1, local_edges);MB_CHK_SET_ERR(rval, "Trouble getting local edges in current file set");

      if (!local_edges.empty()) {
        std::vector<int> gids(local_edges.size());

        // !IMPORTANT : this has to be the GLOBAL_ID tag
        rval = mbImpl->tag_get_data(mGlobalIdTag, local_edges, &gids[0]);MB_CHK_SET_ERR(rval, "Trouble getting local gid values of edges");

        // Restore localGidEdges
        std::copy(gids.rbegin(), gids.rend(), range_inserter(localGidEdges));
        nLocalEdges = localGidEdges.size();
      }
    }

    if (localGidCells.empty()) {
      // Get all cells from current file set (it is the input set in no_mesh scenario)
      Range local_cells;
      rval = mbImpl->get_entities_by_dimension(_fileSet, 2, local_cells);MB_CHK_SET_ERR(rval, "Trouble getting local cells in current file set");

      if (!local_cells.empty()) {
        std::vector<int> gids(local_cells.size());

        // !IMPORTANT : this has to be the GLOBAL_ID tag
        rval = mbImpl->tag_get_data(mGlobalIdTag, local_cells, &gids[0]);MB_CHK_SET_ERR(rval, "Trouble getting local gid values of cells");

        // Restore localGidCells
        std::copy(gids.rbegin(), gids.rend(), range_inserter(localGidCells));
        nLocalCells = localGidCells.size();
      }
    }
  }

  return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_mesh(Range& faces)
{
  bool& noEdges = _readNC->noEdges;
  DebugOutput& dbgOut = _readNC->dbgOut;

#ifdef MOAB_HAVE_MPI
  int rank = 0;
  int procs = 1;
  bool& isParallel = _readNC->isParallel;
  if (isParallel) {
    ParallelComm*& myPcomm = _readNC->myPcomm;
    rank = myPcomm->proc_config().proc_rank();
    procs = myPcomm->proc_config().proc_size();
  }

  // Need to know whether we'll be creating gather mesh
  if (rank == _readNC->gatherSetRank)
    createGatherSet = true;

  if (procs >= 2) {
    // Shift rank to obtain a rotated trivial partition
    int shifted_rank = rank;
    int& trivialPartitionShift = _readNC->trivialPartitionShift;
    if (trivialPartitionShift > 0)
      shifted_rank = (rank + trivialPartitionShift) % procs;

    // Compute the number of local cells on this proc
    nLocalCells = int(std::floor(1.0 * nCells / procs));

    // The starting global cell index in the GCRM file for this proc
    int start_cell_idx = shifted_rank * nLocalCells;

    // Number of extra cells after equal split over procs
    int iextra = nCells % procs;

    // Allocate extra cells over procs
    if (shifted_rank < iextra)
      nLocalCells++;
    start_cell_idx += std::min(shifted_rank, iextra);

    start_cell_idx++; // 0 based -> 1 based
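    // For example, with nCells = 10 and procs = 3 (no shift), ranks 0/1/2 get 4/3/3 cells
    // with 1-based start indices 1/5/8, so every cell is assigned exactly once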

    // Redistribute local cells after trivial partition (e.g. apply Zoltan partition)
    ErrorCode rval = redistribute_local_cells(start_cell_idx);MB_CHK_SET_ERR(rval, "Failed to redistribute local cells after trivial partition");
  }
  else {
    nLocalCells = nCells;
    localGidCells.insert(1, nLocalCells);
  }
#else
  nLocalCells = nCells;
  localGidCells.insert(1, nLocalCells);
#endif
  dbgOut.tprintf(1, " localGidCells.psize() = %d\n", (int)localGidCells.psize());
  dbgOut.tprintf(1, " localGidCells.size() = %d\n", (int)localGidCells.size());

  // Read vertices on each local cell, to get localGidVerts and cell connectivity later
  int verticesOnCellVarId;
  int success = NCFUNC(inq_varid)(_fileId, "cell_corners", &verticesOnCellVarId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to get variable id of cell_corners");
  std::vector<int> vertices_on_local_cells(nLocalCells * EDGES_PER_CELL);
  dbgOut.tprintf(1, " nLocalCells = %d\n", (int)nLocalCells);
  dbgOut.tprintf(1, " vertices_on_local_cells.size() = %d\n", (int)vertices_on_local_cells.size());
#ifdef MOAB_HAVE_PNETCDF
  size_t nb_reads = localGidCells.psize();
  std::vector<int> requests(nb_reads);
  std::vector<int> statuss(nb_reads);
  size_t idxReq = 0;
#endif
  size_t indexInArray = 0;
  for (Range::pair_iterator pair_iter = localGidCells.pair_begin();
       pair_iter != localGidCells.pair_end();
       ++pair_iter) {
    EntityHandle starth = pair_iter->first;
    EntityHandle endh = pair_iter->second;
    dbgOut.tprintf(1, " cell_corners starth = %d\n", (int)starth);
    dbgOut.tprintf(1, " cell_corners   endh = %d\n", (int)endh);
    NCDF_SIZE read_starts[2] = {static_cast<NCDF_SIZE>(starth - 1), 0};
    NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(endh - starth + 1),
                                static_cast<NCDF_SIZE>(EDGES_PER_CELL)};

    // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
    success = NCFUNCREQG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts,
                                      &(vertices_on_local_cells[indexInArray]), &requests[idxReq++]);
#else
    success = NCFUNCAG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts,
                                      &(vertices_on_local_cells[indexInArray]));
#endif
    if (success)
      MB_SET_ERR(MB_FAILURE, "Failed to read cell_corners data in a loop");

    // Increment the index for next subrange
    indexInArray += (endh - starth + 1) * EDGES_PER_CELL;
  }

#ifdef MOAB_HAVE_PNETCDF
  // Wait outside the loop
  success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed on wait_all");
#endif

  // Correct vertices_on_local_cells array. Pentagons as hexagons should have
  // a connectivity like 123455 and not 122345
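  // For example, the padded connectivity [1 2 2 3 4 5] becomes [1 2 3 4 5 5]: the tail is
  // shifted left over the repeated vertex, leaving the last vertex duplicated at the end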
  for (int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++) {
    int* pvertex = &vertices_on_local_cells[local_cell_idx * EDGES_PER_CELL];
    for (int k = 0; k < EDGES_PER_CELL - 2; k++) {
      if (*(pvertex + k) == *(pvertex + k + 1)) {
        // Shift the connectivity
        for (int kk = k + 1; kk < EDGES_PER_CELL - 1; kk++)
          *(pvertex + kk) = *(pvertex + kk + 1);
        // No need to try next k
        break;
      }
    }
  }

  // GCRM is 0 based, convert vertex indices from 0 to 1 based
  for (std::size_t idx = 0; idx < vertices_on_local_cells.size(); idx++)
    vertices_on_local_cells[idx] += 1;

  // Create local vertices
  EntityHandle start_vertex;
  ErrorCode rval = create_local_vertices(vertices_on_local_cells, start_vertex);MB_CHK_SET_ERR(rval, "Failed to create local vertices for GCRM mesh");

  // Create local edges (unless NO_EDGES read option is set)
  if (!noEdges) {
    rval = create_local_edges(start_vertex);MB_CHK_SET_ERR(rval, "Failed to create local edges for GCRM mesh");
  }

  // Create local cells, padded
  rval = create_padded_local_cells(vertices_on_local_cells, start_vertex, faces);MB_CHK_SET_ERR(rval, "Failed to create padded local cells for GCRM mesh");

  if (createGatherSet) {
    EntityHandle gather_set;
    rval = _readNC->readMeshIface->create_gather_set(gather_set);MB_CHK_SET_ERR(rval, "Failed to create gather set");

    // Create gather set vertices
    EntityHandle start_gather_set_vertex;
    rval = create_gather_set_vertices(gather_set, start_gather_set_vertex);MB_CHK_SET_ERR(rval, "Failed to create gather set vertices for GCRM mesh");

    // Create gather set edges (unless NO_EDGES read option is set)
    if (!noEdges) {
      rval = create_gather_set_edges(gather_set, start_gather_set_vertex);MB_CHK_SET_ERR(rval, "Failed to create gather set edges for GCRM mesh");
    }

    // Create gather set cells with padding
    rval = create_padded_gather_set_cells(gather_set, start_gather_set_vertex);MB_CHK_SET_ERR(rval, "Failed to create padded gather set cells for GCRM mesh");
  }

  return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset_allocate(std::vector<ReadNC::VarData>& vdatas, std::vector<int>& tstep_nums)
{
  Interface*& mbImpl = _readNC->mbImpl;
  std::vector<int>& dimLens = _readNC->dimLens;
  bool& noEdges = _readNC->noEdges;
  DebugOutput& dbgOut = _readNC->dbgOut;

  Range* range = NULL;

  // Get vertices
  Range verts;
  ErrorCode rval = mbImpl->get_entities_by_dimension(_fileSet, 0, verts);MB_CHK_SET_ERR(rval, "Trouble getting vertices in current file set");
  assert("Should only have a single vertex subrange, since they were read in one shot" && verts.psize() == 1);

  // Get edges
  Range edges;
  rval = mbImpl->get_entities_by_dimension(_fileSet, 1, edges);MB_CHK_SET_ERR(rval, "Trouble getting edges in current file set");

  // Get faces
  Range faces;
  rval = mbImpl->get_entities_by_dimension(_fileSet, 2, faces);MB_CHK_SET_ERR(rval, "Trouble getting faces in current file set");
  assert("Should only have a single face subrange, since they were read in one shot" && faces.psize() == 1);

#ifdef MOAB_HAVE_MPI
  bool& isParallel = _readNC->isParallel;
  if (isParallel) {
    ParallelComm*& myPcomm = _readNC->myPcomm;
    rval = myPcomm->filter_pstatus(faces, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &facesOwned);MB_CHK_SET_ERR(rval, "Trouble getting owned faces in current file set");
  }
  else
    facesOwned = faces; // not running in parallel, but still with MPI
#else
  facesOwned = faces;
#endif

  for (unsigned int i = 0; i < vdatas.size(); i++) {
    // Skip edge variables, if specified by the read options
    if (noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE)
      continue;

    // Support non-set variables with 3 dimensions like (time, cells, layers), or
    // 2 dimensions like (time, cells)
    assert(3 == vdatas[i].varDims.size() || 2 == vdatas[i].varDims.size());

    // For a non-set variable, time should be the first dimension
    assert(tDim == vdatas[i].varDims[0]);

    // Set up readStarts and readCounts
    vdatas[i].readStarts.resize(3);
    vdatas[i].readCounts.resize(3);

    // First: Time
    vdatas[i].readStarts[0] = 0; // This value is timestep dependent, will be set later
    vdatas[i].readCounts[0] = 1;
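    // Note: readStarts/readCounts are laid out as (time, entity, level), matching the
    // dimension order of non-set variables; entries 1 and 2 are filled in below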

    // Next: nVertices / nCells / nEdges
    switch (vdatas[i].entLoc) {
      case ReadNC::ENTLOCVERT:
        // Vertices
        // Start from the first localGidVerts
        // Actually, this will be reset later on in a loop
        vdatas[i].readStarts[1] = localGidVerts[0] - 1;
        vdatas[i].readCounts[1] = nLocalVertices;
        range = &verts;
        break;
      case ReadNC::ENTLOCFACE:
        // Faces
        // Start from the first localGidCells
        // Actually, this will be reset later on in a loop
        vdatas[i].readStarts[1] = localGidCells[0] - 1;
        vdatas[i].readCounts[1] = nLocalCells;
        range = &facesOwned;
        break;
      case ReadNC::ENTLOCEDGE:
        // Edges
        // Start from the first localGidEdges
        // Actually, this will be reset later on in a loop
        vdatas[i].readStarts[1] = localGidEdges[0] - 1;
        vdatas[i].readCounts[1] = nLocalEdges;
        range = &edges;
        break;
      default:
        MB_SET_ERR(MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName);
    }

    // Finally: layers or other optional levels, it is possible that there is no
    // level dimension (numLev is 0) for this non-set variable
    if (vdatas[i].numLev < 1)
      vdatas[i].numLev = 1;
    vdatas[i].readStarts[2] = 0;
    vdatas[i].readCounts[2] = vdatas[i].numLev;

    // Get variable size
    vdatas[i].sz = 1;
    for (std::size_t idx = 0; idx != 3; idx++)
      vdatas[i].sz *= vdatas[i].readCounts[idx];

    for (unsigned int t = 0; t < tstep_nums.size(); t++) {
      dbgOut.tprintf(2, "Reading variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t]);

      if (tstep_nums[t] >= dimLens[tDim]) {
        MB_SET_ERR(MB_INDEX_OUT_OF_RANGE, "Wrong value for timestep number " << tstep_nums[t]);
      }

      // Get the tag to read into
      if (!vdatas[i].varTags[t]) {
        rval = get_tag_to_nonset(vdatas[i], tstep_nums[t], vdatas[i].varTags[t], vdatas[i].numLev);MB_CHK_SET_ERR(rval, "Trouble getting tag for variable " << vdatas[i].varName);
      }

      // Get ptr to tag space
      assert(1 == range->psize());
      void* data;
      int count;
      rval = mbImpl->tag_iterate(vdatas[i].varTags[t], range->begin(), range->end(), count, data);MB_CHK_SET_ERR(rval, "Failed to iterate tag for variable " << vdatas[i].varName);
      assert((unsigned)count == range->size());
      vdatas[i].varDatas[t] = data;
    }
  }

  return rval;
}

#ifdef MOAB_HAVE_PNETCDF
ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset_async(std::vector<ReadNC::VarData>& vdatas, std::vector<int>& tstep_nums)
{
  bool& noEdges = _readNC->noEdges;
  DebugOutput& dbgOut = _readNC->dbgOut;

  ErrorCode rval = read_ucd_variables_to_nonset_allocate(vdatas, tstep_nums);MB_CHK_SET_ERR(rval, "Trouble allocating space to read non-set variables");

  // Finally, read into that space
  int success;
  Range* pLocalGid = NULL;

  for (unsigned int i = 0; i < vdatas.size(); i++) {
    // Skip edge variables, if specified by the read options
    if (noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE)
      continue;

    switch (vdatas[i].entLoc) {
      case ReadNC::ENTLOCVERT:
        pLocalGid = &localGidVerts;
        break;
      case ReadNC::ENTLOCFACE:
        pLocalGid = &localGidCells;
        break;
      case ReadNC::ENTLOCEDGE:
        pLocalGid = &localGidEdges;
        break;
      default:
        MB_SET_ERR(MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName);
    }

    std::size_t sz = vdatas[i].sz;

    for (unsigned int t = 0; t < tstep_nums.size(); t++) {
      // We will synchronize all these reads with the other processors,
      // so the wait will be inside this double loop; is it too much?
      size_t nb_reads = pLocalGid->psize();
      std::vector<int> requests(nb_reads), statuss(nb_reads);
      size_t idxReq = 0;

      // Set readStart for each timestep along time dimension
      vdatas[i].readStarts[0] = tstep_nums[t];

      switch (vdatas[i].varDataType) {
        case NC_FLOAT:
        case NC_DOUBLE: {
          // Read float as double
          std::vector<double> tmpdoubledata(sz);

          // In the case of a ucd mesh on multiple procs, we need to issue
          // as many reads as there are subranges in the localGid range;
          // basically, we have to give a different starting point
          // for the data in every subrange :(
          size_t indexInDoubleArray = 0;
          size_t ic = 0;
          for (Range::pair_iterator pair_iter = pLocalGid->pair_begin();
              pair_iter != pLocalGid->pair_end();
              ++pair_iter, ic++) {
            EntityHandle starth = pair_iter->first;
            EntityHandle endh = pair_iter->second; // inclusive
            vdatas[i].readStarts[1] = (NCDF_SIZE) (starth - 1);
            vdatas[i].readCounts[1] = (NCDF_SIZE) (endh - starth + 1);

            // Do a partial read, in each subrange
            // wait outside this loop
            success = NCFUNCREQG(_vara_double)(_fileId, vdatas[i].varId,
                &(vdatas[i].readStarts[0]), &(vdatas[i].readCounts[0]),
                            &(tmpdoubledata[indexInDoubleArray]), &requests[idxReq++]);
            if (success)
              MB_SET_ERR(MB_FAILURE, "Failed to read double data in a loop for variable " << vdatas[i].varName);
            // We need to increment the index in double array for the
            // next subrange
            indexInDoubleArray += (endh - starth + 1) * 1 * vdatas[i].numLev;
          }
          assert(ic == pLocalGid->psize());

          success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
          if (success)
            MB_SET_ERR(MB_FAILURE, "Failed on wait_all");

          void* data = vdatas[i].varDatas[t];
          for (std::size_t idx = 0; idx != tmpdoubledata.size(); idx++)
            ((double*) data)[idx] = tmpdoubledata[idx];

          break;
        }
        default:
          MB_SET_ERR(MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName);
      }
    }
  }

  // Debug output, if requested
  if (1 == dbgOut.get_verbosity()) {
    dbgOut.printf(1, "Read variables: %s", vdatas.begin()->varName.c_str());
    for (unsigned int i = 1; i < vdatas.size(); i++)
      dbgOut.printf(1, ", %s ", vdatas[i].varName.c_str());
    dbgOut.tprintf(1, "\n");
  }

  return rval;
}
#else
ErrorCode NCHelperGCRM::read_ucd_variables_to_nonset(std::vector<ReadNC::VarData>& vdatas, std::vector<int>& tstep_nums)
{
  bool& noEdges = _readNC->noEdges;
  DebugOutput& dbgOut = _readNC->dbgOut;

  ErrorCode rval = read_ucd_variables_to_nonset_allocate(vdatas, tstep_nums);MB_CHK_SET_ERR(rval, "Trouble allocating space to read non-set variables");

  // Finally, read into that space
  int success;
  Range* pLocalGid = NULL;

  for (unsigned int i = 0; i < vdatas.size(); i++) {
    // Skip edge variables, if specified by the read options
    if (noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE)
      continue;

    switch (vdatas[i].entLoc) {
      case ReadNC::ENTLOCVERT:
        pLocalGid = &localGidVerts;
        break;
      case ReadNC::ENTLOCFACE:
        pLocalGid = &localGidCells;
        break;
      case ReadNC::ENTLOCEDGE:
        pLocalGid = &localGidEdges;
        break;
      default:
        MB_SET_ERR(MB_FAILURE, "Unexpected entity location type for variable " << vdatas[i].varName);
    }

    std::size_t sz = vdatas[i].sz;

    for (unsigned int t = 0; t < tstep_nums.size(); t++) {
      // Set readStart for each timestep along time dimension
      vdatas[i].readStarts[0] = tstep_nums[t];

      switch (vdatas[i].varDataType) {
        case NC_FLOAT:
        case NC_DOUBLE: {
          // Read float as double
          std::vector<double> tmpdoubledata(sz);

          // In the case of a ucd mesh on multiple procs, we need to issue
          // as many reads as there are subranges in the localGid range;
          // basically, we have to give a different starting point
          // for the data in every subrange :(
          size_t indexInDoubleArray = 0;
          size_t ic = 0;
          for (Range::pair_iterator pair_iter = pLocalGid->pair_begin();
              pair_iter != pLocalGid->pair_end();
              ++pair_iter, ic++) {
            EntityHandle starth = pair_iter->first;
            EntityHandle endh = pair_iter->second; // Inclusive
            vdatas[i].readStarts[1] = (NCDF_SIZE) (starth - 1);
            vdatas[i].readCounts[1] = (NCDF_SIZE) (endh - starth + 1);

            success = NCFUNCAG(_vara_double)(_fileId, vdatas[i].varId,
                &(vdatas[i].readStarts[0]), &(vdatas[i].readCounts[0]),
                            &(tmpdoubledata[indexInDoubleArray]));
            if (success)
              MB_SET_ERR(MB_FAILURE, "Failed to read double data in a loop for variable " << vdatas[i].varName);
            // We need to increment the index in double array for the
            // next subrange
            indexInDoubleArray += (endh - starth + 1) * 1 * vdatas[i].numLev;
          }
          assert(ic == pLocalGid->psize());

          void* data = vdatas[i].varDatas[t];
          for (std::size_t idx = 0; idx != tmpdoubledata.size(); idx++)
            ((double*) data)[idx] = tmpdoubledata[idx];

          break;
        }
        default:
          MB_SET_ERR(MB_FAILURE, "Unexpected data type for variable " << vdatas[i].varName);
      }
    }
  }

  // Debug output, if requested
  if (1 == dbgOut.get_verbosity()) {
    dbgOut.printf(1, "Read variables: %s", vdatas.begin()->varName.c_str());
    for (unsigned int i = 1; i < vdatas.size(); i++)
      dbgOut.printf(1, ", %s ", vdatas[i].varName.c_str());
    dbgOut.tprintf(1, "\n");
  }

  return rval;
}
#endif

#ifdef MOAB_HAVE_MPI
ErrorCode NCHelperGCRM::redistribute_local_cells(int start_cell_idx)
{
  // If possible, apply Zoltan partition
#ifdef MOAB_HAVE_ZOLTAN
  if (_readNC->partMethod == ScdParData::RCBZOLTAN) {
    // Read lat/lon coordinates of cell centers, then convert spherical to
    // Cartesian, and use them as input to Zoltan partition
    int xCellVarId;
    int success = NCFUNC(inq_varid)(_fileId, "grid_center_lat", &xCellVarId);
    if (success)
      MB_SET_ERR(MB_FAILURE, "Failed to get variable id of grid_center_lat");
    std::vector<double> xCell(nLocalCells);
    NCDF_SIZE read_start = static_cast<NCDF_SIZE>(start_cell_idx - 1);
    NCDF_SIZE read_count = static_cast<NCDF_SIZE>(nLocalCells);
    success = NCFUNCAG(_vara_double)(_fileId, xCellVarId, &read_start, &read_count, &xCell[0]);
    if (success)
      MB_SET_ERR(MB_FAILURE, "Failed to read grid_center_lat data");

    // Read y coordinates of cell centers
    int yCellVarId;
    success = NCFUNC(inq_varid)(_fileId, "grid_center_lon", &yCellVarId);
    if (success)
      MB_SET_ERR(MB_FAILURE, "Failed to get variable id of grid_center_lon");
    std::vector<double> yCell(nLocalCells);
    success = NCFUNCAG(_vara_double)(_fileId, yCellVarId, &read_start, &read_count, &yCell[0]);
    if (success)
      MB_SET_ERR(MB_FAILURE, "Failed to read grid_center_lon data");

    // Convert lon/lat/rad to x/y/z
    std::vector<double> zCell(nLocalCells);
    double rad = 8000.0; // This is just an approximate radius
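    // Standard spherical-to-Cartesian mapping (xCell holds lat, yCell holds lon):
    // x = r*cos(lat)*cos(lon), y = r*cos(lat)*sin(lon), z = r*sin(lat)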
    for (int i = 0; i < nLocalCells; i++) {
      double cosphi = cos(xCell[i]);
      double zmult = sin(xCell[i]);
      double xmult = cosphi * cos(yCell[i]);
      double ymult = cosphi * sin(yCell[i]);
      xCell[i] = rad * xmult;
      yCell[i] = rad * ymult;
      zCell[i] = rad * zmult;
    }

    // Zoltan partition using RCB; maybe more studies would be good, as to which partition
    // is better
    Interface*& mbImpl = _readNC->mbImpl;
    DebugOutput& dbgOut = _readNC->dbgOut;
    ZoltanPartitioner* mbZTool = new ZoltanPartitioner(mbImpl, false, 0, NULL);
    ErrorCode rval = mbZTool->repartition(xCell, yCell, zCell, start_cell_idx, "RCB", localGidCells);MB_CHK_SET_ERR(rval, "Error in Zoltan partitioning");
    delete mbZTool;

    dbgOut.tprintf(1, "After Zoltan partitioning, localGidCells.psize() = %d\n", (int)localGidCells.psize());
    dbgOut.tprintf(1, "                           localGidCells.size() = %d\n", (int)localGidCells.size());

    // This is important: local cells are now redistributed, so nLocalCells might be different!
    nLocalCells = localGidCells.size();

    return MB_SUCCESS;
  }
#endif

  // By default, apply trivial partition
  localGidCells.insert(start_cell_idx, start_cell_idx + nLocalCells - 1);

  return MB_SUCCESS;
}
#endif

ErrorCode NCHelperGCRM::create_local_vertices(const std::vector<int>& vertices_on_local_cells, EntityHandle& start_vertex)
{
  Interface*& mbImpl = _readNC->mbImpl;
  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
  const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
  DebugOutput& dbgOut = _readNC->dbgOut;
  std::map<std::string, ReadNC::VarData>& varInfo = _readNC->varInfo;

  // Make a copy of vertices_on_local_cells for sorting (keep original one to set cell connectivity later)
  std::vector<int> vertices_on_local_cells_sorted(vertices_on_local_cells);
  std::sort(vertices_on_local_cells_sorted.begin(), vertices_on_local_cells_sorted.end());
  std::copy(vertices_on_local_cells_sorted.rbegin(), vertices_on_local_cells_sorted.rend(), range_inserter(localGidVerts));
  nLocalVertices = localGidVerts.size();

  dbgOut.tprintf(1, "   localGidVerts.psize() = %d\n", (int)localGidVerts.psize());
  dbgOut.tprintf(1, "   localGidVerts.size() = %d\n", (int)localGidVerts.size());

  // Create local vertices
  std::vector<double*> arrays;
  ErrorCode rval = _readNC->readMeshIface->get_node_coords(3, nLocalVertices, 0, start_vertex, arrays,
                                                          // Might have to create gather mesh later
                                                          (createGatherSet ? nLocalVertices + nVertices : nLocalVertices));MB_CHK_SET_ERR(rval, "Failed to create local vertices");

  // Add local vertices to current file set
  Range local_verts_range(start_vertex, start_vertex + nLocalVertices - 1);
  rval = _readNC->mbImpl->add_entities(_fileSet, local_verts_range);MB_CHK_SET_ERR(rval, "Failed to add local vertices to current file set");

  // Get ptr to GID memory for local vertices
  int count = 0;
  void* data = NULL;
  rval = mbImpl->tag_iterate(mGlobalIdTag, local_verts_range.begin(), local_verts_range.end(), count, data);MB_CHK_SET_ERR(rval, "Failed to iterate global id tag on local vertices");
  assert(count == nLocalVertices);
  int* gid_data = (int*) data;
  std::copy(localGidVerts.begin(), localGidVerts.end(), gid_data);

  // Duplicate GID data, which will be used to resolve sharing
  if (mpFileIdTag) {
    rval = mbImpl->tag_iterate(*mpFileIdTag, local_verts_range.begin(), local_verts_range.end(), count, data);MB_CHK_SET_ERR(rval, "Failed to iterate file id tag on local vertices");
    assert(count == nLocalVertices);
    int bytes_per_tag = 4;
    rval = mbImpl->tag_get_bytes(*mpFileIdTag, bytes_per_tag);MB_CHK_SET_ERR(rval, "can't get number of bytes for file id tag");
    if (4 == bytes_per_tag) {
      gid_data = (int*) data;
      std::copy(localGidVerts.begin(), localGidVerts.end(), gid_data);
    }
    else if (8 == bytes_per_tag) { // Should be a handle tag on 64 bit machine?
      long* handle_tag_data = (long*)data;
      std::copy(localGidVerts.begin(), localGidVerts.end(), handle_tag_data);
    }
  }

#ifdef MOAB_HAVE_PNETCDF
  size_t nb_reads = localGidVerts.psize();
  std::vector<int> requests(nb_reads);
  std::vector<int> statuss(nb_reads);
  size_t idxReq = 0;
#endif

  // Store lev values in levVals
  std::map<std::string, ReadNC::VarData>::iterator vmit;
  if ((vmit = varInfo.find("layers")) != varInfo.end() && (*vmit).second.varDims.size() == 1) {
    rval = read_coordinate("layers", 0, nLevels - 1, levVals);MB_CHK_SET_ERR(rval, "Trouble reading 'layers' variable");
  }
  else if ((vmit = varInfo.find("interfaces")) != varInfo.end() && (*vmit).second.varDims.size() == 1) {
    rval = read_coordinate("interfaces", 0, nLevels - 1, levVals);MB_CHK_SET_ERR(rval, "Trouble reading 'interfaces' variable");
  }
  else {
    MB_SET_ERR(MB_FAILURE, "Couldn't find 'layers' or 'interfaces' variable");
  }

  // Decide whether down is positive
  char posval[10] = {0};
  int success = NCFUNC(get_att_text)(_fileId, (*vmit).second.varId, "positive", posval);
  if (0 == success && !strncmp(posval, "down", 4)) {
    for (std::vector<double>::iterator dvit = levVals.begin(); dvit != levVals.end(); ++dvit)
      (*dvit) *= -1.0;
  }

  // Read x coordinates for local vertices
  double* xptr = arrays[0];
  int xVertexVarId;
  success = NCFUNC(inq_varid)(_fileId, "grid_corner_lon", &xVertexVarId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to get variable id of grid_corner_lon");
  size_t indexInArray = 0;
  for (Range::pair_iterator pair_iter = localGidVerts.pair_begin();
       pair_iter != localGidVerts.pair_end();
       ++pair_iter) {
    EntityHandle starth = pair_iter->first;
    EntityHandle endh = pair_iter->second;
    NCDF_SIZE read_start = (NCDF_SIZE) (starth - 1);
    NCDF_SIZE read_count = (NCDF_SIZE) (endh - starth + 1);

    // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
    success = NCFUNCREQG(_vara_double)(_fileId, xVertexVarId, &read_start, &read_count,
                                      &(xptr[indexInArray]), &requests[idxReq++]);
#else
    success = NCFUNCAG(_vara_double)(_fileId, xVertexVarId, &read_start, &read_count,
                                      &(xptr[indexInArray]));
#endif
    if (success)
      MB_SET_ERR(MB_FAILURE, "Failed to read grid_corner_lon data in a loop");

    // Increment the index for next subrange
    indexInArray += (endh - starth + 1);
  }

#ifdef MOAB_HAVE_PNETCDF
  // Wait outside the loop
  success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed on wait_all");
#endif

  // Read y coordinates for local vertices
  double* yptr = arrays[1];
  int yVertexVarId;
  success = NCFUNC(inq_varid)(_fileId, "grid_corner_lat", &yVertexVarId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to get variable id of grid_corner_lat");
#ifdef MOAB_HAVE_PNETCDF
  idxReq = 0;
#endif
  indexInArray = 0;
  for (Range::pair_iterator pair_iter = localGidVerts.pair_begin();
       pair_iter != localGidVerts.pair_end();
       ++pair_iter) {
    EntityHandle starth = pair_iter->first;
    EntityHandle endh = pair_iter->second;
    NCDF_SIZE read_start = (NCDF_SIZE) (starth - 1);
    NCDF_SIZE read_count = (NCDF_SIZE) (endh - starth + 1);

    // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
    success = NCFUNCREQG(_vara_double)(_fileId, yVertexVarId, &read_start, &read_count,
                                      &(yptr[indexInArray]), &requests[idxReq++]);
#else
    success = NCFUNCAG(_vara_double)(_fileId, yVertexVarId, &read_start, &read_count,
                                      &(yptr[indexInArray]));
#endif
    if (success)
      MB_SET_ERR(MB_FAILURE, "Failed to read grid_corner_lat data in a loop");

    // Increment the index for next subrange
    indexInArray += (endh - starth + 1);
  }

#ifdef MOAB_HAVE_PNETCDF
  // Wait outside the loop
  success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed on wait_all");
#endif

  // Convert lon/lat/rad to x/y/z
  double* zptr = arrays[2];
  double rad = 8000.0 + levVals[0];
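  // Same lat/lon to Cartesian mapping as in redistribute_local_cells(); here xptr holds lon
  // (grid_corner_lon), yptr holds lat (grid_corner_lat), and the radius is offset by the first level value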
  for (int i = 0; i < nLocalVertices; i++) {
    double cosphi = cos(yptr[i]);
    double zmult = sin(yptr[i]);
    double xmult = cosphi * cos(xptr[i]);
    double ymult = cosphi * sin(xptr[i]);
    xptr[i] = rad * xmult;
    yptr[i] = rad * ymult;
    zptr[i] = rad * zmult;
  }

  return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_local_edges(EntityHandle start_vertex)
{
  Interface*& mbImpl = _readNC->mbImpl;
  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
  DebugOutput& dbgOut = _readNC->dbgOut;

  // Read edges on each local cell, to get localGidEdges
  int edgesOnCellVarId;
  int success = NCFUNC(inq_varid)(_fileId, "cell_edges", &edgesOnCellVarId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to get variable id of cell_edges");

  std::vector<int> edges_on_local_cells(nLocalCells * EDGES_PER_CELL);
  dbgOut.tprintf(1, "   edges_on_local_cells.size() = %d\n", (int)edges_on_local_cells.size());

#ifdef MOAB_HAVE_PNETCDF
  size_t nb_reads = localGidCells.psize();
  std::vector<int> requests(nb_reads);
  std::vector<int> statuss(nb_reads);
  size_t idxReq = 0;
#endif
  size_t indexInArray = 0;
  for (Range::pair_iterator pair_iter = localGidCells.pair_begin();
       pair_iter != localGidCells.pair_end();
       ++pair_iter) {
    EntityHandle starth = pair_iter->first;
    EntityHandle endh = pair_iter->second;
    dbgOut.tprintf(1, "   starth = %d\n", (int)starth);
    dbgOut.tprintf(1, "   endh = %d\n", (int)endh);
    NCDF_SIZE read_starts[2] = {static_cast<NCDF_SIZE>(starth - 1), 0};
    NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(endh - starth + 1), static_cast<NCDF_SIZE>(EDGES_PER_CELL)};

    // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
    success = NCFUNCREQG(_vara_int)(_fileId, edgesOnCellVarId, read_starts, read_counts,
                                      &(edges_on_local_cells[indexInArray]), &requests[idxReq++]);
#else
    success = NCFUNCAG(_vara_int)(_fileId, edgesOnCellVarId, read_starts, read_counts,
                                      &(edges_on_local_cells[indexInArray]));
#endif
    if (success)
      MB_SET_ERR(MB_FAILURE, "Failed to read cell_edges data in a loop");

    // Increment the index for next subrange
    indexInArray += (endh - starth + 1) * EDGES_PER_CELL;
  }

#ifdef MOAB_HAVE_PNETCDF
  // Wait outside the loop
  success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed on wait_all");
#endif

  // GCRM is 0 based, convert edge indices from 0 to 1 based
  for (std::size_t idx = 0; idx < edges_on_local_cells.size(); idx++)
    edges_on_local_cells[idx] += 1;

  // Collect local edges
  std::sort(edges_on_local_cells.begin(), edges_on_local_cells.end());
  std::copy(edges_on_local_cells.rbegin(), edges_on_local_cells.rend(), range_inserter(localGidEdges));
  nLocalEdges = localGidEdges.size();

  dbgOut.tprintf(1, "   localGidEdges.psize() = %d\n", (int)localGidEdges.psize());
  dbgOut.tprintf(1, "   localGidEdges.size() = %d\n", (int)localGidEdges.size());

  // Create local edges
  EntityHandle start_edge;
  EntityHandle* conn_arr_edges = NULL;
  ErrorCode rval = _readNC->readMeshIface->get_element_connect(nLocalEdges, 2, MBEDGE, 0, start_edge, conn_arr_edges,
                                                              // Might have to create gather mesh later
                                                              (createGatherSet ? nLocalEdges + nEdges : nLocalEdges));MB_CHK_SET_ERR(rval, "Failed to create local edges");

  // Add local edges to current file set
  Range local_edges_range(start_edge, start_edge + nLocalEdges - 1);
  rval = _readNC->mbImpl->add_entities(_fileSet, local_edges_range);MB_CHK_SET_ERR(rval, "Failed to add local edges to current file set");

  // Get ptr to GID memory for edges
  int count = 0;
  void* data = NULL;
  rval = mbImpl->tag_iterate(mGlobalIdTag, local_edges_range.begin(), local_edges_range.end(), count, data);MB_CHK_SET_ERR(rval, "Failed to iterate global id tag on local edges");
  assert(count == nLocalEdges);
  int* gid_data = (int*) data;
  std::copy(localGidEdges.begin(), localGidEdges.end(), gid_data);

  int verticesOnEdgeVarId;

  // Read vertices on each local edge, to get edge connectivity
  success = NCFUNC(inq_varid)(_fileId, "edge_corners", &verticesOnEdgeVarId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to get variable id of edge_corners");
  // Utilize the memory storage pointed to by conn_arr_edges
  int* vertices_on_local_edges = (int*) conn_arr_edges;
#ifdef MOAB_HAVE_PNETCDF
  nb_reads = localGidEdges.psize();
  requests.resize(nb_reads);
  statuss.resize(nb_reads);
  idxReq = 0;
#endif
  indexInArray = 0;
  for (Range::pair_iterator pair_iter = localGidEdges.pair_begin();
      pair_iter != localGidEdges.pair_end();
      ++pair_iter) {
    EntityHandle starth = pair_iter->first;
    EntityHandle endh = pair_iter->second;
    NCDF_SIZE read_starts[2] = {static_cast<NCDF_SIZE>(starth - 1), 0};
    NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(endh - starth + 1), 2};

    // Do a partial read in each subrange
#ifdef MOAB_HAVE_PNETCDF
    success = NCFUNCREQG(_vara_int)(_fileId, verticesOnEdgeVarId, read_starts, read_counts,
                                    &(vertices_on_local_edges[indexInArray]), &requests[idxReq++]);
#else
    success = NCFUNCAG(_vara_int)(_fileId, verticesOnEdgeVarId, read_starts, read_counts,
                                    &(vertices_on_local_edges[indexInArray]));
#endif
    if (success)
      MB_SET_ERR(MB_FAILURE, "Failed to read edge_corners data in a loop");

    // Increment the index for next subrange
    indexInArray += (endh - starth + 1) * 2;
  }

#ifdef MOAB_HAVE_PNETCDF
  // Wait outside the loop
  success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed on wait_all");
#endif

  // Populate connectivity data for local edges
  // Convert in-place from int (stored in the first half) to EntityHandle
  // Reading backward is the trick
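  // (Iterating from the end ensures the wider EntityHandle writes never overwrite
  // int entries that have not been read yet, since sizeof(EntityHandle) >= sizeof(int))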
  for (int edge_vert = nLocalEdges * 2 - 1; edge_vert >= 0; edge_vert--) {
    // Note, indices stored in vertices_on_local_edges are 0 based
    int global_vert_idx = vertices_on_local_edges[edge_vert] + 1; // Global vertex index, 1 based
    int local_vert_idx = localGidVerts.index(global_vert_idx); // Local vertex index, 0 based
    assert(local_vert_idx != -1);
    conn_arr_edges[edge_vert] = start_vertex + local_vert_idx;
  }

  return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_padded_local_cells(const std::vector<int>& vertices_on_local_cells,
                                                  EntityHandle start_vertex, Range& faces)
{
  Interface*& mbImpl = _readNC->mbImpl;
  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;

  // Create cells
  EntityHandle start_element;
  EntityHandle* conn_arr_local_cells = NULL;
  ErrorCode rval = _readNC->readMeshIface->get_element_connect(nLocalCells, EDGES_PER_CELL, MBPOLYGON, 0, start_element, conn_arr_local_cells,
                                                              // Might have to create gather mesh later
                                                              (createGatherSet ? nLocalCells + nCells : nLocalCells));MB_CHK_SET_ERR(rval, "Failed to create local cells");
  faces.insert(start_element, start_element + nLocalCells - 1);

  // Add local cells to current file set
  Range local_cells_range(start_element, start_element + nLocalCells - 1);
  rval = _readNC->mbImpl->add_entities(_fileSet, local_cells_range);MB_CHK_SET_ERR(rval, "Failed to add local cells to current file set");

  // Get ptr to GID memory for local cells
  int count = 0;
  void* data = NULL;
  rval = mbImpl->tag_iterate(mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data);MB_CHK_SET_ERR(rval, "Failed to iterate global id tag on local cells");
  assert(count == nLocalCells);
  int* gid_data = (int*) data;
  std::copy(localGidCells.begin(), localGidCells.end(), gid_data);

  // Set connectivity array with proper local vertices handles
  // vertices_on_local_cells array was already corrected to have
  // the last vertices repeated for pentagons, e.g. 122345 => 123455
  for (int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++) {
    for (int i = 0; i < EDGES_PER_CELL; i++) {
      // Note, indices stored in vertices_on_local_cells are 1 based
      EntityHandle global_vert_idx = vertices_on_local_cells[local_cell_idx * EDGES_PER_CELL + i]; // Global vertex index, 1 based
      int local_vert_idx = localGidVerts.index(global_vert_idx); // Local vertex index, 0 based
      assert(local_vert_idx != -1);
      conn_arr_local_cells[local_cell_idx * EDGES_PER_CELL + i] = start_vertex + local_vert_idx;
    }
  }

  return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_gather_set_vertices(EntityHandle gather_set, EntityHandle& gather_set_start_vertex)
{
  Interface*& mbImpl = _readNC->mbImpl;
  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
  const Tag*& mpFileIdTag = _readNC->mpFileIdTag;

  // Create gather set vertices
  std::vector<double*> arrays;
  // Don't need to specify allocation number here, because we know enough vertices were created before
  ErrorCode rval = _readNC->readMeshIface->get_node_coords(3, nVertices, 0, gather_set_start_vertex, arrays);MB_CHK_SET_ERR(rval, "Failed to create gather set vertices");

  // Add vertices to the gather set
  Range gather_set_verts_range(gather_set_start_vertex, gather_set_start_vertex + nVertices - 1);
  rval = mbImpl->add_entities(gather_set, gather_set_verts_range);MB_CHK_SET_ERR(rval, "Failed to add vertices to the gather set");

  // Read x coordinates for gather set vertices
  double* xptr = arrays[0];
  int xVertexVarId;
  int success = NCFUNC(inq_varid)(_fileId, "grid_corner_lon", &xVertexVarId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to get variable id of grid_corner_lon");
  NCDF_SIZE read_start = 0;
  NCDF_SIZE read_count = static_cast<NCDF_SIZE>(nVertices);
#ifdef MOAB_HAVE_PNETCDF
  // Enter independent I/O mode, since this read is only for the gather processor
  success = NCFUNC(begin_indep_data)(_fileId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to begin independent I/O mode");
  success = NCFUNCG(_vara_double)(_fileId, xVertexVarId, &read_start, &read_count, xptr);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to read grid_corner_lon data");
  success = NCFUNC(end_indep_data)(_fileId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to end independent I/O mode");
#else
  success = NCFUNCG(_vara_double)(_fileId, xVertexVarId, &read_start, &read_count, xptr);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to read grid_corner_lon data");
#endif

  // Read y coordinates for gather set vertices
  double* yptr = arrays[1];
  int yVertexVarId;
  success = NCFUNC(inq_varid)(_fileId, "grid_corner_lat", &yVertexVarId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to get variable id of grid_corner_lat");
#ifdef MOAB_HAVE_PNETCDF
  // Enter independent I/O mode, since this read is only for the gather processor
  success = NCFUNC(begin_indep_data)(_fileId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to begin independent I/O mode");
  success = NCFUNCG(_vara_double)(_fileId, yVertexVarId, &read_start, &read_count, yptr);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to read grid_corner_lat data");
  success = NCFUNC(end_indep_data)(_fileId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to end independent I/O mode");
#else
  success = NCFUNCG(_vara_double)(_fileId, yVertexVarId, &read_start, &read_count, yptr);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to read grid_corner_lat data");
#endif

  // Convert lon/lat/rad to x/y/z
  double* zptr = arrays[2];
  double rad = 8000.0 + levVals[0];
  for (int i = 0; i < nVertices; i++) {
    double cosphi = cos(yptr[i]);
    double zmult = sin(yptr[i]);
    double xmult = cosphi * cos(xptr[i]);
    double ymult = cosphi * sin(xptr[i]);
    xptr[i] = rad * xmult;
    yptr[i] = rad * ymult;
    zptr[i] = rad * zmult;
  }

  // Get ptr to GID memory for gather set vertices
  int count = 0;
  void* data = NULL;
  rval = mbImpl->tag_iterate(mGlobalIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(),
                             count, data);MB_CHK_SET_ERR(rval, "Failed to iterate global id tag on gather set vertices");
  assert(count == nVertices);
  int* gid_data = (int*) data;
  for (int j = 1; j <= nVertices; j++)
    gid_data[j - 1] = j;

  // Set the file id tag too; it should be something bigger, not interfering with the global id
  if (mpFileIdTag) {
    rval = mbImpl->tag_iterate(*mpFileIdTag, gather_set_verts_range.begin(), gather_set_verts_range.end(),
                               count, data);MB_CHK_SET_ERR(rval, "Failed to iterate file id tag on gather set vertices");
    assert(count == nVertices);
    int bytes_per_tag = 4;
    rval = mbImpl->tag_get_bytes(*mpFileIdTag, bytes_per_tag);MB_CHK_SET_ERR(rval, "Can't get number of bytes for file id tag");
    if (4 == bytes_per_tag) {
      gid_data = (int*) data;
      for (int j = 1; j <= nVertices; j++)
        gid_data[j - 1] = nVertices + j; // Bigger than global id tag
    }
    else if (8 == bytes_per_tag) { // Should be a handle tag on 64 bit machine?
      long* handle_tag_data = (long*)data;
      for (int j = 1; j <= nVertices; j++)
        handle_tag_data[j - 1] = nVertices + j; // Bigger than global id tag
    }
  }

  return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_gather_set_edges(EntityHandle gather_set, EntityHandle gather_set_start_vertex)
{
  Interface*& mbImpl = _readNC->mbImpl;

  // Create gather set edges
  EntityHandle start_edge;
  EntityHandle* conn_arr_gather_set_edges = NULL;
  // Don't need to specify allocation number here, because we know enough edges were created before
  ErrorCode rval = _readNC->readMeshIface->get_element_connect(nEdges, 2, MBEDGE, 0, start_edge,
                                                               conn_arr_gather_set_edges);MB_CHK_SET_ERR(rval, "Failed to create gather set edges");

  // Add edges to the gather set
  Range gather_set_edges_range(start_edge, start_edge + nEdges - 1);
  rval = mbImpl->add_entities(gather_set, gather_set_edges_range);MB_CHK_SET_ERR(rval, "Failed to add edges to the gather set");

  // Read vertices on each edge
  int verticesOnEdgeVarId;
  int success = NCFUNC(inq_varid)(_fileId, "edge_corners", &verticesOnEdgeVarId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to get variable id of edge_corners");
  // Utilize the memory storage pointed to by conn_arr_gather_set_edges
  int* vertices_on_gather_set_edges = (int*) conn_arr_gather_set_edges;
  NCDF_SIZE read_starts[2] = {0, 0};
  NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(nEdges), 2};
#ifdef MOAB_HAVE_PNETCDF
  // Enter independent I/O mode, since this read is only for the gather processor
  success = NCFUNC(begin_indep_data)(_fileId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to begin independent I/O mode");
  success = NCFUNCG(_vara_int)(_fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to read edge_corners data");
  success = NCFUNC(end_indep_data)(_fileId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to end independent I/O mode");
#else
  success = NCFUNCG(_vara_int)(_fileId, verticesOnEdgeVarId, read_starts, read_counts, vertices_on_gather_set_edges);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to read edge_corners data");
#endif

  // Populate connectivity data for gather set edges
  // Convert in-place from int (stored in the first half) to EntityHandle
  // Reading backward is the trick
  for (int edge_vert = nEdges * 2 - 1; edge_vert >= 0; edge_vert--) {
    // Note, indices stored in vertices_on_gather_set_edges are 0 based
    int gather_set_vert_idx = vertices_on_gather_set_edges[edge_vert]; // Global vertex index, 0 based
    // Connectivity array is shifted by where the gather set vertices start
    conn_arr_gather_set_edges[edge_vert] = gather_set_start_vertex + gather_set_vert_idx;
  }

  return MB_SUCCESS;
}

ErrorCode NCHelperGCRM::create_padded_gather_set_cells(EntityHandle gather_set, EntityHandle gather_set_start_vertex)
{
  Interface*& mbImpl = _readNC->mbImpl;

  // Create gather set cells
  EntityHandle start_element;
  EntityHandle* conn_arr_gather_set_cells = NULL;
  // Don't need to specify allocation number here, because we know enough cells were created before
  ErrorCode rval = _readNC->readMeshIface->get_element_connect(nCells, EDGES_PER_CELL, MBPOLYGON, 0, start_element,
                                                               conn_arr_gather_set_cells);MB_CHK_SET_ERR(rval, "Failed to create gather set cells");

  // Add cells to the gather set
  Range gather_set_cells_range(start_element, start_element + nCells - 1);
  rval = mbImpl->add_entities(gather_set, gather_set_cells_range);MB_CHK_SET_ERR(rval, "Failed to add cells to the gather set");

  // Read vertices on each gather set cell (connectivity)
  int verticesOnCellVarId;
  int success = NCFUNC(inq_varid)(_fileId, "cell_corners", &verticesOnCellVarId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to get variable id of cell_corners");
  // Utilize the memory storage pointed to by conn_arr_gather_set_cells
  int* vertices_on_gather_set_cells = (int*) conn_arr_gather_set_cells;
  NCDF_SIZE read_starts[2] = {0, 0};
  NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(nCells), static_cast<NCDF_SIZE>(EDGES_PER_CELL)};
#ifdef MOAB_HAVE_PNETCDF
  // Enter independent I/O mode, since this read is only for the gather processor
  success = NCFUNC(begin_indep_data)(_fileId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to begin independent I/O mode");
  success = NCFUNCG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to read cell_corners data");
  success = NCFUNC(end_indep_data)(_fileId);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to end independent I/O mode");
#else
  success = NCFUNCG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells);
  if (success)
    MB_SET_ERR(MB_FAILURE, "Failed to read cell_corners data");
#endif

  // Correct gather set cell vertices array in the same way as local cell vertices array
  // Pentagons as hexagons should have a connectivity like 123455 and not 122345
  for (int gather_set_cell_idx = 0; gather_set_cell_idx < nCells; gather_set_cell_idx++) {
    int* pvertex = vertices_on_gather_set_cells + gather_set_cell_idx * EDGES_PER_CELL;
    for (int k = 0; k < EDGES_PER_CELL - 2; k++) {
      if (*(pvertex + k) == *(pvertex + k + 1)) {
        // Shift the connectivity
        for (int kk = k + 1; kk < EDGES_PER_CELL - 1; kk++)
          *(pvertex + kk) = *(pvertex + kk + 1);
        // No need to try next k
        break;
      }
    }
  }

  // Populate connectivity data for gather set cells
  // Convert in-place from int (stored in the first half) to EntityHandle
  // Reading backward is the trick
  for (int cell_vert = nCells * EDGES_PER_CELL - 1; cell_vert >= 0; cell_vert--) {
    // Note, indices stored in vertices_on_gather_set_cells are 0 based
    int gather_set_vert_idx = vertices_on_gather_set_cells[cell_vert]; // Global vertex index, 0 based
    // Connectivity array is shifted by where the gather set vertices start
    conn_arr_gather_set_cells[cell_vert] = gather_set_start_vertex + gather_set_vert_idx;
  }

  return MB_SUCCESS;
}

} // namespace moab