/*=========================================================================

  Program:   Visualization Toolkit
  Module:    TestPStructuredGridConnectivity.cxx

  Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
  All rights reserved.
  See Copyright.txt or http://www.kitware.com/Copyright.htm for details.

     This software is distributed WITHOUT ANY WARRANTY; without even
     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
     PURPOSE.  See the above copyright notice for more information.

=========================================================================*/
// .NAME TestPStructuredGridConnectivity.cxx -- Parallel structured connectivity
//
// .SECTION Description
//  A test for parallel structured grid connectivity.

// C++ includes
#include <cassert>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// MPI include
#include <vtk_mpi.h>

// VTK includes
#include "vtkCellData.h"
#include "vtkDataObject.h"
#include "vtkDataSet.h"
#include "vtkInformation.h"
#include "vtkMPIController.h"
#include "vtkMathUtilities.h"
#include "vtkMultiBlockDataSet.h"
#include "vtkPStructuredGridConnectivity.h"
#include "vtkPointData.h"
#include "vtkStreamingDemandDrivenPipeline.h"
#include "vtkStructuredGridConnectivity.h"
#include "vtkStructuredNeighbor.h"
#include "vtkUniformGrid.h"
#include "vtkUniformGridPartitioner.h"
#include "vtkUnsignedCharArray.h"
#include "vtkXMLPMultiBlockDataWriter.h"

namespace
{

//------------------------------------------------------------------------------
//      G L O B A L   D A T A
//------------------------------------------------------------------------------
vtkMultiProcessController* Controller;
int Rank;
int NumberOfProcessors;

//------------------------------------------------------------------------------
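// Description:
// Writes the given distributed multi-block dataset to disk with the parallel
// XML multi-block writer. This is a debugging aid: it is compiled in only
// when DEBUG_ON is defined and is a no-op otherwise.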
void WriteDistributedDataSet(const std::string& prefix, vtkMultiBlockDataSet* dataset)
{
#ifdef DEBUG_ON
  vtkXMLPMultiBlockDataWriter* writer = vtkXMLPMultiBlockDataWriter::New();
  std::ostringstream oss;
  oss << prefix << "." << writer->GetDefaultFileExtension();
  writer->SetFileName(oss.str().c_str());
  writer->SetInputData(dataset);
  if (Controller->GetLocalProcessId() == 0)
  {
    writer->SetWriteMetaFile(1);
  }
  writer->Update();
  writer->Delete();
#else
  (void)(prefix);
  (void)(dataset);
#endif
}

//------------------------------------------------------------------------------
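// Description:
// Prints the supplied message to standard output on the root process (rank 0)
// only.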
void LogMessage(const std::string& msg)
{
  if (Controller->GetLocalProcessId() == 0)
  {
    std::cout << msg << std::endl;
    std::cout.flush();
  }
}

//------------------------------------------------------------------------------
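// Description:
// Returns the global number of visible grid nodes across all processes.
// Nodes hidden by blanking (see vtkUniformGrid::IsPointVisible) are excluded,
// so nodes shared between adjacent partitions are counted only once.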
int GetTotalNumberOfNodes(vtkMultiBlockDataSet* multiblock)
{
  assert("pre: Controller should not be nullptr" && (Controller != nullptr));
  assert("pre: multi-block grid is nullptr" && (multiblock != nullptr));

  // STEP 0: Count the local number of visible nodes
  int numNodes = 0;
  for (unsigned int block = 0; block < multiblock->GetNumberOfBlocks(); ++block)
  {
    vtkUniformGrid* grid = vtkUniformGrid::SafeDownCast(multiblock->GetBlock(block));

    if (grid != nullptr)
    {
      vtkIdType pntIdx = 0;
      for (; pntIdx < grid->GetNumberOfPoints(); ++pntIdx)
      {
        if (grid->IsPointVisible(pntIdx))
        {
          ++numNodes;
        }
      } // END for all nodes
    }   // END if grid != nullptr

  } // END for all blocks

  // STEP 1: Synchronize processes
  Controller->Barrier();

  // STEP 2: Get the global sum
  int totalSum = 0;
  Controller->AllReduce(&numNodes, &totalSum, 1, vtkCommunicator::SUM_OP);

  return (totalSum);
}

//------------------------------------------------------------------------------
// Description:
// Generates a distributed multi-block dataset; each grid is assigned to a
// process in round-robin order.
vtkMultiBlockDataSet* GetDataSet(const int numPartitions)
{
  int wholeExtent[6];
  wholeExtent[0] = 0;
  wholeExtent[1] = 99;
  wholeExtent[2] = 0;
  wholeExtent[3] = 99;
  wholeExtent[4] = 0;
  wholeExtent[5] = 99;

  int dims[3];
  dims[0] = wholeExtent[1] - wholeExtent[0] + 1;
  dims[1] = wholeExtent[3] - wholeExtent[2] + 1;
  dims[2] = wholeExtent[5] - wholeExtent[4] + 1;

  // Generate the grid for the entire domain
  vtkUniformGrid* wholeGrid = vtkUniformGrid::New();
  wholeGrid->SetOrigin(0.0, 0.0, 0.0);
  wholeGrid->SetSpacing(0.5, 0.5, 0.5);
  wholeGrid->SetDimensions(dims);

  // Partition the grid; the grid partitioner will generate the whole extent
  // and node extent information.
  vtkUniformGridPartitioner* gridPartitioner = vtkUniformGridPartitioner::New();
  gridPartitioner->SetInputData(wholeGrid);
  gridPartitioner->SetNumberOfPartitions(numPartitions);
  gridPartitioner->Update();
  vtkMultiBlockDataSet* partitionedGrid =
    vtkMultiBlockDataSet::SafeDownCast(gridPartitioner->GetOutput());
  assert("pre: partitionedGrid != nullptr" && (partitionedGrid != nullptr));

  // Each process has the same number of blocks, i.e., the same structure;
  // however, some block entries are nullptr, indicating that the data lives
  // on some other process.
  vtkMultiBlockDataSet* mbds = vtkMultiBlockDataSet::New();
  mbds->SetNumberOfBlocks(numPartitions);
  mbds->GetInformation()->Set(vtkStreamingDemandDrivenPipeline::WHOLE_EXTENT(),
    partitionedGrid->GetInformation()->Get(vtkStreamingDemandDrivenPipeline::WHOLE_EXTENT()), 6);
  //  mbds->SetWholeExtent( partitionedGrid->GetWholeExtent( ) );

  // Populate the blocks owned by this process
  unsigned int block = 0;
  for (; block < partitionedGrid->GetNumberOfBlocks(); ++block)
  {
    if (Rank == static_cast<int>(block % NumberOfProcessors))
    {
      // Copy the uniform grid
      vtkUniformGrid* grid = vtkUniformGrid::New();
      grid->DeepCopy(partitionedGrid->GetBlock(block));

      mbds->SetBlock(block, grid);
      grid->Delete();

      // Copy the global extent to the block information
      vtkInformation* info = partitionedGrid->GetMetaData(block);
      assert("pre: null metadata!" && (info != nullptr));
      assert("pre: must have a piece extent!" && (info->Has(vtkDataObject::PIECE_EXTENT())));

      vtkInformation* metadata = mbds->GetMetaData(block);
      assert("pre: null metadata!" && (metadata != nullptr));
      metadata->Set(vtkDataObject::PIECE_EXTENT(), info->Get(vtkDataObject::PIECE_EXTENT()), 6);
    } // END if we own the block
    else
    {
      mbds->SetBlock(block, nullptr);
    } // END else we don't own the block
  }   // END for all blocks

  wholeGrid->Delete();
  gridPartitioner->Delete();

  assert("post: mbds is nullptr" && (mbds != nullptr));
  return (mbds);
}

//------------------------------------------------------------------------------
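// Description:
// Registers each local (non-nullptr) grid of the multi-block dataset with the
// given parallel grid connectivity instance, using the piece extent stored in
// the block metadata.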
void RegisterGrids(vtkMultiBlockDataSet* mbds, vtkPStructuredGridConnectivity* connectivity)
{
  assert("pre: Multi-block is nullptr!" && (mbds != nullptr));
  assert("pre: connectivity is nullptr!" && (connectivity != nullptr));

  for (unsigned int block = 0; block < mbds->GetNumberOfBlocks(); ++block)
  {
    vtkUniformGrid* grid = vtkUniformGrid::SafeDownCast(mbds->GetBlock(block));
    if (grid != nullptr)
    {
      vtkInformation* info = mbds->GetMetaData(block);
      assert("pre: metadata should not be nullptr" && (info != nullptr));
      assert("pre: must have piece extent!" && info->Has(vtkDataObject::PIECE_EXTENT()));
      connectivity->RegisterGrid(block, info->Get(vtkDataObject::PIECE_EXTENT()),
        grid->GetPointGhostArray(), grid->GetCellGhostArray(), grid->GetPointData(),
        grid->GetCellData(), nullptr);
    } // END if block belongs to this process
  }   // END for all blocks
}

//------------------------------------------------------------------------------
// Tests vtkPStructuredGridConnectivity on a distributed dataset
int TestPStructuredGridConnectivity(const int factor)
{
  assert("pre: MPI Controller is nullptr!" && (Controller != nullptr));

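  // The whole extent is [0,99] in each dimension, i.e., a 100x100x100 node
  // grid; the test expects the global count of visible nodes to match, with
  // nodes shared between adjacent partitions counted only once.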
  int expected = 100 * 100 * 100;

  // STEP 0: Calculate the number of partitions as a factor of the number of
  // processes.
  assert("pre: factor >= 1" && (factor >= 1));
  int numPartitions = factor * NumberOfProcessors;

  // STEP 1: Acquire the distributed structured grid for this process.
  // Each process has the same number of blocks, but not all entries are
  // populated. A nullptr entry indicates that the block belongs to a different
  // process.
  vtkMultiBlockDataSet* mbds = GetDataSet(numPartitions);
  Controller->Barrier();
  assert("pre: mbds != nullptr" && (mbds != nullptr));
  assert(
    "pre: numBlocks mismatch" && (static_cast<int>(mbds->GetNumberOfBlocks()) == numPartitions));

  // STEP 2: Setup the grid connectivity
  vtkPStructuredGridConnectivity* gridConnectivity = vtkPStructuredGridConnectivity::New();
  gridConnectivity->SetController(Controller);
  gridConnectivity->SetNumberOfGrids(mbds->GetNumberOfBlocks());
  gridConnectivity->SetWholeExtent(
    mbds->GetInformation()->Get(vtkStreamingDemandDrivenPipeline::WHOLE_EXTENT()));
  gridConnectivity->Initialize();

  // STEP 3: Register the grids
  RegisterGrids(mbds, gridConnectivity);
  Controller->Barrier();

  // STEP 4: Compute neighbors
  gridConnectivity->ComputeNeighbors();
  Controller->Barrier();

  // STEP 5: Get the total global count of the nodes
  int count = GetTotalNumberOfNodes(mbds);
  Controller->Barrier();

  // STEP 6: Deallocate
  mbds->Delete();
  gridConnectivity->Delete();

  // STEP 7: Return success or failure
  if (count != expected)
    return 1;
  return 0;
}

//------------------------------------------------------------------------------
// Assuming a 100x100x100 domain and a field given by F=X+Y+Z at each
// node, this method computes the expected average.
double CalculateExpectedAverage()
{
  int wholeExtent[6];
  wholeExtent[0] = 0;
  wholeExtent[1] = 99;
  wholeExtent[2] = 0;
  wholeExtent[3] = 99;
  wholeExtent[4] = 0;
  wholeExtent[5] = 99;

  int dims[3];
  dims[0] = wholeExtent[1] - wholeExtent[0] + 1;
  dims[1] = wholeExtent[3] - wholeExtent[2] + 1;
  dims[2] = wholeExtent[5] - wholeExtent[4] + 1;

  // Generate the grid for the entire domain
  vtkUniformGrid* wholeGrid = vtkUniformGrid::New();
  wholeGrid->SetOrigin(0.0, 0.0, 0.0);
  wholeGrid->SetSpacing(0.5, 0.5, 0.5);
  wholeGrid->SetDimensions(dims);

  double pnt[3];
  double sum = 0.0;
  for (vtkIdType pntIdx = 0; pntIdx < wholeGrid->GetNumberOfPoints(); ++pntIdx)
  {
    wholeGrid->GetPoint(pntIdx, pnt);
    sum += pnt[0];
    sum += pnt[1];
    sum += pnt[2];
  } // END for all points
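  // Note: with origin (0,0,0) and spacing 0.5, each coordinate takes the
  // values 0, 0.5, ..., 49.5 with mean 24.75, so the expected average of
  // F=X+Y+Z is 3 * 24.75 = 74.25.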

  double N = static_cast<double>(wholeGrid->GetNumberOfPoints());
  wholeGrid->Delete();
  return (sum / N);
}

//------------------------------------------------------------------------------
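// Description:
// Computes the sum of X+Y+Z over all visible (non-blanked) nodes of the given
// grid.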
double GetXYZSumForGrid(vtkUniformGrid* grid)
{
  assert("pre: input grid is nullptr!" && (grid != nullptr));

  double pnt[3];
  double sum = 0.0;
  for (vtkIdType pntIdx = 0; pntIdx < grid->GetNumberOfPoints(); ++pntIdx)
  {
    if (grid->IsPointVisible(pntIdx))
    {
      grid->GetPoint(pntIdx, pnt);
      sum += pnt[0];
      sum += pnt[1];
      sum += pnt[2];
    }
  } // END for all points
  return (sum);
}

//------------------------------------------------------------------------------
// Tests computing the average serially vs. in parallel using factor*N
// partitions, where N is the total number of processes. An artificial field
// F=X+Y+Z is imposed on each node.
int TestAverage(const int factor)
{
  // STEP 0: Calculate the expected value
  double expected = CalculateExpectedAverage();

  // STEP 1: Calculate the number of partitions as a factor of the number of
  // processes.
  assert("pre: factor >= 1" && (factor >= 1));
  int numPartitions = factor * NumberOfProcessors;

  // STEP 2: Acquire the distributed structured grid for this process.
  // Each process has the same number of blocks, but not all entries are
  // populated. A nullptr entry indicates that the block belongs to a different
  // process.
  vtkMultiBlockDataSet* mbds = GetDataSet(numPartitions);
  assert("pre: mbds != nullptr" && (mbds != nullptr));
  assert(
    "pre: numBlocks mismatch" && (static_cast<int>(mbds->GetNumberOfBlocks()) == numPartitions));

  // STEP 3: Setup the grid connectivity
  vtkPStructuredGridConnectivity* gridConnectivity = vtkPStructuredGridConnectivity::New();
  gridConnectivity->SetController(Controller);
  gridConnectivity->SetNumberOfGrids(mbds->GetNumberOfBlocks());
  gridConnectivity->SetWholeExtent(
    mbds->GetInformation()->Get(vtkStreamingDemandDrivenPipeline::WHOLE_EXTENT()));
  gridConnectivity->Initialize();

  // STEP 4: Register the grids
  RegisterGrids(mbds, gridConnectivity);
  Controller->Barrier();

  // STEP 5: Compute neighbors
  gridConnectivity->ComputeNeighbors();
  Controller->Barrier();

  // STEP 6: Get the total global count of the nodes
  int count = GetTotalNumberOfNodes(mbds);
  Controller->Barrier();

  // STEP 7: Compute the partial, local sum
  double partialSum = 0.0;
  unsigned int blockIdx = 0;
  for (; blockIdx < mbds->GetNumberOfBlocks(); ++blockIdx)
  {
    vtkUniformGrid* blockPtr = vtkUniformGrid::SafeDownCast(mbds->GetBlock(blockIdx));
    if (blockPtr != nullptr)
    {
      partialSum += GetXYZSumForGrid(blockPtr);
    } // END if
  }   // END for all blocks

  // STEP 8: All-reduce to the global sum
  double globalSum = 0.0;
  Controller->AllReduce(&partialSum, &globalSum, 1, vtkCommunicator::SUM_OP);

  // STEP 9: Compute the average
  double average = globalSum / static_cast<double>(count);

  // STEP 10: Deallocate
  mbds->Delete();
  gridConnectivity->Delete();

  // STEP 11: Return success or failure
  if (vtkMathUtilities::FuzzyCompare(average, expected))
  {
    if (Rank == 0)
    {
      std::cout << "Computed: " << average << " Expected: " << expected << "\n";
      std::cout.flush();
    }
    return 0;
  }

  if (Rank == 0)
  {
    std::cout << "Global sum: " << globalSum << std::endl;
    std::cout << "Number of Nodes: " << count << std::endl;
    std::cout << "Computed: " << average << " Expected: " << expected << "\n";
    std::cout.flush();
  }

  return 1;
}

//------------------------------------------------------------------------------
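// Description:
// Exercises parallel ghost-layer creation: builds a distributed dataset with
// factor*N partitions, computes the neighbors, and creates ng ghost layers.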
int TestGhostLayerCreation(int factor, int ng)
{
  // STEP 0: Calculate the number of partitions as a factor of the number of
  // processes.
  assert("pre: factor >= 1" && (factor >= 1));
  int numPartitions = factor * NumberOfProcessors;

  // STEP 1: Acquire the distributed structured grid for this process.
  // Each process has the same number of blocks, but not all entries are
  // populated. A nullptr entry indicates that the block belongs to a different
  // process.
  vtkMultiBlockDataSet* mbds = GetDataSet(numPartitions);
  WriteDistributedDataSet("PINITIAL", mbds);
  assert("pre: mbds != nullptr" && (mbds != nullptr));
  assert(
    "pre: numBlocks mismatch" && (static_cast<int>(mbds->GetNumberOfBlocks()) == numPartitions));

  // STEP 2: Setup the grid connectivity
  vtkPStructuredGridConnectivity* gridConnectivity = vtkPStructuredGridConnectivity::New();
  gridConnectivity->SetController(Controller);
  gridConnectivity->SetNumberOfGrids(mbds->GetNumberOfBlocks());
  gridConnectivity->SetWholeExtent(
    mbds->GetInformation()->Get(vtkStreamingDemandDrivenPipeline::WHOLE_EXTENT()));
  gridConnectivity->Initialize();

  // STEP 3: Register the grids
  RegisterGrids(mbds, gridConnectivity);
  Controller->Barrier();

  // STEP 4: Compute neighbors
  gridConnectivity->ComputeNeighbors();
  Controller->Barrier();

  // STEP 5: Create the ghost layers
  gridConnectivity->CreateGhostLayers(ng);
  Controller->Barrier();

  // STEP 6: Deallocate
  mbds->Delete();
  gridConnectivity->Delete();
  return 0;
}

} // END anonymous namespace

//------------------------------------------------------------------------------
// Program main
int TestPStructuredGridConnectivity(int argc, char* argv[])
{
  int rc = 0;

  // STEP 0: Initialize
  Controller = vtkMPIController::New();
  Controller->Initialize(&argc, &argv, 0);
  assert("pre: Controller should not be nullptr" && (Controller != nullptr));
  vtkMultiProcessController::SetGlobalController(Controller);
  LogMessage("Finished MPI Initialization!");

  LogMessage("Getting Rank ID and NumberOfProcessors...");
  Rank = Controller->GetLocalProcessId();
  NumberOfProcessors = Controller->GetNumberOfProcesses();
  assert("pre: NumberOfProcessors >= 1" && (NumberOfProcessors >= 1));
  assert("pre: Rank is out-of-bounds" && (Rank >= 0));

  // STEP 1: Run the test where the number of partitions is equal to the
  // number of processes
  Controller->Barrier();
  LogMessage("Testing with same number of partitions as processes...");
  rc += TestPStructuredGridConnectivity(1);
  Controller->Barrier();

  // STEP 2: Run the test where the number of partitions is double the number
  // of processes
  Controller->Barrier();
  LogMessage("Testing with double the number of partitions as processes...");
  rc += TestPStructuredGridConnectivity(2);
  Controller->Barrier();

  // STEP 3: Compute the averages
  LogMessage("Calculating average with same number of partitions as processes");
  rc += TestAverage(1);
  Controller->Barrier();

  LogMessage("Calculating average with double the number of partitions");
  rc += TestAverage(2);
  Controller->Barrier();

  // STEP 4: Create ghost layers
  LogMessage("Creating ghost-layers");
  rc += TestGhostLayerCreation(1, 1);

  // STEP 5: Deallocate the controller and exit
  LogMessage("Finalizing...");
  Controller->Finalize();
  Controller->Delete();

  if (rc != 0)
  {
    std::cout << "Test Failed!\n";
    rc = 1;
  }
  return (rc);
}