#include <../src/mat/impls/adj/mpi/mpiadj.h>    /*I "petscmat.h" I*/

/*
   Currently using ParMetis-4.0.2
*/

#include <parmetis.h>

/*
      Options and state for the ParMETIS partitioner; cuts is an output value, the remaining fields are input controls
*/
typedef struct {
  PetscInt  cuts;         /* number of cuts made (output) */
  PetscInt  foldfactor;
  PetscInt  parallel;     /* use parallel partitioner for coarse problem */
  PetscInt  indexing;     /* 0 indicates C indexing, 1 Fortran */
  PetscInt  printout;     /* indicates if one wishes Metis to print info */
  PetscBool repartition;
} MatPartitioning_Parmetis;

#define CHKERRQPARMETIS(n,func)                                             \
  if (n == METIS_ERROR_INPUT) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ParMETIS error due to wrong inputs and/or options for %s",func); \
  else if (n == METIS_ERROR_MEMORY) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ParMETIS error due to insufficient memory in %s",func); \
  else if (n == METIS_ERROR) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"ParMETIS general error in %s",func); \

#define PetscStackCallParmetis(func,args) do {PetscStackPush(#func);int status = func args;PetscStackPop;CHKERRQPARMETIS(status,#func);} while (0)

static PetscErrorCode MatPartitioningApply_Parmetis_Private(MatPartitioning part, PetscBool useND, PetscBool isImprove, IS *partitioning)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;
  PetscInt                 *locals = NULL;
  Mat                      mat     = part->adj,amat,pmat;
  PetscBool                flg;
  PetscInt                 bs = 1;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(part,MAT_PARTITIONING_CLASSID,1);
  PetscValidPointer(partitioning,4);
  ierr = PetscObjectTypeCompare((PetscObject)mat,MATMPIADJ,&flg);CHKERRQ(ierr);
  if (flg) {
    amat = mat;
    ierr = PetscObjectReference((PetscObject)amat);CHKERRQ(ierr);
  } else {
    /* bs indicates if the converted matrix is "reduced" from the original and hence the
       resulting partition results need to be stretched to match the original matrix */
    ierr = MatConvert(mat,MATMPIADJ,MAT_INITIAL_MATRIX,&amat);CHKERRQ(ierr);
    if (amat->rmap->n > 0) bs = mat->rmap->n/amat->rmap->n;
  }
  ierr = MatMPIAdjCreateNonemptySubcommMat(amat,&pmat);CHKERRQ(ierr);
  ierr = MPI_Barrier(PetscObjectComm((PetscObject)part));CHKERRQ(ierr);

  if (pmat) {
    MPI_Comm   pcomm,comm;
    Mat_MPIAdj *adj     = (Mat_MPIAdj*)pmat->data;
    PetscInt   *vtxdist = pmat->rmap->range;
    PetscInt   *xadj    = adj->i;
    PetscInt   *adjncy  = adj->j;
    PetscInt   *NDorder = NULL;
    PetscInt   itmp     = 0,wgtflag=0, numflag=0, ncon=1, nparts=part->n, options[24], i, j;
    real_t     *tpwgts,*ubvec,itr=0.1;

    ierr = PetscObjectGetComm((PetscObject)pmat,&pcomm);CHKERRQ(ierr);
    if (PetscDefined(USE_DEBUG)) {
      /* check that matrix has no diagonal entries */
      PetscInt rstart;
      ierr = MatGetOwnershipRange(pmat,&rstart,NULL);CHKERRQ(ierr);
      for (i=0; i<pmat->rmap->n; i++) {
        for (j=xadj[i]; j<xadj[i+1]; j++) {
          if (adjncy[j] == i+rstart) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Row %D has diagonal entry; Parmetis forbids diagonal entry",i+rstart);
        }
      }
    }

    ierr = PetscMalloc1(pmat->rmap->n,&locals);CHKERRQ(ierr);

    if (isImprove) {
      PetscInt       i;
      const PetscInt *part_indices;
      PetscValidHeaderSpecific(*partitioning,IS_CLASSID,4);
      ierr = ISGetIndices(*partitioning,&part_indices);CHKERRQ(ierr);
      for (i=0; i<pmat->rmap->n; i++) locals[i] = part_indices[i*bs];
      ierr = ISRestoreIndices(*partitioning,&part_indices);CHKERRQ(ierr);
      ierr = ISDestroy(partitioning);CHKERRQ(ierr);
    }

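    /* wgtflag tells ParMETIS which weights are supplied:
       0 = none, 1 = edge weights only, 2 = vertex weights only, 3 = both vertex and edge weights */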
    if (adj->values && part->use_edge_weights && !part->vertex_weights) wgtflag = 1;
    if (part->vertex_weights && !adj->values) wgtflag = 2;
    if (part->vertex_weights && adj->values && part->use_edge_weights) wgtflag = 3;

    if (PetscLogPrintInfo) {itmp = pmetis->printout; pmetis->printout = 127;}
    ierr = PetscMalloc1(ncon*nparts,&tpwgts);CHKERRQ(ierr);
    for (i=0; i<ncon; i++) {
      for (j=0; j<nparts; j++) {
        if (part->part_weights) {
          tpwgts[i*nparts+j] = part->part_weights[i*nparts+j];
        } else {
          tpwgts[i*nparts+j] = 1./nparts;
        }
      }
    }
    ierr = PetscMalloc1(ncon,&ubvec);CHKERRQ(ierr);
    for (i=0; i<ncon; i++) ubvec[i] = 1.05;
    /* This sets the defaults: options[0] == 0 instructs ParMETIS to ignore the rest of the array and use its default options */
    options[0] = 0;
    for (i=1; i<24; i++) options[i] = -1;
    /* Duplicate the communicator to be sure that ParMETIS attribute caching does not interfere with PETSc. */
    ierr = MPI_Comm_dup(pcomm,&comm);CHKERRQ(ierr);
    if (useND) {
      PetscInt    *sizes, *seps, log2size, subd, *level;
      PetscMPIInt size;
      idx_t       mtype = PARMETIS_MTYPE_GLOBAL, rtype = PARMETIS_SRTYPE_2PHASE, p_nseps = 1, s_nseps = 1;
      real_t      ubfrac = 1.05;

      ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
      ierr = PetscMalloc1(pmat->rmap->n,&NDorder);CHKERRQ(ierr);
      ierr = PetscMalloc3(2*size,&sizes,4*size,&seps,size,&level);CHKERRQ(ierr);
      PetscStackCallParmetis(ParMETIS_V32_NodeND,((idx_t*)vtxdist,(idx_t*)xadj,(idx_t*)adjncy,(idx_t*)part->vertex_weights,(idx_t*)&numflag,&mtype,&rtype,&p_nseps,&s_nseps,&ubfrac,NULL/* seed */,NULL/* dbglvl */,(idx_t*)NDorder,(idx_t*)(sizes),&comm));
      log2size = PetscLog2Real(size);
      subd = PetscPowInt(2,log2size);
      ierr = MatPartitioningSizesToSep_Private(subd,sizes,seps,level);CHKERRQ(ierr);
      for (i=0;i<pmat->rmap->n;i++) {
        PetscInt loc;

        ierr = PetscFindInt(NDorder[i],2*subd,seps,&loc);CHKERRQ(ierr);
        if (loc < 0) {
          loc = -(loc+1);
          if (loc%2) { /* part of subdomain */
            locals[i] = loc/2;
          } else {
            ierr = PetscFindInt(NDorder[i],2*(subd-1),seps+2*subd,&loc);CHKERRQ(ierr);
            loc = loc < 0 ? -(loc+1)/2 : loc/2;
            locals[i] = level[loc];
          }
        } else locals[i] = loc/2;
      }
      ierr = PetscFree3(sizes,seps,level);CHKERRQ(ierr);
    } else {
      if (pmetis->repartition) {
        PetscStackCallParmetis(ParMETIS_V3_AdaptiveRepart,((idx_t*)vtxdist,(idx_t*)xadj,(idx_t*)adjncy,(idx_t*)part->vertex_weights,(idx_t*)part->vertex_weights,(idx_t*)adj->values,(idx_t*)&wgtflag,(idx_t*)&numflag,(idx_t*)&ncon,(idx_t*)&nparts,tpwgts,ubvec,&itr,(idx_t*)options,(idx_t*)&pmetis->cuts,(idx_t*)locals,&comm));
      } else if (isImprove) {
        PetscStackCallParmetis(ParMETIS_V3_RefineKway,((idx_t*)vtxdist,(idx_t*)xadj,(idx_t*)adjncy,(idx_t*)part->vertex_weights,(idx_t*)adj->values,(idx_t*)&wgtflag,(idx_t*)&numflag,(idx_t*)&ncon,(idx_t*)&nparts,tpwgts,ubvec,(idx_t*)options,(idx_t*)&pmetis->cuts,(idx_t*)locals,&comm));
      } else {
        PetscStackCallParmetis(ParMETIS_V3_PartKway,((idx_t*)vtxdist,(idx_t*)xadj,(idx_t*)adjncy,(idx_t*)part->vertex_weights,(idx_t*)adj->values,(idx_t*)&wgtflag,(idx_t*)&numflag,(idx_t*)&ncon,(idx_t*)&nparts,tpwgts,ubvec,(idx_t*)options,(idx_t*)&pmetis->cuts,(idx_t*)locals,&comm));
      }
    }
    ierr = MPI_Comm_free(&comm);CHKERRQ(ierr);

    ierr = PetscFree(tpwgts);CHKERRQ(ierr);
    ierr = PetscFree(ubvec);CHKERRQ(ierr);
    if (PetscLogPrintInfo) pmetis->printout = itmp;

    if (bs > 1) {
      PetscInt i,j,*newlocals;
      ierr = PetscMalloc1(bs*pmat->rmap->n,&newlocals);CHKERRQ(ierr);
      for (i=0; i<pmat->rmap->n; i++) {
        for (j=0; j<bs; j++) {
          newlocals[bs*i + j] = locals[i];
        }
      }
      ierr = PetscFree(locals);CHKERRQ(ierr);
      ierr = ISCreateGeneral(PetscObjectComm((PetscObject)part),bs*pmat->rmap->n,newlocals,PETSC_OWN_POINTER,partitioning);CHKERRQ(ierr);
    } else {
      ierr = ISCreateGeneral(PetscObjectComm((PetscObject)part),pmat->rmap->n,locals,PETSC_OWN_POINTER,partitioning);CHKERRQ(ierr);
    }
    if (useND) {
      IS ndis;

      if (bs > 1) {
        ierr = ISCreateBlock(PetscObjectComm((PetscObject)part),bs,pmat->rmap->n,NDorder,PETSC_OWN_POINTER,&ndis);CHKERRQ(ierr);
      } else {
        ierr = ISCreateGeneral(PetscObjectComm((PetscObject)part),pmat->rmap->n,NDorder,PETSC_OWN_POINTER,&ndis);CHKERRQ(ierr);
      }
      ierr = ISSetPermutation(ndis);CHKERRQ(ierr);
      ierr = PetscObjectCompose((PetscObject)(*partitioning),"_petsc_matpartitioning_ndorder",(PetscObject)ndis);CHKERRQ(ierr);
      ierr = ISDestroy(&ndis);CHKERRQ(ierr);
    }
  } else {
    ierr = ISCreateGeneral(PetscObjectComm((PetscObject)part),0,NULL,PETSC_COPY_VALUES,partitioning);CHKERRQ(ierr);
    if (useND) {
      IS ndis;

      if (bs > 1) {
        ierr = ISCreateBlock(PetscObjectComm((PetscObject)part),bs,0,NULL,PETSC_COPY_VALUES,&ndis);CHKERRQ(ierr);
      } else {
        ierr = ISCreateGeneral(PetscObjectComm((PetscObject)part),0,NULL,PETSC_COPY_VALUES,&ndis);CHKERRQ(ierr);
      }
      ierr = ISSetPermutation(ndis);CHKERRQ(ierr);
      ierr = PetscObjectCompose((PetscObject)(*partitioning),"_petsc_matpartitioning_ndorder",(PetscObject)ndis);CHKERRQ(ierr);
      ierr = ISDestroy(&ndis);CHKERRQ(ierr);
    }
  }
  ierr = MatDestroy(&pmat);CHKERRQ(ierr);
  ierr = MatDestroy(&amat);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
   Uses the ParMETIS parallel matrix partitioner to compute a nested dissection ordering of the matrix in parallel
*/
static PetscErrorCode MatPartitioningApplyND_Parmetis(MatPartitioning part, IS *partitioning)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatPartitioningApply_Parmetis_Private(part, PETSC_TRUE, PETSC_FALSE, partitioning);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
   Uses the ParMETIS parallel matrix partitioner to partition the matrix in parallel
*/
static PetscErrorCode MatPartitioningApply_Parmetis(MatPartitioning part, IS *partitioning)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatPartitioningApply_Parmetis_Private(part, PETSC_FALSE, PETSC_FALSE, partitioning);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
   Uses ParMETIS to improve the quality of a partition
*/
static PetscErrorCode MatPartitioningImprove_Parmetis(MatPartitioning part, IS *partitioning)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatPartitioningApply_Parmetis_Private(part, PETSC_FALSE, PETSC_TRUE, partitioning);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatPartitioningView_Parmetis(MatPartitioning part,PetscViewer viewer)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;
  PetscMPIInt              rank;
  PetscBool                iascii;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)part),&rank);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    if (pmetis->parallel == 2) {
      ierr = PetscViewerASCIIPrintf(viewer,"  Using parallel coarse grid partitioner\n");CHKERRQ(ierr);
    } else {
      ierr = PetscViewerASCIIPrintf(viewer,"  Using sequential coarse grid partitioner\n");CHKERRQ(ierr);
    }
    ierr = PetscViewerASCIIPrintf(viewer,"  Using %D fold factor\n",pmetis->foldfactor);CHKERRQ(ierr);
    ierr = PetscViewerASCIIPushSynchronized(viewer);CHKERRQ(ierr);
    ierr = PetscViewerASCIISynchronizedPrintf(viewer,"  [%d]Number of cuts found %D\n",rank,pmetis->cuts);CHKERRQ(ierr);
    ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
    ierr = PetscViewerASCIIPopSynchronized(viewer);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*@
     MatPartitioningParmetisSetCoarseSequential - Use the sequential code to
         do the partitioning of the coarse grid.

  Logically Collective on MatPartitioning

  Input Parameter:
.  part - the partitioning context

   Level: advanced

@*/
PetscErrorCode  MatPartitioningParmetisSetCoarseSequential(MatPartitioning part)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;

  PetscFunctionBegin;
  pmetis->parallel = 1;
  PetscFunctionReturn(0);
}
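
/*
   A minimal usage sketch (not part of the PETSc manual pages): 'part' is assumed to be a
   MatPartitioning of type MATPARTITIONINGPARMETIS created by the caller, with 'is' an IS to
   receive the result. Calling this routine has the same effect as the option
   -mat_partitioning_parmetis_coarse_sequential.

     ierr = MatPartitioningParmetisSetCoarseSequential(part);CHKERRQ(ierr);
     ierr = MatPartitioningApply(part,&is);CHKERRQ(ierr);
*/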

/*@
     MatPartitioningParmetisSetRepartition - Repartition
     current mesh to rebalance computation.

  Logically Collective on MatPartitioning

  Input Parameter:
.  part - the partitioning context

   Level: advanced

@*/
PetscErrorCode  MatPartitioningParmetisSetRepartition(MatPartitioning part)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;

  PetscFunctionBegin;
  pmetis->repartition = PETSC_TRUE;
  PetscFunctionReturn(0);
}
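
/*
   A minimal usage sketch (not part of the PETSc manual pages): 'part' is assumed to be a
   MatPartitioning of type MATPARTITIONINGPARMETIS whose adjacency has already been set.
   Calling this routine has the same effect as the option -mat_partitioning_parmetis_repartition.

     ierr = MatPartitioningParmetisSetRepartition(part);CHKERRQ(ierr);
     ierr = MatPartitioningApply(part,&newpartition);CHKERRQ(ierr);
*/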

/*@
  MatPartitioningParmetisGetEdgeCut - Returns the number of edge cuts in the vertex partition.

  Input Parameter:
. part - the partitioning context

  Output Parameter:
. cut - the edge cut

   Level: advanced

@*/
PetscErrorCode  MatPartitioningParmetisGetEdgeCut(MatPartitioning part, PetscInt *cut)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*) part->data;

  PetscFunctionBegin;
  *cut = pmetis->cuts;
  PetscFunctionReturn(0);
}
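
/*
   A minimal usage sketch (not part of the PETSc manual pages): 'part' is assumed to be a
   MatPartitioning of type MATPARTITIONINGPARMETIS on which MatPartitioningApply() has already
   been called, so that the cut count computed by ParMETIS is available.

     PetscInt cut;
     ierr = MatPartitioningParmetisGetEdgeCut(part,&cut);CHKERRQ(ierr);
     ierr = PetscPrintf(PETSC_COMM_WORLD,"ParMETIS edge cut: %D\n",cut);CHKERRQ(ierr);
*/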

PetscErrorCode MatPartitioningSetFromOptions_Parmetis(PetscOptionItems *PetscOptionsObject,MatPartitioning part)
{
  PetscErrorCode ierr;
  PetscBool      flag = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"Set ParMETIS partitioning options");CHKERRQ(ierr);
  ierr = PetscOptionsBool("-mat_partitioning_parmetis_coarse_sequential","Use sequential coarse partitioner","MatPartitioningParmetisSetCoarseSequential",flag,&flag,NULL);CHKERRQ(ierr);
  if (flag) {
    ierr = MatPartitioningParmetisSetCoarseSequential(part);CHKERRQ(ierr);
  }
  flag = PETSC_FALSE; /* reset so the value of the previous option does not leak into this one */
  ierr = PetscOptionsBool("-mat_partitioning_parmetis_repartition","Repartition current mesh to rebalance computation","MatPartitioningParmetisSetRepartition",flag,&flag,NULL);CHKERRQ(ierr);
  if (flag) {
    ierr = MatPartitioningParmetisSetRepartition(part);CHKERRQ(ierr);
  }
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatPartitioningDestroy_Parmetis(MatPartitioning part)
{
  MatPartitioning_Parmetis *pmetis = (MatPartitioning_Parmetis*)part->data;
  PetscErrorCode           ierr;

  PetscFunctionBegin;
  ierr = PetscFree(pmetis);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*MC
   MATPARTITIONINGPARMETIS - Creates a partitioning context via the external package PARMETIS.

   Collective

   Input Parameter:
.  part - the partitioning context

   Options Database Keys:
+  -mat_partitioning_parmetis_coarse_sequential - use sequential PARMETIS coarse partitioner
-  -mat_partitioning_parmetis_repartition - repartition the current mesh to rebalance the computation

   Level: beginner

   Notes:
    See https://www-users.cs.umn.edu/~karypis/metis/

.seealso: MatPartitioningSetType(), MatPartitioningType

M*/
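
/*
   A usage sketch (a hypothetical helper, not part of PETSc): partition the rows of a parallel
   matrix A with ParMETIS and return the partitioning index set. The adjacency matrix need not be
   MATMPIADJ; the apply routine above converts other types as needed.

     PetscErrorCode PartitionWithParmetis(Mat A,IS *is)
     {
       MatPartitioning part;
       PetscErrorCode  ierr;

       PetscFunctionBegin;
       ierr = MatPartitioningCreate(PetscObjectComm((PetscObject)A),&part);CHKERRQ(ierr);
       ierr = MatPartitioningSetAdjacency(part,A);CHKERRQ(ierr);
       ierr = MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);CHKERRQ(ierr);
       ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
       ierr = MatPartitioningApply(part,is);CHKERRQ(ierr);
       ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
       PetscFunctionReturn(0);
     }
*/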

PETSC_EXTERN PetscErrorCode MatPartitioningCreate_Parmetis(MatPartitioning part)
{
  PetscErrorCode           ierr;
  MatPartitioning_Parmetis *pmetis;

  PetscFunctionBegin;
  ierr       = PetscNewLog(part,&pmetis);CHKERRQ(ierr);
  part->data = (void*)pmetis;

  pmetis->cuts        = 0;   /* output variable */
  pmetis->foldfactor  = 150; /* folding factor */
  pmetis->parallel    = 2;   /* use parallel partitioner for coarse grid */
  pmetis->indexing    = 0;   /* index numbering starts from 0 */
  pmetis->printout    = 0;   /* print no output while running */
  pmetis->repartition = PETSC_FALSE;

  part->ops->apply          = MatPartitioningApply_Parmetis;
  part->ops->applynd        = MatPartitioningApplyND_Parmetis;
  part->ops->improve        = MatPartitioningImprove_Parmetis;
  part->ops->view           = MatPartitioningView_Parmetis;
  part->ops->destroy        = MatPartitioningDestroy_Parmetis;
  part->ops->setfromoptions = MatPartitioningSetFromOptions_Parmetis;
  PetscFunctionReturn(0);
}

/*@
 MatMeshToVertexGraph -   This routine is not implemented because ParMETIS does not provide the needed functionality. It would use the ParMETIS package to
                       convert a Mat that represents a mesh to a Mat that represents the graph of the coupling
                       between vertices of the cells, suitable for partitioning with the MatPartitioning object. Use this to partition
                       vertices of a mesh. More likely you should use MatMeshToCellGraph()

   Collective on Mat

   Input Parameters:
+     mesh - the graph that represents the mesh
-     ncommonnodes - mesh elements that share this number of common nodes are considered neighbors, use 2 for triangles and
                     quadrilaterals, 3 for tetrahedra and 4 for hexahedra

   Output Parameter:
.     dual - the dual graph

   Notes:
     This routine always raises an error; use MatMeshToCellGraph() to build the cell (dual) graph instead.

     The columns of each row of the Mat mesh are the global vertex numbers of the vertices of that row's cell. The number of rows in mesh is
     the number of cells, the number of columns is the number of vertices.

   Level: advanced

.seealso: MatMeshToCellGraph(), MatCreateMPIAdj(), MatPartitioningCreate()

@*/
PetscErrorCode MatMeshToVertexGraph(Mat mesh,PetscInt ncommonnodes,Mat *dual)
{
  PetscFunctionBegin;
  SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ParMETIS does not provide this functionality");
  PetscFunctionReturn(0);
}

/*@
     MatMeshToCellGraph -   Uses the ParMETIS package to convert a Mat that represents a mesh to a Mat that represents the graph of the coupling
                       between cells (the "dual" graph) and is suitable for partitioning with the MatPartitioning object. Use this to partition
                       cells of a mesh.

   Collective on Mat

   Input Parameters:
+     mesh - the graph that represents the mesh
-     ncommonnodes - mesh elements that share this number of common nodes are considered neighbors, use 2 for triangles and
                     quadrilaterals, 3 for tetrahedra and 4 for hexahedra

   Output Parameter:
.     dual - the dual graph

   Notes:
     Currently requires ParMetis to be installed and uses ParMETIS_V3_Mesh2Dual()

$     Each row of the mesh object represents a single cell in the mesh. For triangles it has 3 entries, quadrilaterals 4 entries,
$         tetrahedra 4 entries and hexahedra 8 entries. You can mix triangles and quadrilaterals in the same mesh, but cannot
$         mix tetrahedra and hexahedra.
$     The columns of each row of the Mat mesh are the global vertex numbers of the vertices of that row's cell.
$     The number of rows in mesh is the number of cells, the number of columns is the number of vertices.

   Level: advanced

.seealso: MatMeshToVertexGraph(), MatCreateMPIAdj(), MatPartitioningCreate()

@*/
PetscErrorCode MatMeshToCellGraph(Mat mesh,PetscInt ncommonnodes,Mat *dual)
{
  PetscErrorCode ierr;
  PetscInt       *newxadj,*newadjncy;
  PetscInt       numflag=0;
  Mat_MPIAdj     *adj   = (Mat_MPIAdj*)mesh->data,*newadj;
  PetscBool      flg;
  MPI_Comm       comm;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)mesh,MATMPIADJ,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Must use MPIAdj matrix type");

  ierr = PetscObjectGetComm((PetscObject)mesh,&comm);CHKERRQ(ierr);
  PetscStackCallParmetis(ParMETIS_V3_Mesh2Dual,((idx_t*)mesh->rmap->range,(idx_t*)adj->i,(idx_t*)adj->j,(idx_t*)&numflag,(idx_t*)&ncommonnodes,(idx_t**)&newxadj,(idx_t**)&newadjncy,&comm));
  ierr   = MatCreateMPIAdj(PetscObjectComm((PetscObject)mesh),mesh->rmap->n,mesh->rmap->N,newxadj,newadjncy,NULL,dual);CHKERRQ(ierr);
  newadj = (Mat_MPIAdj*)(*dual)->data;

  newadj->freeaijwithfree = PETSC_TRUE; /* signal the matrix should be freed with system free since space was allocated by ParMETIS */
  PetscFunctionReturn(0);
}
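
/*
   A usage sketch (illustrative, hypothetical names): build the cell-to-cell (dual) graph of a
   distributed triangular mesh and partition it with ParMETIS. Here 'ia' and 'ja' list, for each
   local cell, the global vertex numbers of its corners; per MatCreateMPIAdj() they should be
   allocated with PetscMalloc() and become owned by the matrix. ncommonnodes is 2 because
   triangles sharing an edge (two vertices) are neighbors.

     Mat             mesh,dual;
     MatPartitioning part;
     IS              is;

     ierr = MatCreateMPIAdj(comm,nlocalcells,nglobalvertices,ia,ja,NULL,&mesh);CHKERRQ(ierr);
     ierr = MatMeshToCellGraph(mesh,2,&dual);CHKERRQ(ierr);
     ierr = MatPartitioningCreate(comm,&part);CHKERRQ(ierr);
     ierr = MatPartitioningSetAdjacency(part,dual);CHKERRQ(ierr);
     ierr = MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);CHKERRQ(ierr);
     ierr = MatPartitioningApply(part,&is);CHKERRQ(ierr);
*/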