1 
2 /*
3    Provides an interface to the ML smoothed aggregation algebraic multigrid preconditioner from Trilinos
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <petsc/private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <petsc/private/pcmgimpl.h>                    /*I "petscksp.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 #include <petscdm.h>            /* for DMDestroy(&pc->dm) hack */
12 
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 #include <ml_viz_stats.h>
20 EXTERN_C_END
21 
22 typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
23 static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};
24 
25 /* The context (data structure) at each grid level */
26 typedef struct {
27   Vec x,b,r;                  /* global vectors */
28   Mat A,P,R;
29   KSP ksp;
30   Vec coords;                 /* projected by ML, if PCSetCoordinates is called; values packed by node */
31 } GridCtx;
32 
33 /* The context used to input PETSc matrix into ML at fine grid */
34 typedef struct {
35   Mat         A;       /* PETSc matrix in AIJ format */
36   Mat         Aloc;    /* local portion of A to be used by ML */
37   Vec         x,y;
38   ML_Operator *mlmat;
39   PetscScalar *pwork;  /* tmp array used by PetscML_comm() */
40 } FineGridCtx;
41 
42 /* The context that associates an ML matrix with a PETSc shell matrix */
43 typedef struct {
44   Mat         A;        /* PETSc shell matrix associated with mlmat */
45   ML_Operator *mlmat;   /* ML matrix associated with A */
46 } Mat_MLShell;
47 
48 /* Private context for the ML preconditioner */
49 typedef struct {
50   ML                *ml_object;
51   ML_Aggregate      *agg_object;
52   GridCtx           *gridctx;
53   FineGridCtx       *PetscMLdata;
54   PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
55   PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
56   PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
57   PetscBool         reuse_interpolation;
58   PCMLNullSpaceType nulltype;
59   PetscMPIInt       size; /* size of communicator for pc->pmat */
60   PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
61   PetscInt          nloc;
62   PetscReal         *coords; /* ML has a grid object for each level: the finest grid will point into coords */
63 } PC_ML;
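/*
   How the structures above fit together: the PC is a PCMG whose innerctx points to a PC_ML.
   PC_ML owns the ML hierarchy (ml_object/agg_object), one GridCtx per level holding the
   PETSc-side A, P, R, work vectors and smoother KSP, and a single FineGridCtx that lets ML
   call back into the PETSc fine-grid matrix through PetscML_getrow()/PetscML_comm()/
   PetscML_matvec().
*/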
64 
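/*
   Getrow callback registered with ML for the fine-grid matrix.  ML requests a set of rows of
   the merged local matrix Aloc; following ML's getrow convention, the routine returns 0 when
   allocated_space is too small to hold the requested rows and 1 on success.
*/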
65 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
66 {
67   PetscErrorCode ierr;
68   PetscInt       m,i,j,k=0,row,*aj;
69   PetscScalar    *aa;
70   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
71   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
72 
73   ierr = MatGetSize(ml->Aloc,&m,NULL); if (ierr) return(0);
74   for (i = 0; i<N_requested_rows; i++) {
75     row            = requested_rows[i];
76     row_lengths[i] = a->ilen[row];
77     if (allocated_space < k+row_lengths[i]) return(0);
78     if ((row >= 0) && (row <= (m-1))) {
79       aj = a->j + a->i[row];
80       aa = a->a + a->i[row];
81       for (j=0; j<row_lengths[i]; j++) {
82         columns[k]  = aj[j];
83         values[k++] = aa[j];
84       }
85     }
86   }
87   return(1);
88 }
89 
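/*
   Communication callback registered with ML: given the local entries of a vector in p[],
   append the ghost (off-process) entries needed by the off-diagonal part of the MPIAIJ
   matrix, reusing the matrix's existing Mvctx scatter.  In serial this is a no-op.
*/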
90 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
91 {
92   PetscErrorCode    ierr;
93   FineGridCtx       *ml = (FineGridCtx*)ML_data;
94   Mat               A   = ml->A;
95   Mat_MPIAIJ        *a  = (Mat_MPIAIJ*)A->data;
96   PetscMPIInt       size;
97   PetscInt          i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
98   const PetscScalar *array;
99 
100   PetscFunctionBegin;
101   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
102   if (size == 1) PetscFunctionReturn(0);
103 
104   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
105   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
106   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
107   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
108   ierr = VecGetArrayRead(a->lvec,&array);CHKERRQ(ierr);
109   for (i=in_length; i<out_length; i++) p[i] = array[i-in_length];
110   ierr = VecRestoreArrayRead(a->lvec,&array);CHKERRQ(ierr);
111   PetscFunctionReturn(0);
112 }
113 
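/*
   Matrix-vector product callback registered with ML for the fine-grid operator: in parallel
   the ghost values are first gathered into pwork via PetscML_comm(), then the merged local
   matrix Aloc is applied with MatMult().
*/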
114 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
115 {
116   PetscErrorCode ierr;
117   FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
118   Mat            A   = ml->A, Aloc=ml->Aloc;
119   PetscMPIInt    size;
120   PetscScalar    *pwork=ml->pwork;
121   PetscInt       i;
122 
123   PetscFunctionBegin;
124   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
125   if (size == 1) {
126     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
127   } else {
128     for (i=0; i<in_length; i++) pwork[i] = p[i];
129     ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr);
130     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
131   }
132   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
133   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
134   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
135   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
136   PetscFunctionReturn(0);
137 }
138 
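/* MatMult for the MATSHELL wrapper around an ML_Operator (see MatWrapML_SHELL() below):
   it simply forwards to ML_Operator_Apply() on the raw arrays of x and y. */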
139 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
140 {
141   PetscErrorCode    ierr;
142   Mat_MLShell       *shell;
143   PetscScalar       *yarray;
144   const PetscScalar *xarray;
145   PetscInt          x_length,y_length;
146 
147   PetscFunctionBegin;
148   ierr     = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
149   ierr     = VecGetArrayRead(x,&xarray);CHKERRQ(ierr);
150   ierr     = VecGetArray(y,&yarray);CHKERRQ(ierr);
151   x_length = shell->mlmat->invec_leng;
152   y_length = shell->mlmat->outvec_leng;
153   PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,(PetscScalar*)xarray,y_length,yarray));
154   ierr = VecRestoreArrayRead(x,&xarray);CHKERRQ(ierr);
155   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
156   PetscFunctionReturn(0);
157 }
158 
159 /* newtype is ignored since this routine only handles one case */
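/* The result is a sequential matrix that merges the diagonal (A) and off-diagonal (B) blocks
   of the MPIAIJ matrix: columns of the diagonal block keep their local indices, while columns
   of the off-diagonal block are shifted by the local column count so that they line up with
   the ghost entries appended by PetscML_comm(). */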
160 static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
161 {
162   PetscErrorCode ierr;
163   Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
164   Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
165   PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
166   PetscScalar    *aa,*ba,*ca;
167   PetscInt       am =A->rmap->n,an=A->cmap->n,i,j,k;
168   PetscInt       *ci,*cj,ncols;
169 
170   PetscFunctionBegin;
171   if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);
172   ierr = MatSeqAIJGetArrayRead(mpimat->A,(const PetscScalar**)&aa);CHKERRQ(ierr);
173   ierr = MatSeqAIJGetArrayRead(mpimat->B,(const PetscScalar**)&ba);CHKERRQ(ierr);
174   if (scall == MAT_INITIAL_MATRIX) {
175     ierr  = PetscMalloc1(1+am,&ci);CHKERRQ(ierr);
176     ci[0] = 0;
177     for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
178     ierr = PetscMalloc1(1+ci[am],&cj);CHKERRQ(ierr);
179     ierr = PetscMalloc1(1+ci[am],&ca);CHKERRQ(ierr);
180 
181     k = 0;
182     for (i=0; i<am; i++) {
183       /* diagonal portion of A */
184       ncols = ai[i+1] - ai[i];
185       for (j=0; j<ncols; j++) {
186         cj[k]   = *aj++;
187         ca[k++] = *aa++;
188       }
189       /* off-diagonal portion of A */
190       ncols = bi[i+1] - bi[i];
191       for (j=0; j<ncols; j++) {
192         cj[k]   = an + (*bj); bj++;
193         ca[k++] = *ba++;
194       }
195     }
196     if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);
197 
198     /* put together the new matrix */
199     an   = mpimat->A->cmap->n+mpimat->B->cmap->n;
200     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);
201 
202     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
203     /* Since these are PETSc arrays, change flags to free them as necessary. */
204     mat          = (Mat_SeqAIJ*)(*Aloc)->data;
205     mat->free_a  = PETSC_TRUE;
206     mat->free_ij = PETSC_TRUE;
207 
208     mat->nonew = 0;
209   } else if (scall == MAT_REUSE_MATRIX) {
210     mat=(Mat_SeqAIJ*)(*Aloc)->data;
211     ci = mat->i; cj = mat->j; ca = mat->a;
212     for (i=0; i<am; i++) {
213       /* diagonal portion of A */
214       ncols = ai[i+1] - ai[i];
215       for (j=0; j<ncols; j++) *ca++ = *aa++;
216       /* off-diagonal portion of A */
217       ncols = bi[i+1] - bi[i];
218       for (j=0; j<ncols; j++) *ca++ = *ba++;
219     }
220   } else SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
221   ierr = MatSeqAIJRestoreArrayRead(mpimat->A,(const PetscScalar**)&aa);CHKERRQ(ierr);
222   ierr = MatSeqAIJRestoreArrayRead(mpimat->B,(const PetscScalar**)&ba);CHKERRQ(ierr);
223   PetscFunctionReturn(0);
224 }
225 
226 static PetscErrorCode MatDestroy_ML(Mat A)
227 {
228   PetscErrorCode ierr;
229   Mat_MLShell    *shell;
230 
231   PetscFunctionBegin;
232   ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
233   ierr = PetscFree(shell);CHKERRQ(ierr);
234   PetscFunctionReturn(0);
235 }
236 
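/*
   Wrap an ML_Operator as a SeqAIJ matrix.  For rectangular operators (ML's P and R, stored in
   CSR) the ML arrays are handed directly to MatCreateSeqAIJWithArrays() after sorting each
   row; for square operators (coarse-level A) the values are copied row by row through
   ML_Operator_Getrow().
*/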
237 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
238 {
239   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
240   PetscErrorCode        ierr;
241   PetscInt              m       =mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = NULL,nz_max;
242   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
243   PetscScalar           *ml_vals=matdata->values,*aa;
244 
245   PetscFunctionBegin;
246   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
247   if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
248     if (reuse) {
249       Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data;
250       aij->i = ml_rowptr;
251       aij->j = ml_cols;
252       aij->a = ml_vals;
253     } else {
254       /* sort ml_cols and ml_vals */
255       ierr = PetscMalloc1(m+1,&nnz);CHKERRQ(ierr);
256       for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
257       aj = ml_cols; aa = ml_vals;
258       for (i=0; i<m; i++) {
259         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
260         aj  += nnz[i]; aa += nnz[i];
261       }
262       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
263       ierr = PetscFree(nnz);CHKERRQ(ierr);
264     }
265     ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
266     ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
267     PetscFunctionReturn(0);
268   }
269 
270   nz_max = PetscMax(1,mlmat->max_nz_per_row);
271   ierr   = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
272   if (!reuse) {
273     ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
274     ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
275     ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
276     /* keep track of block size for A matrices */
277     ierr = MatSetBlockSize (*newmat, mlmat->num_PDEs);CHKERRQ(ierr);
278 
279     ierr = PetscMalloc1(m,&nnz);CHKERRQ(ierr);
280     for (i=0; i<m; i++) {
281       PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
282     }
283     ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
284   }
285   for (i=0; i<m; i++) {
286     PetscInt ncols;
287 
288     PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
289     ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
290   }
291   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
292   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
293 
294   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
295   ierr = PetscFree(nnz);CHKERRQ(ierr);
296   PetscFunctionReturn(0);
297 }
298 
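/*
   Wrap an ML_Operator as a MATSHELL whose MatMult() calls ML_Operator_Apply(); used in
   parallel for the ML-generated P and R so they never need to be copied into AIJ format.
*/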
299 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
300 {
301   PetscErrorCode ierr;
302   PetscInt       m,n;
303   ML_Comm        *MLcomm;
304   Mat_MLShell    *shellctx;
305 
306   PetscFunctionBegin;
307   m = mlmat->outvec_leng;
308   n = mlmat->invec_leng;
309 
310   if (reuse) {
311     ierr            = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
312     shellctx->mlmat = mlmat;
313     PetscFunctionReturn(0);
314   }
315 
316   MLcomm = mlmat->comm;
317 
318   ierr = PetscNew(&shellctx);CHKERRQ(ierr);
319   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
320   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
321   ierr = MatShellSetOperation(*newmat,MATOP_DESTROY,(void(*)(void))MatDestroy_ML);CHKERRQ(ierr);
322 
323   shellctx->A         = *newmat;
324   shellctx->mlmat     = mlmat;
325   PetscFunctionReturn(0);
326 }
327 
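/*
   Copy a square ML_Operator (a coarse-level A) into a MATMPIAIJ.  ML_build_global_numbering()
   supplies the map from ML's local row/column indices to global indices, which is used both
   for the diagonal/off-diagonal preallocation counts and when inserting the values.
*/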
328 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
329 {
330   PetscInt       *aj;
331   PetscScalar    *aa;
332   PetscErrorCode ierr;
333   PetscInt       i,j,*gordering;
334   PetscInt       m=mlmat->outvec_leng,n,nz_max,row;
335   Mat            A;
336 
337   PetscFunctionBegin;
338   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
339   n = mlmat->invec_leng;
340   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);
341 
342   /* create global row numbering for a ML_Operator */
343   PetscStackCall("ML_build_global_numbering",ML_build_global_numbering(mlmat,&gordering,"rows"));
344 
345   nz_max = PetscMax(1,mlmat->max_nz_per_row) + 1;
346   ierr = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
347   if (reuse) {
348     A = *newmat;
349   } else {
350     PetscInt *nnzA,*nnzB,*nnz;
351     PetscInt rstart;
352     ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
353     ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
354     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
355     /* keep track of block size for A matrices */
356     ierr = MatSetBlockSize (A,mlmat->num_PDEs);CHKERRQ(ierr);
357     ierr = PetscMalloc3(m,&nnzA,m,&nnzB,m,&nnz);CHKERRQ(ierr);
358     ierr = MPI_Scan(&m,&rstart,1,MPIU_INT,MPI_SUM,mlmat->comm->USR_comm);CHKERRQ(ierr);
359     rstart -= m;
360 
361     for (i=0; i<m; i++) {
362       row = gordering[i] - rstart;
363       PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
364       nnzA[row] = 0;
365       for (j=0; j<nnz[i]; j++) {
366         if (aj[j] < m) nnzA[row]++;
367       }
368       nnzB[row] = nnz[i] - nnzA[row];
369     }
370     ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
371     ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
372   }
373   for (i=0; i<m; i++) {
374     PetscInt ncols;
375     row = gordering[i];
376 
377     PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
378     for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]];
379     ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
380   }
381   PetscStackCall("ML_free",ML_free(gordering));
382   ierr    = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
383   ierr    = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
384   *newmat = A;
385 
386   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
387   PetscFunctionReturn(0);
388 }
389 
390 /* -------------------------------------------------------------------------- */
391 /*
392    PCSetCoordinates_ML
393 
394    Input Parameter:
395    .  pc - the preconditioner context
396 */
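/*
   A minimal usage sketch (names are illustrative, not part of this file): for a problem with
   bs degrees of freedom per node and node coordinates stored interlaced as
   {x0,y0,z0, x1,y1,z1, ...}, a caller would do

     PCSetCoordinates(pc,3,nloc,coords);

   which reaches this routine through the "PCSetCoordinates_C" function composed in
   PCCreate_ML().  The coordinates are stored column-oriented in pc_ml->coords and later
   handed to ML (and to the level smoothers) in PCSetUp_ML().
*/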
397 static PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
398 {
399   PC_MG          *mg    = (PC_MG*)pc->data;
400   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
401   PetscErrorCode ierr;
402   PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend,aloc;
403   Mat            Amat = pc->pmat;
404 
405   /* this function copied and modified from PCSetCoordinates_GEO -TGI */
406   PetscFunctionBegin;
407   PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
408   ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);
409 
410   ierr = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
411   aloc = (Iend-my0);
412   nloc = (Iend-my0)/bs;
413 
414   if (nloc!=a_nloc && aloc != a_nloc) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Number of local blocks %D must be %D or %D.",a_nloc,nloc,aloc);
415 
416   oldarrsz    = pc_ml->dim * pc_ml->nloc;
417   pc_ml->dim  = ndm;
418   pc_ml->nloc = nloc;
419   arrsz       = ndm * nloc;
420 
421   /* create data - syntactic sugar that should be refactored at some point */
422   if (pc_ml->coords==0 || (oldarrsz != arrsz)) {
423     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
424     ierr = PetscMalloc1(arrsz, &pc_ml->coords);CHKERRQ(ierr);
425   }
426   for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.;
427   /* copy data in - column oriented */
428   if (nloc == a_nloc) {
429     for (kk = 0; kk < nloc; kk++) {
430       for (ii = 0; ii < ndm; ii++) {
431         pc_ml->coords[ii*nloc + kk] =  coords[kk*ndm + ii];
432       }
433     }
434   } else { /* assumes the coordinates are blocked */
435     for (kk = 0; kk < nloc; kk++) {
436       for (ii = 0; ii < ndm; ii++) {
437         pc_ml->coords[ii*nloc + kk] =  coords[bs*kk*ndm + ii];
438       }
439     }
440   }
441   PetscFunctionReturn(0);
442 }
443 
444 /* -----------------------------------------------------------------------------*/
445 extern PetscErrorCode PCReset_MG(PC);
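/*
   PCReset_ML - Frees everything PCSetUp_ML() built: the per-level coordinate vectors, the ML
   hierarchy and aggregation object, the FineGridCtx, and the per-level wrapped matrices and
   work vectors, then resets the underlying PCMG.  Called from PCDestroy_ML() and also when
   the operator changes in a way that makes the old hierarchy unusable.
*/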
446 PetscErrorCode PCReset_ML(PC pc)
447 {
448   PetscErrorCode ierr;
449   PC_MG          *mg    = (PC_MG*)pc->data;
450   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
451   PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;
452 
453   PetscFunctionBegin;
454   if (dim) {
455     for (level=0; level<=fine_level; level++) {
456       ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
457     }
458     if (pc_ml->ml_object && pc_ml->ml_object->Grid) {
459       ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats*) pc_ml->ml_object->Grid[0].Grid;
460       grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
461       grid_info->y = 0;
462       grid_info->z = 0;
463       PetscStackCall("ML_Aggregate_VizAndStats_Clean",ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object));
464     }
465   }
466   PetscStackCall("ML_Aggregate_Destroy",ML_Aggregate_Destroy(&pc_ml->agg_object));
467   PetscStackCall("ML_Destroy",ML_Destroy(&pc_ml->ml_object));
468 
469   if (pc_ml->PetscMLdata) {
470     ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
471     ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
472     ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
473     ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
474   }
475   ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);
476 
477   if (pc_ml->gridctx) {
478     for (level=0; level<fine_level; level++) {
479       if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
480       if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
481       if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
482       if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
483       if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
484       if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
485     }
486   }
487   ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
488   ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
489 
490   pc_ml->dim  = 0;
491   pc_ml->nloc = 0;
492   ierr = PCReset_MG(pc);CHKERRQ(ierr);
493   PetscFunctionReturn(0);
494 }
495 /* -------------------------------------------------------------------------- */
496 /*
497    PCSetUp_ML - Prepares for the use of the ML preconditioner
498                     by setting data structures and options.
499 
500    Input Parameter:
501 .  pc - the preconditioner context
502 
503    Application Interface Routine: PCSetUp()
504 
505    Notes:
506    The interface routine PCSetUp() is not usually called directly by
507    the user, but instead is called by PCApply() if necessary.
508 */
509 extern PetscErrorCode PCSetFromOptions_MG(PetscOptionItems *PetscOptionsObject,PC);
510 extern PetscErrorCode PCReset_MG(PC);
511 
512 PetscErrorCode PCSetUp_ML(PC pc)
513 {
514   PetscErrorCode   ierr;
515   PetscMPIInt      size;
516   FineGridCtx      *PetscMLdata;
517   ML               *ml_object;
518   ML_Aggregate     *agg_object;
519   ML_Operator      *mlmat;
520   PetscInt         nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
521   Mat              A,Aloc;
522   GridCtx          *gridctx;
523   PC_MG            *mg    = (PC_MG*)pc->data;
524   PC_ML            *pc_ml = (PC_ML*)mg->innerctx;
525   PetscBool        isSeq, isMPI;
526   KSP              smoother;
527   PC               subpc;
528   PetscInt         mesh_level, old_mesh_level;
529   MatInfo          info;
530   static PetscBool cite = PETSC_FALSE;
531 
532   PetscFunctionBegin;
533   ierr = PetscCitationsRegister("@TechReport{ml_users_guide,\n  author = {M. Sala and J.J. Hu and R.S. Tuminaro},\n  title = {{ML}3.1 {S}moothed {A}ggregation {U}ser's {G}uide},\n  institution =  {Sandia National Laboratories},\n  number = {SAND2004-4821},\n  year = 2004\n}\n",&cite);CHKERRQ(ierr);
534   A    = pc->pmat;
535   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
536 
537   if (pc->setupcalled) {
538     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
539       /*
540        Reuse interpolation instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
541        multiple solves in which the matrix is not changing too quickly.
542        */
543       ml_object             = pc_ml->ml_object;
544       gridctx               = pc_ml->gridctx;
545       Nlevels               = pc_ml->Nlevels;
546       fine_level            = Nlevels - 1;
547       gridctx[fine_level].A = A;
548 
549       ierr = PetscObjectBaseTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
550       ierr = PetscObjectBaseTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
551       if (isMPI) {
552         ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
553       } else if (isSeq) {
554         Aloc = A;
555         ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
556       } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
557 
558       ierr              = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
559       PetscMLdata       = pc_ml->PetscMLdata;
560       ierr              = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
561       PetscMLdata->A    = A;
562       PetscMLdata->Aloc = Aloc;
563       PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
564       PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
565 
566       mesh_level = ml_object->ML_finest_level;
567       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
568         old_mesh_level = mesh_level;
569         mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
570 
571         /* clean and regenerate A */
572         mlmat = &(ml_object->Amat[mesh_level]);
573         PetscStackCall("ML_Operator_Clean",ML_Operator_Clean(mlmat));
574         PetscStackCall("ML_Operator_Init",ML_Operator_Init(mlmat,ml_object->comm));
575         PetscStackCall("ML_Gen_AmatrixRAP",ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level));
576       }
577 
578       level = fine_level - 1;
579       if (size == 1) { /* convert ML P, R and A into seqaij format */
580         for (mllevel=1; mllevel<Nlevels; mllevel++) {
581           mlmat = &(ml_object->Amat[mllevel]);
582           ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
583           level--;
584         }
585       } else { /* convert ML P and R into shell format, ML A into mpiaij format */
586         for (mllevel=1; mllevel<Nlevels; mllevel++) {
587           mlmat  = &(ml_object->Amat[mllevel]);
588           ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
589           level--;
590         }
591       }
592 
593       for (level=0; level<fine_level; level++) {
594         if (level > 0) {
595           ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
596         }
597         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
598       }
599       ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
600       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);
601 
602       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
603       PetscFunctionReturn(0);
604     } else {
605       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
606       ierr = PCReset_ML(pc);CHKERRQ(ierr);
607     }
608   }
609 
610   /* setup special features of PCML */
611   /*--------------------------------*/
612   /* convert A to Aloc to be used by ML at the fine grid */
613   pc_ml->size = size;
614   ierr        = PetscObjectBaseTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
615   ierr        = PetscObjectBaseTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
616   if (isMPI) {
617     ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
618   } else if (isSeq) {
619     Aloc = A;
620     ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
621   } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
622 
623   /* create and initialize struct 'PetscMLdata' */
624   ierr               = PetscNewLog(pc,&PetscMLdata);CHKERRQ(ierr);
625   pc_ml->PetscMLdata = PetscMLdata;
626   ierr               = PetscMalloc1(Aloc->cmap->n+1,&PetscMLdata->pwork);CHKERRQ(ierr);
627 
628   ierr = MatCreateVecs(Aloc,&PetscMLdata->x,&PetscMLdata->y);CHKERRQ(ierr);
629 
630   PetscMLdata->A    = A;
631   PetscMLdata->Aloc = Aloc;
632   if (pc_ml->dim) { /* create vecs around the coordinate data given */
633     PetscInt  i,j,dim=pc_ml->dim;
634     PetscInt  nloc = pc_ml->nloc,nlocghost;
635     PetscReal *ghostedcoords;
636 
637     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
638     nlocghost = Aloc->cmap->n / bs;
639     ierr      = PetscMalloc1(dim*nlocghost,&ghostedcoords);CHKERRQ(ierr);
640     for (i = 0; i < dim; i++) {
641       /* copy coordinate values into first component of pwork */
642       for (j = 0; j < nloc; j++) {
643         PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
644       }
645       /* get the ghost values */
646       ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
647       /* write into the vector */
648       for (j = 0; j < nlocghost; j++) {
649         ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
650       }
651     }
652     /* replace the original coords with the ghosted coords, because these are
653      * what ML needs */
654     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
655     pc_ml->coords = ghostedcoords;
656   }
657 
658   /* create ML discretization matrix at fine grid */
659   /* ML requires the fine-grid matrix as input; from it ML determines the number of levels. */
660   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
661   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
662   PetscStackCall("ML_Create",ML_Create(&ml_object,pc_ml->MaxNlevels));
663   PetscStackCall("ML_Comm_Set_UsrComm",ML_Comm_Set_UsrComm(ml_object->comm,PetscObjectComm((PetscObject)A)));
664   pc_ml->ml_object = ml_object;
665   PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
666   PetscStackCall("ML_Set_Amatrix_Getrow",ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols));
667   PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
668 
669   PetscStackCall("ML_Set_Symmetrize",ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO));
670 
671   /* aggregation */
672   PetscStackCall("ML_Aggregate_Create",ML_Aggregate_Create(&agg_object));
673   pc_ml->agg_object = agg_object;
674 
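  /* Tell ML's aggregation object what near null space to use: AUTO picks USER if the operator
     carries a MatSetNearNullSpace() null space, BLOCK if the block size is greater than 1, and
     SCALAR otherwise.  USER copies the PETSc vectors (plus the constant, if present) into a
     dense array for ML_Aggregate_Set_NullSpace(); BLOCK asks ML for its default block null
     space; SCALAR leaves ML's default (constant) null space in place. */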
675   {
676     MatNullSpace mnull;
677     ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
678     if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
679       if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
680       else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
681       else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
682     }
683     switch (pc_ml->nulltype) {
684     case PCML_NULLSPACE_USER: {
685       PetscScalar       *nullvec;
686       const PetscScalar *v;
687       PetscBool         has_const;
688       PetscInt          i,j,mlocal,nvec,M;
689       const Vec         *vecs;
690 
691       if (!mnull) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
692       ierr = MatGetSize(A,&M,NULL);CHKERRQ(ierr);
693       ierr = MatGetLocalSize(Aloc,&mlocal,NULL);CHKERRQ(ierr);
694       ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
695       ierr = PetscMalloc1((nvec+!!has_const)*mlocal,&nullvec);CHKERRQ(ierr);
696       if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
697       for (i=0; i<nvec; i++) {
698         ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
699         for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
700         ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
701       }
702       PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr));
703       ierr = PetscFree(nullvec);CHKERRQ(ierr);
704     } break;
705     case PCML_NULLSPACE_BLOCK:
706       PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr));
707       break;
708     case PCML_NULLSPACE_SCALAR:
709       break;
710     default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP,"Unknown null space type");
711     }
712   }
713   PetscStackCall("ML_Aggregate_Set_MaxCoarseSize",ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize));
714   /* set options */
715   switch (pc_ml->CoarsenScheme) {
716   case 1:
717     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_Coupled",ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object));break;
718   case 2:
719     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_MIS",ML_Aggregate_Set_CoarsenScheme_MIS(agg_object));break;
720   case 3:
721     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_METIS",ML_Aggregate_Set_CoarsenScheme_METIS(agg_object));break;
722   }
723   PetscStackCall("ML_Aggregate_Set_Threshold",ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold));
724   PetscStackCall("ML_Aggregate_Set_DampingFactor",ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor));
725   if (pc_ml->SpectralNormScheme_Anorm) {
726     PetscStackCall("ML_Set_SpectralNormScheme_Anorm",ML_Set_SpectralNormScheme_Anorm(ml_object));
727   }
728   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
729   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
730   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
731   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
732   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
733   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
734 
735   if (pc_ml->Aux) {
736     if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
737     ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
738     ml_object->Amat[0].aux_data->enable    = 1;
739     ml_object->Amat[0].aux_data->max_level = 10;
740     ml_object->Amat[0].num_PDEs            = bs;
741   }
742 
743   ierr = MatGetInfo(A,MAT_LOCAL,&info);CHKERRQ(ierr);
744   ml_object->Amat[0].N_nonzeros = (int) info.nz_used;
745 
746   if (pc_ml->dim) {
747     PetscInt               i,dim = pc_ml->dim;
748     ML_Aggregate_Viz_Stats *grid_info;
749     PetscInt               nlocghost;
750 
751     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
752     nlocghost = Aloc->cmap->n / bs;
753 
754     PetscStackCall("ML_Aggregate_VizAndStats_Setup",ML_Aggregate_VizAndStats_Setup(ml_object)); /* create ml info for coords */
755     grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid;
756     for (i = 0; i < dim; i++) {
757       /* set the finest level coordinates to point to the column-order array
758        * in pc_ml */
759       /* NOTE: these pointers must be cleared before ML_Aggregate_VizAndStats_Clean() so ML does not free the coordinate array (see PCReset_ML) */
760       switch (i) {
761       case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
762       case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
763       case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
764       default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
765       }
766     }
767     grid_info->Ndim = dim;
768   }
769 
770   /* repartitioning */
771   if (pc_ml->Repartition) {
772     PetscStackCall("ML_Repartition_Activate",ML_Repartition_Activate(ml_object));
773     PetscStackCall("ML_Repartition_Set_LargestMinMaxRatio",ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio));
774     PetscStackCall("ML_Repartition_Set_MinPerProc",ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc));
775     PetscStackCall("ML_Repartition_Set_PutOnSingleProc",ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc));
776 #if 0                           /* Function not yet defined in ml-6.2 */
777     /* I'm not sure what compatibility issues might crop up if we partitioned
778      * on the finest level, so to be safe repartition starts on the next
779      * finest level (reflecting the default behavior in
780      * ml_MultiLevelPreconditioner) */
781     PetscStackCall("ML_Repartition_Set_StartLevel",ML_Repartition_Set_StartLevel(ml_object,1));
782 #endif
783 
784     if (!pc_ml->RepartitionType) {
785       PetscInt i;
786 
787       if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
788       PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN));
789       PetscStackCall("ML_Aggregate_Set_Dimensions",ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim));
790 
791       for (i = 0; i < ml_object->ML_num_levels; i++) {
792         ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid;
793         grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
794         /* defaults from ml_agg_info.c */
795         grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
796         grid_info->zoltan_timers        = 0;
797         grid_info->smoothing_steps      = 4;  /* only relevant to hypergraph / fast hypergraph */
798       }
799     } else {
800       PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS));
801     }
802   }
803 
804   if (pc_ml->OldHierarchy) {
805     PetscStackCall("ML_Gen_MGHierarchy_UsingAggregation",Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
806   } else {
807     PetscStackCall("ML_Gen_MultiLevelHierarchy_UsingAggregation",Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
808   }
809   if (Nlevels<=0) SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
810   pc_ml->Nlevels = Nlevels;
811   fine_level     = Nlevels - 1;
812 
813   ierr = PCMGSetLevels(pc,Nlevels,NULL);CHKERRQ(ierr);
814   /* set default smoothers */
815   for (level=1; level<=fine_level; level++) {
816     ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
817     ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
818     ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
819     ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
820   }
821   ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
822   ierr = PCSetFromOptions_MG(PetscOptionsObject,pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
823   ierr = PetscOptionsEnd();CHKERRQ(ierr);
824 
825   ierr = PetscMalloc1(Nlevels,&gridctx);CHKERRQ(ierr);
826 
827   pc_ml->gridctx = gridctx;
828 
829   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
830      Level 0 is the finest grid for ML, but coarsest for PETSc! */
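  /* For example, with Nlevels = 3 the mapping is: ML level 0 (finest) -> PETSc level 2,
     ML level 1 -> PETSc level 1, ML level 2 (coarsest) -> PETSc level 0. */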
831   gridctx[fine_level].A = A;
832 
833   level = fine_level - 1;
834   if (size == 1) { /* convert ML P, R and A into seqaij format */
835     for (mllevel=1; mllevel<Nlevels; mllevel++) {
836       mlmat = &(ml_object->Pmat[mllevel]);
837       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
838       mlmat = &(ml_object->Rmat[mllevel-1]);
839       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
840 
841       mlmat = &(ml_object->Amat[mllevel]);
842       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
843       level--;
844     }
845   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
846     for (mllevel=1; mllevel<Nlevels; mllevel++) {
847       mlmat  = &(ml_object->Pmat[mllevel]);
848       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
849       mlmat  = &(ml_object->Rmat[mllevel-1]);
850       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
851 
852       mlmat  = &(ml_object->Amat[mllevel]);
853       ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
854       level--;
855     }
856   }
857 
858   /* create vectors and ksp at all levels */
859   for (level=0; level<fine_level; level++) {
860     level1 = level + 1;
861 
862     ierr = MatCreateVecs(gridctx[level].A,&gridctx[level].x,&gridctx[level].b);CHKERRQ(ierr);
863     ierr = MatCreateVecs(gridctx[level1].A,NULL,&gridctx[level1].r);CHKERRQ(ierr);
864     ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
865     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
866     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
867 
868     if (level == 0) {
869       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
870     } else {
871       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
872     }
873   }
874   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
875 
876   /* create coarse level and the interpolation between the levels */
877   for (level=0; level<fine_level; level++) {
878     level1 = level + 1;
879 
880     ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
881     ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
882     if (level > 0) {
883       ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
884     }
885     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
886   }
887   ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
888   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);
889 
890   /* put coordinate info in levels */
891   if (pc_ml->dim) {
892     PetscInt  i,j,dim = pc_ml->dim;
893     PetscInt  bs, nloc;
894     PC        subpc;
895     PetscReal *array;
896 
897     level = fine_level;
898     for (mllevel = 0; mllevel < Nlevels; mllevel++) {
899       ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid;
900       MPI_Comm               comm       = ((PetscObject)gridctx[level].A)->comm;
901 
902       ierr  = MatGetBlockSize (gridctx[level].A, &bs);CHKERRQ(ierr);
903       ierr  = MatGetLocalSize (gridctx[level].A, NULL, &nloc);CHKERRQ(ierr);
904       nloc /= bs; /* number of local nodes */
905 
906       ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
907       ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
908       ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
909       ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
910       for (j = 0; j < nloc; j++) {
911         for (i = 0; i < dim; i++) {
912           switch (i) {
913           case 0: array[dim * j + i] = grid_info->x[j]; break;
914           case 1: array[dim * j + i] = grid_info->y[j]; break;
915           case 2: array[dim * j + i] = grid_info->z[j]; break;
916           default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
917           }
918         }
919       }
920 
921       /* passing coordinates to smoothers/coarse solver, should they need them */
922       ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
923       ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
924       ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
925       level--;
926     }
927   }
928 
929   /* setupcalled is set to 0 so that MG is set up from scratch */
930   pc->setupcalled = 0;
931   ierr            = PCSetUp_MG(pc);CHKERRQ(ierr);
932   PetscFunctionReturn(0);
933 }
934 
935 /* -------------------------------------------------------------------------- */
936 /*
937    PCDestroy_ML - Destroys the private context for the ML preconditioner
938    that was created with PCCreate_ML().
939 
940    Input Parameter:
941 .  pc - the preconditioner context
942 
943    Application Interface Routine: PCDestroy()
944 */
945 PetscErrorCode PCDestroy_ML(PC pc)
946 {
947   PetscErrorCode ierr;
948   PC_MG          *mg   = (PC_MG*)pc->data;
949   PC_ML          *pc_ml= (PC_ML*)mg->innerctx;
950 
951   PetscFunctionBegin;
952   ierr = PCReset_ML(pc);CHKERRQ(ierr);
953   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
954   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
955   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",NULL);CHKERRQ(ierr);
956   PetscFunctionReturn(0);
957 }
958 
959 PetscErrorCode PCSetFromOptions_ML(PetscOptionItems *PetscOptionsObject,PC pc)
960 {
961   PetscErrorCode ierr;
962   PetscInt       indx,PrintLevel,partindx;
963   const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
964   const char     *part[]   = {"Zoltan","ParMETIS"};
965 #if defined(HAVE_ML_ZOLTAN)
966   const char *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
967 #endif
968   PC_MG       *mg    = (PC_MG*)pc->data;
969   PC_ML       *pc_ml = (PC_ML*)mg->innerctx;
970   PetscMPIInt size;
971   MPI_Comm    comm;
972 
973   PetscFunctionBegin;
974   ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
975   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
976   ierr = PetscOptionsHead(PetscOptionsObject,"ML options");CHKERRQ(ierr);
977 
978   PrintLevel = 0;
979   indx       = 0;
980   partindx   = 0;
981 
982   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,NULL);CHKERRQ(ierr);
983   PetscStackCall("ML_Set_PrintLevel",ML_Set_PrintLevel(PrintLevel));
984   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,NULL);CHKERRQ(ierr);
985   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,NULL);CHKERRQ(ierr);
986   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,NULL);CHKERRQ(ierr);
987 
988   pc_ml->CoarsenScheme = indx;
989 
990   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,NULL);CHKERRQ(ierr);
991   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,NULL);CHKERRQ(ierr);
992   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,NULL);CHKERRQ(ierr);
993   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,NULL);CHKERRQ(ierr);
994   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,NULL);CHKERRQ(ierr);
995   ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,NULL);CHKERRQ(ierr);
996   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,NULL);CHKERRQ(ierr);
997   ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,NULL);CHKERRQ(ierr);
998   /*
999     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
1000     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
1001 
1002     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
1003     combination of options and ML's exit(1) explanations don't help matters.
1004   */
1005   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
1006   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
1007   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2\n");CHKERRQ(ierr);}
1008   if (pc_ml->EnergyMinimization) {
1009     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,NULL);CHKERRQ(ierr);
1010   }
1011   if (pc_ml->EnergyMinimization == 2) {
1012     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
1013     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,NULL);CHKERRQ(ierr);
1014   }
1015   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
1016   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
1017   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,NULL);CHKERRQ(ierr);
1018   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
1019   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
1020   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,NULL);CHKERRQ(ierr);
1021   /*
1022     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
1023     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
1024     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
1025     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
1026     this context, but ML doesn't provide a way to find out which ones.
1027    */
1028   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,NULL);CHKERRQ(ierr);
1029   ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the hierarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,NULL);CHKERRQ(ierr);
1030   if (pc_ml->Repartition) {
1031     ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,NULL);CHKERRQ(ierr);
1032     ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,NULL);CHKERRQ(ierr);
1033     ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,NULL);CHKERRQ(ierr);
1034 #if defined(HAVE_ML_ZOLTAN)
1035     partindx = 0;
1036     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,NULL);CHKERRQ(ierr);
1037 
1038     pc_ml->RepartitionType = partindx;
1039     if (!partindx) {
1040       PetscInt zindx = 0;
1041 
1042       ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,NULL);CHKERRQ(ierr);
1043 
1044       pc_ml->ZoltanScheme = zindx;
1045     }
1046 #else
1047     partindx = 1;
1048     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,NULL);CHKERRQ(ierr);
1049     pc_ml->RepartitionType = partindx;
1050     if (!partindx) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
1051 #endif
1052     ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,NULL);CHKERRQ(ierr);
1053     ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,NULL);CHKERRQ(ierr);
1054   }
1055   ierr = PetscOptionsTail();CHKERRQ(ierr);
1056   PetscFunctionReturn(0);
1057 }
1058 
1059 /* -------------------------------------------------------------------------- */
1060 /*
1061    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
1062    and sets this as the private data within the generic preconditioning
1063    context, PC, that was created within PCCreate().
1064 
1065    Input Parameter:
1066 .  pc - the preconditioner context
1067 
1068    Application Interface Routine: PCCreate()
1069 */
1070 
1071 /*MC
1072      PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
1073        the fine-grid discretization matrix. The coarser grid matrices and restriction/interpolation
1074        operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
1075        and the restriction/interpolation operators wrapped as PETSc shell matrices.
1076 
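   Usage (an illustrative sketch; the application name and option values are examples only):
.vb
   ./myapp -ksp_type gmres -pc_type ml -pc_ml_maxNlevels 4 -pc_ml_CoarsenScheme MIS \
           -mg_levels_ksp_type richardson -mg_levels_pc_type sor
.ve
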
1077    Options Database Key:
1078    Multigrid options(inherited):
1079 +  -pc_mg_cycles <1> - 1 for V-cycle, 2 for W-cycle (PCMGSetCycleType)
1080 .  -pc_mg_distinct_smoothup - Should one configure the up and down smoothers separately (PCMGSetDistinctSmoothUp)
1081 -  -pc_mg_type <multiplicative> - (one of) additive multiplicative full kaskade
1082 
1083    ML options:
1084 +  -pc_ml_PrintLevel <0> - Print level (ML_Set_PrintLevel)
1085 .  -pc_ml_maxNlevels <10> - Maximum number of levels (None)
1086 .  -pc_ml_maxCoarseSize <1> - Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
1087 .  -pc_ml_CoarsenScheme <Uncoupled> - (one of) Uncoupled Coupled MIS METIS
1088 .  -pc_ml_DampingFactor <1.33333> - P damping factor (ML_Aggregate_Set_DampingFactor)
1089 .  -pc_ml_Threshold <0> - Smoother drop tol (ML_Aggregate_Set_Threshold)
1090 .  -pc_ml_SpectralNormScheme_Anorm <false> - Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
1091 .  -pc_ml_repartition <false> - Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
1092 .  -pc_ml_repartitionMaxMinRatio <1.3> - Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
1093 .  -pc_ml_repartitionMinPerProc <512> - Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
1094 .  -pc_ml_repartitionPutOnSingleProc <5000> - Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
1095 .  -pc_ml_repartitionType <Zoltan> - Repartitioning library to use (ML_Repartition_Set_Partitioner)
1096 .  -pc_ml_repartitionZoltanScheme <RCB> - Repartitioning scheme to use (None)
1097 .  -pc_ml_Aux <false> - Aggregate using auxiliary coordinate-based laplacian (None)
1098 -  -pc_ml_AuxThreshold <0.0> - Auxiliary smoother drop tol (None)
1099 
1100    Level: intermediate
1101 
1102 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
1103            PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycleType(), PCMGSetDistinctSmoothUp(),
1104            PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
1105            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
1106            PCMGSetCycleTypeOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
1107 M*/
1108 
1109 PETSC_EXTERN PetscErrorCode PCCreate_ML(PC pc)
1110 {
1111   PetscErrorCode ierr;
1112   PC_ML          *pc_ml;
1113   PC_MG          *mg;
1114 
1115   PetscFunctionBegin;
1116   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
1117   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
1118   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
1119   /* Since PCMG tries to use the DM associated with the PC, we must delete it */
1120   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
1121   ierr = PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);CHKERRQ(ierr);
1122   mg   = (PC_MG*)pc->data;
1123 
1124   /* create a supporting struct and attach it to pc */
1125   ierr         = PetscNewLog(pc,&pc_ml);CHKERRQ(ierr);
1126   mg->innerctx = pc_ml;
1127 
1128   pc_ml->ml_object                = 0;
1129   pc_ml->agg_object               = 0;
1130   pc_ml->gridctx                  = 0;
1131   pc_ml->PetscMLdata              = 0;
1132   pc_ml->Nlevels                  = -1;
1133   pc_ml->MaxNlevels               = 10;
1134   pc_ml->MaxCoarseSize            = 1;
1135   pc_ml->CoarsenScheme            = 1;
1136   pc_ml->Threshold                = 0.0;
1137   pc_ml->DampingFactor            = 4.0/3.0;
1138   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
1139   pc_ml->size                     = 0;
1140   pc_ml->dim                      = 0;
1141   pc_ml->nloc                     = 0;
1142   pc_ml->coords                   = 0;
1143   pc_ml->Repartition              = PETSC_FALSE;
1144   pc_ml->MaxMinRatio              = 1.3;
1145   pc_ml->MinPerProc               = 512;
1146   pc_ml->PutOnSingleProc          = 5000;
1147   pc_ml->RepartitionType          = 0;
1148   pc_ml->ZoltanScheme             = 0;
1149   pc_ml->Aux                      = PETSC_FALSE;
1150   pc_ml->AuxThreshold             = 0.0;
1151 
1152   /* allow for coordinates to be passed */
1153   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",PCSetCoordinates_ML);CHKERRQ(ierr);
1154 
1155   /* overwrite the pointers of PCMG by the functions of PCML */
1156   pc->ops->setfromoptions = PCSetFromOptions_ML;
1157   pc->ops->setup          = PCSetUp_ML;
1158   pc->ops->reset          = PCReset_ML;
1159   pc->ops->destroy        = PCDestroy_ML;
1160   PetscFunctionReturn(0);
1161 }
1162