1 /******************************************************************************
2  * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
3  * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
4  *
5  * SPDX-License-Identifier: (Apache-2.0 OR MIT)
6  ******************************************************************************/
7 
8 /****************************************************************************/
9 /* HYPRE_LSI_ML interface                                                   */
10 /*--------------------------------------------------------------------------*/
11 /*  local functions :
12  *
13  *        MH_Irecv
14  *        MH_Send
15  *        MH_Wait
16  *        MH_ExchBdry
17  *        MH_MatVec
18  *        MH_GetRow
19  *        HYPRE_LSI_MLCreate
20  *        HYPRE_LSI_MLDestroy
21  *        HYPRE_LSI_MLSetup
22  *        HYPRE_LSI_MLSolve
23  *        HYPRE_LSI_MLSetStrongThreshold
24  *        HYPRE_LSI_MLSetMethod
25  *        HYPRE_LSI_MLSetNumPreSmoothings
26  *        HYPRE_LSI_MLSetNumPostSmoothings
27  *        HYPRE_LSI_MLSetPreSmoother
28  *        HYPRE_LSI_MLSetPostSmoother
29  *        HYPRE_LSI_MLSetDampingFactor
30  *        HYPRE_LSI_MLSetCoarseSolver
31  *        HYPRE_LSI_MLSetCoarsenScheme
32  *        HYPRE_LSI_MLConstructMHMatrix
33  ****************************************************************************/
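
/*--------------------------------------------------------------------------*/
/* Illustrative usage sketch (not compiled; the driver variables comm, A,   */
/* b, x are assumed to be an MPI communicator, a HYPRE_ParCSRMatrix, and    */
/* two HYPRE_ParVector objects built elsewhere):                             */
/*                                                                           */
/*    HYPRE_Solver ml_solver;                                                */
/*    HYPRE_LSI_MLCreate(comm, &ml_solver);                                  */
/*    HYPRE_LSI_MLSetMethod(ml_solver, 1);             1 = smoothed aggreg.  */
/*    HYPRE_LSI_MLSetStrongThreshold(ml_solver, 0.08);                       */
/*    HYPRE_LSI_MLSetNumPreSmoothings(ml_solver, 2);                         */
/*    HYPRE_LSI_MLSetNumPostSmoothings(ml_solver, 2);                        */
/*    HYPRE_LSI_MLSetup(ml_solver, A, b, x);                                 */
/*    HYPRE_LSI_MLSolve(ml_solver, A, b, x);                                 */
/*    HYPRE_LSI_MLDestroy(ml_solver);                                        */
/*--------------------------------------------------------------------------*/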
34 
35 #include <stdlib.h>
36 #include <stdio.h>
37 #include <math.h>
38 
39 #include "../../parcsr_ls/HYPRE_parcsr_ls.h"
40 
41 #include "../../utilities/_hypre_utilities.h"
42 #include "../../distributed_matrix/HYPRE_distributed_matrix_types.h"
43 #include "../../distributed_matrix/HYPRE_distributed_matrix_protos.h"
44 
45 #include "../../matrix_matrix/HYPRE_matrix_matrix_protos.h"
46 
47 #include "../../seq_mv/vector.h"
48 #include "../../parcsr_mv/_hypre_parcsr_mv.h"
49 /* #include "../../parcsr_mv/par_vector.h" */
50 
51 extern void hypre_qsort0(int *, int, int);
52 
53 #include "HYPRE_MHMatrix.h"
54 
55 extern int  HYPRE_LSI_MLConstructMHMatrix(HYPRE_ParCSRMatrix, MH_Matrix *,
56                                           MPI_Comm, int *,MH_Context*);
57 
58 /****************************************************************************/
59 /* communication functions on parallel platforms                            */
60 /*--------------------------------------------------------------------------*/
61 
62 int MH_Irecv(void* buf, unsigned int count, int *src, int *mid,
63             MPI_Comm comm, MPI_Request *request )
64 {
65 #ifdef HYPRE_SEQUENTIAL
66    return 0;
67 #else
68    int my_id, lsrc, retcode;
69 
70    if ( *src < 0 ) lsrc = MPI_ANY_SOURCE; else lsrc = (*src);
71    retcode = MPI_Irecv( buf, (int) count, MPI_BYTE, lsrc, *mid, comm, request);
72    if ( retcode != 0 )
73    {
74       MPI_Comm_rank(comm, &my_id);
75       printf("%d : MH_Irecv warning : retcode = %d\n", my_id, retcode);
76    }
77    return 0;
78 #endif
79 }
80 
81 int MH_Wait(void* buf, unsigned int count, int *src, int *mid,
82             MPI_Comm comm, MPI_Request *request )
83 {
84 #ifdef HYPRE_SEQUENTIAL
85    return count;
86 #else
87    MPI_Status status;
88    int        my_id, incount, retcode;
89 
90    retcode = MPI_Wait( request, &status );
91    if ( retcode != 0 )
92    {
93       MPI_Comm_rank(comm, &my_id);
94       printf("%d : MH_Wait warning : retcode = %d\n", my_id, retcode);
95    }
96    MPI_Get_count(&status, MPI_BYTE, &incount);
97    if ( *src < 0 ) *src = status.MPI_SOURCE;
98    return incount;
99 #endif
100 }
101 
102 int MH_Send(void* buf, unsigned int count, int dest, int mid, MPI_Comm comm )
103 {
104 #ifdef HYPRE_SEQUENTIAL
105    return 0;
106 #else
107    int my_id;
108    int retcode = MPI_Send( buf, (int) count, MPI_BYTE, dest, mid, comm);
109    if ( retcode != 0 )
110    {
111       MPI_Comm_rank(comm, &my_id);
112       printf("%d : MH_Send warning : retcode = %d\n", my_id, retcode);
113    }
114    return 0;
115 #endif
116 }
117 
118 /****************************************************************************/
119 /* wrapper function for interprocessor communication for matvec and getrow  */
120 /*--------------------------------------------------------------------------*/
121 
122 int MH_ExchBdry(double *vec, void *obj)
123 {
124 #ifdef HYPRE_SEQUENTIAL
125    return 0;
126 #else
127    int         i, j, msgid, leng, src, dest, offset, *tempList;
128    double      *dbuf;
129    MH_Context  *context;
130    MH_Matrix   *Amat;
131    MPI_Comm    comm;
132    MPI_Request *request;
133 
134    int sendProcCnt, recvProcCnt;
135    int *sendProc, *recvProc;
136    int *sendLeng, *recvLeng;
137    int **sendList, nRows;
138 
139    context     = (MH_Context *) obj;
140    Amat        = (MH_Matrix  *) context->Amat;
141    comm        = context->comm;
142    sendProcCnt = Amat->sendProcCnt;
143    recvProcCnt = Amat->recvProcCnt;
144    sendProc    = Amat->sendProc;
145    recvProc    = Amat->recvProc;
146    sendLeng    = Amat->sendLeng;
147    recvLeng    = Amat->recvLeng;
148    sendList    = Amat->sendList;
149    nRows       = Amat->Nrows;
150 
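   /* post nonblocking receives for the ghost values, which are stored    */
   /* immediately after the nRows locally owned entries of vec            */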
151    if ( recvProcCnt > 0 )
152       request = hypre_TAlloc( MPI_Request ,  recvProcCnt , HYPRE_MEMORY_HOST);
153    msgid = 234;
154    offset = nRows;
155    for ( i = 0; i < recvProcCnt; i++ )
156    {
157       leng = recvLeng[i] * sizeof( double );
158       src  = recvProc[i];
159       MH_Irecv((void*) &(vec[offset]), leng, &src, &msgid, comm, &request[i]);
160       offset += recvLeng[i];
161    }
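   /* pack the locally owned entries listed in sendList and send them     */
   /* to each neighboring processor                                       */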
162    msgid = 234;
163    for ( i = 0; i < sendProcCnt; i++ )
164    {
165       dest = sendProc[i];
166       leng = sendLeng[i] * sizeof( double );
167       dbuf = hypre_TAlloc(double,  sendLeng[i] , HYPRE_MEMORY_HOST);
168       tempList = sendList[i];
169       for ( j = 0; j < sendLeng[i]; j++ ) {
170          dbuf[j] = vec[tempList[j]];
171       }
172       MH_Send((void*) dbuf, leng, dest, msgid, comm);
173       hypre_TFree(dbuf , HYPRE_MEMORY_HOST);
174    }
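   /* wait for all posted receives to complete */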
175    offset = nRows;
176    for ( i = 0; i < recvProcCnt; i++ )
177    {
178       leng = recvLeng[i] * sizeof( double );
179       src  = recvProc[i];
180       MH_Wait((void*) &(vec[offset]), leng, &src, &msgid, comm, &request[i]);
181       offset += recvLeng[i];
182    }
183    if ( recvProcCnt > 0 )
184       hypre_TFree(request , HYPRE_MEMORY_HOST);
185    return 1;
186 #endif
187 }
188 
189 /****************************************************************************/
190 /* reverse boundary exchange : ghost values are returned to their owners     */
191 /*--------------------------------------------------------------------------*/
192 
193 int MH_ExchBdryBack(double *vec, void *obj, int *length, double **outvec,
194                     int **outindices)
195 {
196 #ifdef HYPRE_SEQUENTIAL
197    (*outvec) = NULL;
198    (*outindices) = NULL;
199    (*length) = 0;
200    return 0;
201 #else
202    int         i, j, msgid, leng, src, dest, offset;
203    MH_Context  *context;
204    MH_Matrix   *Amat;
205    MPI_Comm    comm;
206    MPI_Request *request;
207 
208    int sendProcCnt, recvProcCnt;
209    int *sendProc, *recvProc;
210    int *sendLeng, *recvLeng;
211    int **sendList, nRows;
212 
213    context     = (MH_Context *) obj;
214    Amat        = (MH_Matrix  *) context->Amat;
215    comm        = context->comm;
216    sendProcCnt = Amat->sendProcCnt;
217    recvProcCnt = Amat->recvProcCnt;
218    sendProc    = Amat->sendProc;
219    recvProc    = Amat->recvProc;
220    sendLeng    = Amat->sendLeng;
221    recvLeng    = Amat->recvLeng;
222    sendList    = Amat->sendList;
223    nRows       = Amat->Nrows;
224 
225    if ( sendProcCnt > 0 )
226    {
227       request = hypre_TAlloc( MPI_Request ,  sendProcCnt , HYPRE_MEMORY_HOST);
228       leng = 0;
229       for ( i = 0; i < sendProcCnt; i++ ) leng += sendLeng[i];
230       (*outvec) = hypre_TAlloc(double, leng , HYPRE_MEMORY_HOST);
231       (*outindices) = hypre_TAlloc(int, leng , HYPRE_MEMORY_HOST);
232       (*length) = leng;
233       offset = 0;
234       for ( i = 0; i < sendProcCnt; i++ )
235       {
236          for ( j = 0; j < sendLeng[i]; j++ )
237             (*outindices)[offset+j] = sendList[i][j];
238          offset += sendLeng[i];
239       }
240    }
241    else
242    {
243       (*outvec) = NULL;
244       (*outindices) = NULL;
245       (*length) = 0;
246    }
247    msgid = 8234;
248    offset = 0;
249    for ( i = 0; i < sendProcCnt; i++ )
250    {
251       leng = sendLeng[i] * sizeof( double );
252       src  = sendProc[i];
253       MH_Irecv((void*) &((*outvec)[offset]), leng, &src, &msgid, comm, &request[i]);
254       offset += sendLeng[i];
255    }
256    msgid = 8234;
257    offset = nRows;
258    for ( i = 0; i < recvProcCnt; i++ )
259    {
260       dest = recvProc[i];
261       leng = recvLeng[i] * sizeof( double );
262       MH_Send((void*) &(vec[offset]), leng, dest, msgid, comm);
263       offset += recvLeng[i];
264    }
265    offset = 0;
266    for ( i = 0; i < sendProcCnt; i++ )
267    {
268       leng = sendLeng[i] * sizeof( double );
269       src  = sendProc[i];
270       MH_Wait((void*) &((*outvec)[offset]), leng, &src, &msgid, comm, &request[i]);
271       offset += sendLeng[i];
272    }
273    if ( sendProcCnt > 0 )
274       hypre_TFree(request, HYPRE_MEMORY_HOST);
275    return 1;
276 #endif
277 }
278 
279 /****************************************************************************/
280 /* matvec function for local matrix structure MH_Matrix                     */
281 /*--------------------------------------------------------------------------*/
282 
283 int MH_MatVec(void *obj, int leng1, double p[], int leng2, double ap[])
284 {
285     MH_Context *context;
286     MH_Matrix *Amat;
287 
288     int    i, j, length, nRows, ibeg, iend, k;
289     double *dbuf, sum;
290     int    *rowptr, *colnum;
291     double *values;
292 
293     context = (MH_Context *) obj;
294     Amat    = (MH_Matrix*) context->Amat;
295     nRows = Amat->Nrows;
296     rowptr  = Amat->rowptr;
297     colnum  = Amat->colnum;
298     values  = Amat->values;
299 
300     length = nRows;
301     for ( i = 0; i < Amat->recvProcCnt; i++ ) length += Amat->recvLeng[i];
302     dbuf = hypre_TAlloc( double ,  length , HYPRE_MEMORY_HOST);
303     for ( i = 0; i < nRows; i++ ) dbuf[i] = p[i];
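    /* fill the ghost tail of dbuf with the off-processor entries of p */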
304     MH_ExchBdry(dbuf, obj);
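    /* local sparse matrix-vector product in CSR format */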
305     for ( i = 0 ; i < nRows; i++ )
306     {
307        sum = 0.0;
308        ibeg = rowptr[i];
309        iend = rowptr[i+1];
310        for ( j = ibeg; j < iend; j++ )
311        {
312           k = colnum[j];
313           sum += ( values[j] * dbuf[k] );
314        }
315        ap[i] = sum;
316     }
317     hypre_TFree(dbuf, HYPRE_MEMORY_HOST);
318     return 1;
319 }
320 
321 /****************************************************************************/
322 /* getrow function for local matrix structure MH_Matrix (ML compatible)     */
323 /*--------------------------------------------------------------------------*/
324 
325 int MH_GetRow(void *obj, int N_requested_rows, int requested_rows[],
326    int allocated_space, int columns[], double values[], int row_lengths[])
327 {
328     int        i, j, ncnt, colindex, rowLeng, rowindex;
329     MH_Context *context = (MH_Context *) obj;
330     MH_Matrix *Amat     = (MH_Matrix*) context->Amat;
331     int    nRows        = Amat->Nrows;
332     int    *rowptr      = Amat->rowptr;
333     int    *colInd      = Amat->colnum;
334     double *colVal      = Amat->values;
335 
336     ncnt = 0;
337     for ( i = 0; i < N_requested_rows; i++ )
338     {
339        rowindex = requested_rows[i];
340        if ( rowindex < 0 || rowindex >= nRows )
341           printf("Invalid row request in GetRow : %d (%d)\n",rowindex, nRows);
342        rowLeng = rowptr[rowindex+1] - rowptr[rowindex];
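       /* not enough workspace : return 0 (per the ML getrow convention)  */
       /* so the caller can retry with a larger buffer; -9 is a sentinel  */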
343        if ( ncnt+rowLeng > allocated_space ) {row_lengths[i]=-9; return 0;}
344        row_lengths[i] = rowLeng;
345        colindex = rowptr[rowindex];
346        for ( j = 0; j < rowLeng; j++ )
347        {
348           columns[ncnt] = colInd[colindex];
349           values[ncnt++] = colVal[colindex++];
350        }
351     }
352     return 1;
353 }
354 
355 /****************************************************************************/
356 /* HYPRE_LSI_MLCreate                                                       */
357 /*--------------------------------------------------------------------------*/
358 
359 int HYPRE_LSI_MLCreate( MPI_Comm comm, HYPRE_Solver *solver)
360 {
361 #ifdef HAVE_ML
362     /* create an internal ML data structure */
363 
364     MH_Link *link = hypre_TAlloc( MH_Link , 1, HYPRE_MEMORY_HOST);
365     if ( link == NULL ) return 1;
366 
367     /* fill in all other default parameters */
368 
369     link->comm          = comm;
370     link->nlevels       = 20;   /* max number of levels */
371     link->method        = 1;    /* default - smoothed aggregation */
372     link->num_PDEs      = 1;    /* default - 1 */
373     link->pre           = 1;    /* default - Gauss Seidel */
374     link->post          = 1;
375     link->pre_sweeps    = 2;    /* default - 2 smoothing steps */
376     link->post_sweeps   = 2;
377     link->BGS_blocksize = 3;
378     link->jacobi_wt     = 1.0;  /* default damping factor */
379     link->ml_ag         = NULL;
380     link->ml_amg        = NULL;
381     link->ag_threshold  = 0.08; /* threshold for aggregation */
382     link->contxt        = NULL; /* context for matvec */
383     link->coarse_solver = 0;    /* default = SuperLU */
384 
385     /* create the ML structure */
386 
387     ML_Create( &(link->ml_ptr), link->nlevels );
388 
389     *solver = (HYPRE_Solver) link;
390 
391     return 0;
392 #else
393     printf("ML not linked.\n");
394     return -1;
395 #endif
396 }
397 
398 /****************************************************************************/
399 /* HYPRE_LSI_MLDestroy                                                      */
400 /*--------------------------------------------------------------------------*/
401 
402 int HYPRE_LSI_MLDestroy( HYPRE_Solver solver )
403 {
404 #ifdef HAVE_ML
405     int       i;
406     MH_Matrix *Amat;
407     MH_Link   *link = (MH_Link *) solver;
408 
409     if ( link->ml_ag  != NULL ) ML_Aggregate_Destroy( &(link->ml_ag) );
410     if ( link->ml_amg != NULL ) ML_AMG_Destroy( &(link->ml_amg) );
411     ML_Destroy( &(link->ml_ptr) );
412     hypre_TFree(link->contxt->partition, HYPRE_MEMORY_HOST);
413     if ( link->contxt->Amat != NULL )
414     {
415        Amat = (MH_Matrix *) link->contxt->Amat;
416        hypre_TFree(Amat->sendProc, HYPRE_MEMORY_HOST);
417        hypre_TFree(Amat->sendLeng, HYPRE_MEMORY_HOST);
418        if ( Amat->sendList != NULL )
419        {
420           for (i = 0; i < Amat->sendProcCnt; i++ )
421              hypre_TFree(Amat->sendList[i], HYPRE_MEMORY_HOST);
422           hypre_TFree(Amat->sendList, HYPRE_MEMORY_HOST);
423        }
424        hypre_TFree(Amat->recvProc, HYPRE_MEMORY_HOST);
425        hypre_TFree(Amat->recvLeng, HYPRE_MEMORY_HOST);
426        hypre_TFree(Amat->map, HYPRE_MEMORY_HOST);
427        hypre_TFree(Amat, HYPRE_MEMORY_HOST);
428     }
429     hypre_TFree(link->contxt, HYPRE_MEMORY_HOST);
430     hypre_TFree(link, HYPRE_MEMORY_HOST);
431 
432     return 0;
433 #else
434     printf("ML not linked.\n");
435     return -1;
436 #endif
437 
438 }
439 
440 /****************************************************************************/
441 /* HYPRE_LSI_MLSetup                                                        */
442 /*--------------------------------------------------------------------------*/
443 
444 int HYPRE_LSI_MLSetup( HYPRE_Solver solver, HYPRE_ParCSRMatrix A,
445                          HYPRE_ParVector b,   HYPRE_ParVector x      )
446 {
447 #ifdef HAVE_ML
448     int        i, my_id, nprocs, coarsest_level, level, sweeps, nlevels;
449     int        *row_partition, localEqns, length;
450     int        Nblocks, *blockList;
451     double     wght;
452     MH_Context *context;
453     MH_Matrix  *mh_mat;
454 
455     /* -------------------------------------------------------- */
456     /* fetch the ML pointer                                     */
457     /* -------------------------------------------------------- */
458 
459     MH_Link *link = (MH_Link *) solver;
460     ML      *ml   = link->ml_ptr;
461     nlevels       = link->nlevels;
462 
463     /* -------------------------------------------------------- */
464     /* set up the parallel environment                          */
465     /* -------------------------------------------------------- */
466 
467     MPI_Comm_rank(link->comm, &my_id);
468     MPI_Comm_size(link->comm, &nprocs);
469 
470     /* -------------------------------------------------------- */
471     /* fetch the matrix row partition information and put it    */
472     /* into the matrix data object (for matvec and getrow)      */
473     /* -------------------------------------------------------- */
474 
475     HYPRE_ParCSRMatrixGetRowPartitioning( A, &row_partition );
476     localEqns  = row_partition[my_id+1] - row_partition[my_id];
477     context = hypre_TAlloc(MH_Context, 1, HYPRE_MEMORY_HOST);
478     link->contxt = context;
479     context->comm = link->comm;
480     context->globalEqns = row_partition[nprocs];
481     context->partition = hypre_TAlloc(int, (nprocs+1), HYPRE_MEMORY_HOST);
482     for (i=0; i<=nprocs; i++) context->partition[i] = row_partition[i];
483     hypre_TFree( row_partition , HYPRE_MEMORY_HOST);
484     mh_mat = hypre_TAlloc( MH_Matrix, 1, HYPRE_MEMORY_HOST);
485     context->Amat = mh_mat;
486     HYPRE_LSI_MLConstructMHMatrix(A,mh_mat,link->comm,
487                                   context->partition,context);
488 
489     /* -------------------------------------------------------- */
490     /* set up the ML communicator information                   */
491     /* -------------------------------------------------------- */
492 
493     ML_Set_Comm_Communicator(ml, link->comm);
494     ML_Set_Comm_MyRank(ml, my_id);
495     ML_Set_Comm_Nprocs(ml, nprocs);
496     ML_Set_Comm_Send(ml, MH_Send);
497     ML_Set_Comm_Recv(ml, MH_Irecv);
498     ML_Set_Comm_Wait(ml, MH_Wait);
499 
500     /* -------------------------------------------------------- */
501     /* set up the ML matrix information                         */
502     /* -------------------------------------------------------- */
503 
504     ML_Init_Amatrix(ml, nlevels-1, localEqns, localEqns, (void *) context);
505     ML_Set_Amatrix_Matvec(ml, nlevels-1, MH_MatVec);
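    /* the getrow vector length includes the ghost entries received from */
    /* neighboring processors                                            */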
506     length = localEqns;
507     for (i=0; i<mh_mat->recvProcCnt; i++ ) length += mh_mat->recvLeng[i];
508     ML_Set_Amatrix_Getrow(ml, nlevels-1, MH_GetRow, MH_ExchBdry, length);
509 
510     /* -------------------------------------------------------- */
511     /* create an AMG or aggregate context                       */
512     /* -------------------------------------------------------- */
513 
514     if ( link->method == 0 )
515     {
516        ML_AMG_Create(&(link->ml_amg));
517        ML_AMG_Set_Threshold( link->ml_amg, link->ag_threshold );
518        if ( link->num_PDEs > 1 )
519           ML_AMG_Set_AMGScheme_SystemUnknown(link->ml_amg, link->num_PDEs);
520        else
521           ML_AMG_Set_AMGScheme_Scalar(link->ml_amg);
522        ML_AMG_Set_MaxLevels( link->ml_amg, link->nlevels );
523        coarsest_level = ML_Gen_MGHierarchy_UsingAMG(ml, nlevels-1,
524                                         ML_DECREASING, link->ml_amg);
525     }
526     else
527     {
528        ML_Aggregate_Create(&(link->ml_ag));
529        ML_Aggregate_Set_MaxLevels( link->ml_ag, link->nlevels );
530        ML_Aggregate_Set_Threshold( link->ml_ag, link->ag_threshold );
531        switch (link->coarsen_scheme)
532        {
533           case 1 : ML_Aggregate_Set_CoarsenScheme_Uncoupled(link->ml_ag);
534                    break;
535           case 2 : ML_Aggregate_Set_CoarsenScheme_Coupled(link->ml_ag);
536                    break;
537           case 3 : ML_Aggregate_Set_CoarsenScheme_MIS(link->ml_ag);
538                    break;
539           case 5 : ML_Aggregate_Set_CoarsenScheme_UncoupledMIS(link->ml_ag);
540                    break;
541           case 6 : ML_Aggregate_Set_CoarsenScheme_UncoupledCoupled(link->ml_ag);
542                    break;
543           default: ML_Aggregate_Set_CoarsenScheme_Uncoupled(link->ml_ag);
544                    break;
545        }
546        coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, nlevels-1,
547                                         ML_DECREASING, link->ml_ag);
548     }
549 
550     /* -------------------------------------------------------- */
551     /* report the number of levels generated                    */
552     /* -------------------------------------------------------- */
553 
554     if ( my_id == 0 )
555        printf("ML : number of levels = %d\n", coarsest_level);
556 
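    /* levels are numbered ML_DECREASING from nlevels-1, so convert the  */
    /* returned level count into the index of the coarsest level         */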
557     coarsest_level = nlevels - coarsest_level;
558 
559     /* -------------------------------------------------------- */
560     /* set up smoother and coarse solver                        */
561     /* -------------------------------------------------------- */
562 
563     for (level = nlevels-1; level > coarsest_level; level--)
564     {
565        sweeps = link->pre_sweeps;
566        wght   = link->jacobi_wt;
567        switch ( link->pre )
568        {
569           case 0 :
570              ML_Gen_Smoother_Jacobi(ml, level, ML_PRESMOOTHER, sweeps, wght);
571              break;
572           case 1 :
573              ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_PRESMOOTHER,sweeps,1.0);
574              break;
575           case 2 :
576              ML_Gen_Smoother_SymGaussSeidelSequential(ml,level,ML_PRESMOOTHER,
577                                                       sweeps,1.0);
578              break;
579           case 3 :
580              if ( link->method == 1 )
581              {
582                 Nblocks = ML_Aggregate_Get_AggrCount( link->ml_ag, level );
583                 ML_Aggregate_Get_AggrMap( link->ml_ag, level, &blockList );
584                 ML_Gen_Smoother_VBlockJacobi(ml,level,ML_PRESMOOTHER,
585                                              sweeps, wght, Nblocks, blockList);
586              }
587              else
588              {
589                 ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_PRESMOOTHER,
590                                                sweeps,1.0);
591              }
592              break;
593           case 4 :
594              if ( link->method == 1 )
595              {
596                 Nblocks = ML_Aggregate_Get_AggrCount( link->ml_ag, level );
597                 ML_Aggregate_Get_AggrMap( link->ml_ag, level, &blockList );
598                 ML_Gen_Smoother_VBlockSymGaussSeidel(ml,level, ML_PRESMOOTHER,
599                                              sweeps, 1.0, Nblocks, blockList);
600              }
601              else
602              {
603                 ML_Gen_Smoother_GaussSeidel(ml,level,ML_PRESMOOTHER,
604                                             sweeps,wght);
605              }
606              break;
607           case 5 :
608              if ( link->method == 1 )
609              {
610                 Nblocks = ML_Aggregate_Get_AggrCount( link->ml_ag, level );
611                 ML_Aggregate_Get_AggrMap( link->ml_ag, level, &blockList );
612                 ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml,level,
613                                ML_PRESMOOTHER,sweeps,1.0,Nblocks,blockList);
614              }
615              else
616              {
617                 ML_Gen_Smoother_GaussSeidel(ml,level,ML_PRESMOOTHER,
618                                             sweeps,wght);
619              }
620              break;
621           case 6 :
622              ML_Gen_Smoother_OverlappedDDILUT(ml,level, ML_PRESMOOTHER);
623              break;
624           case 7 :
625              ML_Gen_Smoother_VBlockAdditiveSchwarz(ml,level,ML_PRESMOOTHER,
626                                    sweeps, 0, NULL);
627              break;
628           case 8 :
629              ML_Gen_Smoother_VBlockMultiplicativeSchwarz(ml,level,
630                                    ML_PRESMOOTHER, sweeps, 0, NULL);
631              break;
632           case 9 :
633              ML_Gen_Smoother_ParaSails(ml, level, ML_PRESMOOTHER, sweeps, 0,
634                                        0.1, 1, 0.01, 0, 1);
635              break;
636           default :
637              if ( my_id == 0 )
638                 printf("ML Presmoother : set to default (SGS)\n");
639              ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_PRESMOOTHER,sweeps,1.0);
640              break;
641        }
642 
643        sweeps = link->post_sweeps;
644        switch ( link->post )
645        {
646           case 0 :
647              ML_Gen_Smoother_Jacobi(ml, level, ML_POSTSMOOTHER, sweeps, wght);
648              break;
649           case 1 :
650              ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_POSTSMOOTHER,
651                                             sweeps,1.0);
652              break;
653           case 2 :
654              ML_Gen_Smoother_SymGaussSeidelSequential(ml,level,ML_POSTSMOOTHER,
655                                                       sweeps,1.0);
656              break;
657           case 3 :
658              if ( link->method == 1 )
659              {
660                 Nblocks = ML_Aggregate_Get_AggrCount( link->ml_ag, level );
661                 ML_Aggregate_Get_AggrMap( link->ml_ag, level, &blockList );
662                 ML_Gen_Smoother_VBlockJacobi(ml,level,ML_POSTSMOOTHER,
663                                   sweeps, wght, Nblocks, blockList);
664              }
665              else
666              {
667                 ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_POSTSMOOTHER,
668                                                sweeps,1.0);
669              }
670              break;
671           case 4 :
672              if ( link->method == 1 )
673              {
674                 Nblocks = ML_Aggregate_Get_AggrCount( link->ml_ag, level );
675                 ML_Aggregate_Get_AggrMap( link->ml_ag, level, &blockList );
676                 ML_Gen_Smoother_VBlockSymGaussSeidel(ml,level,ML_POSTSMOOTHER,
677                                          sweeps,1.0,Nblocks,blockList);
678              }
679              else
680              {
681                 ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_POSTSMOOTHER,
682                                                sweeps,1.0);
683              }
684              break;
685           case 5 :
686              if ( link->method == 1 )
687              {
688                 Nblocks = ML_Aggregate_Get_AggrCount( link->ml_ag, level );
689                 ML_Aggregate_Get_AggrMap( link->ml_ag, level, &blockList );
690                 ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml,level,
691                                ML_POSTSMOOTHER,sweeps,1.0,Nblocks,blockList);
692              }
693              else
694              {
695                 ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_POSTSMOOTHER,
696                                                sweeps,1.0);
697              }
698              break;
699           default :
700              if ( my_id == 0 )
701                 printf("ML Postsmoother : set to default (SGS)\n");
702              ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_POSTSMOOTHER,
703                                             sweeps,1.0);
704              break;
705        }
706     }
707 
708     if ( link->coarse_solver == 0 )
709     {
710 #ifdef HAVE_SUPERLU
711        ML_Gen_CoarseSolverSuperLU(ml, coarsest_level);
712 #else
713        printf("SuperLU not compiled in : default to GS(50).\n");
       ML_Gen_Smoother_GaussSeidel(ml,coarsest_level,ML_PRESMOOTHER,50,1.0);
714 #endif
715     }
716     else if ( link->coarse_solver == 1 )
717     {
718        ML_Gen_CoarseSolverAggregation(ml, coarsest_level, link->ml_ag);
719     }
720     else
721     {
722        ML_Gen_Smoother_GaussSeidel(ml,coarsest_level,ML_PRESMOOTHER,50,1.0);
723     }
724     ML_Gen_Solver(ml, ML_MGV, nlevels-1, coarsest_level);
725 
726     return 0;
727 #else
728     printf("ML not linked.\n");
729     return -1;
730 #endif
731 }
732 
733 /****************************************************************************/
734 /* HYPRE_LSI_MLSolve                                                        */
735 /*--------------------------------------------------------------------------*/
736 
737 int HYPRE_LSI_MLSolve( HYPRE_Solver solver, HYPRE_ParCSRMatrix A,
738                        HYPRE_ParVector b, HYPRE_ParVector x )
739 {
740 #ifdef HAVE_ML
741     double  *rhs, *sol;
742     MH_Link *link = (MH_Link *) solver;
743     ML      *ml = link->ml_ptr;
744     int     leng, level = ml->ML_num_levels - 1;
745     ML_Operator *Amat = &(ml->Amat[level]);
746     ML_Krylov *ml_kry;
747 
748     rhs = hypre_VectorData(hypre_ParVectorLocalVector((hypre_ParVector *) b));
749     sol = hypre_VectorData(hypre_ParVectorLocalVector((hypre_ParVector *) x));
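    /* ML operates directly on the raw local data arrays of the hypre vectors */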
750 
751     /*
752     ml_kry = ML_Krylov_Create(ml->comm);
753     ML_Krylov_Set_Method(ml_kry, 1);
754     ML_Krylov_Set_Amatrix(ml_kry, Amat);
755     ML_Krylov_Set_Precon(ml_kry, ml);
756     ML_Krylov_Set_PreconFunc(ml_kry, ML_AMGVSolve_Wrapper);
757     leng = Amat->outvec_leng;
758     ML_Krylov_Solve(ml_kry, leng, rhs, sol);
759     ML_Krylov_Destroy(&ml_kry);
760     */
761 
762     ML_Solve_AMGV(ml, rhs, sol);
763     /*ML_Iterate(ml, sol, rhs);*/
764 
765     return 0;
766 #else
767     printf("ML not linked.\n");
768     return -1;
769 #endif
770 }
771 
772 /****************************************************************************/
773 /* HYPRE_LSI_MLSetStrongThreshold                                           */
774 /*--------------------------------------------------------------------------*/
775 
776 int HYPRE_LSI_MLSetStrongThreshold(HYPRE_Solver solver, double strong_threshold)
777 {
778     MH_Link *link = (MH_Link *) solver;
779 
780     if ( strong_threshold < 0.0 )
781     {
782        printf("HYPRE_LSI_MLSetStrongThreshold WARNING : reset to 0.\n");
783        link->ag_threshold = 0.0;
784     }
785     else
786     {
787        link->ag_threshold = strong_threshold;
788     }
789     return( 0 );
790 }
791 
792 /****************************************************************************/
793 /* HYPRE_LSI_MLSetMethod                                                    */
794 /*--------------------------------------------------------------------------*/
795 
796 int HYPRE_LSI_MLSetMethod( HYPRE_Solver solver, int method )
797 {
798     MH_Link *link = (MH_Link *) solver;
799 
800     if ( method == 1 ) link->method = 1;  /* smoothed aggregation */
801     else               link->method = 0;  /* AMG */
802     return( 0 );
803 }
804 
805 /****************************************************************************/
806 /* HYPRE_LSI_MLSetNumPDEs                                                   */
807 /*--------------------------------------------------------------------------*/
808 
809 int HYPRE_LSI_MLSetNumPDEs( HYPRE_Solver solver, int numPDE )
810 {
811     MH_Link *link = (MH_Link *) solver;
812 
813     if ( numPDE > 1 ) link->num_PDEs = numPDE;
814     else              link->num_PDEs = 1;
815     return( 0 );
816 }
817 
818 /****************************************************************************/
819 /* HYPRE_LSI_MLSetNumPreSmoothings                                          */
820 /*--------------------------------------------------------------------------*/
821 
822 int HYPRE_LSI_MLSetNumPreSmoothings( HYPRE_Solver solver, int num_sweeps )
823 {
824     MH_Link *link = (MH_Link *) solver;
825 
826     if ( num_sweeps < 0 )
827     {
828        printf("HYPRE_LSI_MLSetNumPreSmoothings WARNING : reset to 0.\n");
829        link->pre_sweeps = 0;
830     }
831     else
832     {
833        link->pre_sweeps = num_sweeps;
834     }
835     return( 0 );
836 }
837 
838 /****************************************************************************/
839 /* HYPRE_LSI_MLSetNumPostSmoothings                                         */
840 /*--------------------------------------------------------------------------*/
841 
842 int HYPRE_LSI_MLSetNumPostSmoothings( HYPRE_Solver solver, int num_sweeps )
843 {
844     MH_Link *link = (MH_Link *) solver;
845 
846     if ( num_sweeps < 0 )
847     {
848        printf("HYPRE_LSI_MLSetNumPostSmoothings WARNING : reset to 0.\n");
849        link->post_sweeps = 0;
850     }
851     else
852     {
853        link->post_sweeps = num_sweeps;
854     }
855     return( 0 );
856 }
857 
858 /****************************************************************************/
859 /* HYPRE_LSI_MLSetPreSmoother                                               */
860 /*--------------------------------------------------------------------------*/
861 
862 int HYPRE_LSI_MLSetPreSmoother( HYPRE_Solver solver, int smoother_type )
863 {
864     MH_Link *link = (MH_Link *) solver;
865 
866     if ( smoother_type < 0 || smoother_type > 6 )
867     {
868        printf("HYPRE_LSI_MLSetPreSmoother WARNING : set to Jacobi.\n");
869        link->pre = 0;
870     }
871     else
872     {
873        link->pre = smoother_type;
874     }
875     return( 0 );
876 }
877 
878 /****************************************************************************/
879 /* HYPRE_LSI_MLSetPostSmoother                                              */
880 /*--------------------------------------------------------------------------*/
881 
882 int HYPRE_LSI_MLSetPostSmoother( HYPRE_Solver solver, int smoother_type )
883 {
884     MH_Link *link = (MH_Link *) solver;
885 
886     if ( smoother_type < 0 || smoother_type > 6 )
887     {
888        printf("HYPRE_LSI_MLSetPostSmoother WARNING : set to Jacobi.\n");
889        link->post = 0;
890     }
891     else
892     {
893        link->post = smoother_type;
894     }
895     return( 0 );
896 }
897 
898 /****************************************************************************/
899 /* HYPRE_LSI_MLSetDampingFactor                                             */
900 /*--------------------------------------------------------------------------*/
901 
902 int HYPRE_LSI_MLSetDampingFactor( HYPRE_Solver solver, double factor )
903 {
904     MH_Link *link = (MH_Link *) solver;
905 
906     if ( factor < 0.0 || factor > 1.0 )
907     {
908        printf("HYPRE_LSI_MLSetDampingFactor WARNING : set to 0.5.\n");
909        link->jacobi_wt = 0.5;
910     }
911     else
912     {
913        link->jacobi_wt = factor;
914     }
915     return( 0 );
916 }
917 
918 /****************************************************************************/
919 /* HYPRE_LSI_MLSetCoarseSolver                                              */
920 /*--------------------------------------------------------------------------*/
921 
922 int HYPRE_LSI_MLSetCoarseSolver( HYPRE_Solver solver, int solver_id )
923 {
924     MH_Link *link = (MH_Link *) solver;
925 
926     if ( solver_id < 0 || solver_id > 2 )
927     {
928        printf("HYPRE_LSI_MLSetCoarseSolver WARNING : reset to Aggr\n");
929        link->coarse_solver = 1;
930     }
931     else
932     {
933        link->coarse_solver = solver_id;
934     }
935     return( 0 );
936 }
937 
938 /****************************************************************************/
939 /* HYPRE_LSI_MLSetCoarsenScheme                                             */
940 /*--------------------------------------------------------------------------*/
941 
942 int HYPRE_LSI_MLSetCoarsenScheme( HYPRE_Solver solver, int scheme )
943 {
944     MH_Link *link = (MH_Link *) solver;
945 
946     if ( scheme < 1 || scheme > 6 )
947     {
948        printf("HYPRE_LSI_MLSetCoarsenScheme WARNING : reset to uncoupled\n");
949        link->coarsen_scheme = 1;
950     }
951     else
952     {
953        link->coarsen_scheme = scheme;
954     }
955     return( 0 );
956 }
957 
958 /****************************************************************************/
959 /* HYPRE_LSI_MLSetBGSBlockSize                                              */
960 /*--------------------------------------------------------------------------*/
961 
962 int HYPRE_LSI_MLSetBGSBlockSize( HYPRE_Solver solver, int size )
963 {
964     MH_Link *link = (MH_Link *) solver;
965 
966     if ( size < 0 )
967     {
968        printf("HYPRE_LSI_MLSetBGSBlockSize WARNING : reset to 1.\n");
969        link->BGS_blocksize = 1;
970     }
971     else
972     {
973        link->BGS_blocksize = size;
974     }
975     return( 0 );
976 }
977 
978 /****************************************************************************/
979 /* HYPRE_LSI_MLConstructMHMatrix                                            */
980 /*--------------------------------------------------------------------------*/
981 
982 int HYPRE_LSI_MLConstructMHMatrix(HYPRE_ParCSRMatrix A, MH_Matrix *mh_mat,
983                              MPI_Comm comm, int *partition,MH_Context *obj)
984 {
985     int         i, j, index, my_id, nprocs;
986     int         rowLeng, *colInd, startRow, endRow, localEqns;
987     int         *diagSize, *offdiagSize, externLeng, *externList, ncnt, nnz;
988     int         *rowptr, *columns, num_bdry;
989     double      *colVal, *values;
990 #ifndef HYPRE_SEQUENTIAL
991     int         sendProcCnt, *sendLeng, *sendProc, **sendList;
992     int         recvProcCnt, *recvLeng, *recvProc, *tempCnt, msgid;
993     MPI_Request *Request;
994     MPI_Status  status;
995 #endif
996 
997     /* -------------------------------------------------------- */
998     /* get machine information and local matrix information     */
999     /* -------------------------------------------------------- */
1000 
1001 #ifdef HYPRE_SEQUENTIAL
1002     my_id = 0;
1003     nprocs = 1;
1004 #else
1005     MPI_Comm_rank(comm, &my_id);
1006     MPI_Comm_size(comm, &nprocs);
1007 #endif
1008 
1009     startRow  = partition[my_id];
1010     endRow    = partition[my_id+1] - 1;
1011     localEqns = endRow - startRow + 1;
1012 
1013     /* -------------------------------------------------------- */
1014     /* probe A to find out about diagonal and off-diagonal      */
1015     /* block information                                        */
1016     /* -------------------------------------------------------- */
1017 
1018     diagSize    = hypre_TAlloc(int,  localEqns , HYPRE_MEMORY_HOST);
1019     offdiagSize = hypre_TAlloc(int,  localEqns , HYPRE_MEMORY_HOST);
1020     num_bdry = 0;
1021     for ( i = startRow; i <= endRow; i++ )
1022     {
1023        diagSize[i-startRow] = offdiagSize[i-startRow] = 0;
1024        HYPRE_ParCSRMatrixGetRow(A, i, &rowLeng, &colInd, &colVal);
1025        for (j = 0; j < rowLeng; j++)
1026           if ( colInd[j] < startRow || colInd[j] > endRow )
1027           {
1028              if ( colVal[j] != 0.0 ) offdiagSize[i-startRow]++;
1029              /*offdiagSize[i-startRow]++;*/
1030           }
1031           else
1032           {
1033              if ( colVal[j] != 0.0 ) diagSize[i-startRow]++;
1034              /*diagSize[i-startRow]++;*/
1035           }
1036        HYPRE_ParCSRMatrixRestoreRow(A, i, &rowLeng, &colInd, &colVal);
1037        if ( diagSize[i-startRow] + offdiagSize[i-startRow] == 1 ) num_bdry++;
1038     }
1039 
1040     /* -------------------------------------------------------- */
1041     /* construct external node list in global eqn numbers       */
1042     /* -------------------------------------------------------- */
1043 
1044     externLeng = 0;
1045     for ( i = 0; i < localEqns; i++ ) externLeng += offdiagSize[i];
1046     if ( externLeng > 0 )
1047          externList = hypre_TAlloc(int,  externLeng, HYPRE_MEMORY_HOST);
1048     else externList = NULL;
1049     externLeng = 0;
1050     for ( i = startRow; i <= endRow; i++ )
1051     {
1052        HYPRE_ParCSRMatrixGetRow(A, i, &rowLeng, &colInd, &colVal);
1053        for (j = 0; j < rowLeng; j++)
1054        {
1055           if ( colInd[j] < startRow || colInd[j] > endRow )
1056              if ( colVal[j] != 0.0 ) externList[externLeng++] = colInd[j];
1057 /*
1058              externList[externLeng++] = colInd[j];
1059 */
1060        }
1061        HYPRE_ParCSRMatrixRestoreRow(A, i, &rowLeng, &colInd, &colVal);
1062     }
1063     if ( externLeng > 1 ) hypre_qsort0( externList, 0, externLeng-1 );
1064     ncnt = 0;
1065     for ( i = 1; i < externLeng; i++ )
1066     {
1067        if ( externList[i] != externList[ncnt] )
1068           externList[++ncnt] = externList[i];
1069     }
1070     if ( externLeng > 0 ) externLeng = ncnt + 1;
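    /* externList now holds the sorted, duplicate-free global indices of  */
    /* the external (ghost) equations; ghost j is numbered localEqns + j  */
    /* in the local matrix                                                */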
1071 
1072     /* -------------------------------------------------------- */
1073     /* allocate the CSR matrix                                  */
1074     /* -------------------------------------------------------- */
1075 
1076     nnz = 0;
1077     for ( i = 0; i < localEqns; i++ ) nnz += diagSize[i] + offdiagSize[i];
1078     rowptr  = hypre_TAlloc(int,  (localEqns + 1) , HYPRE_MEMORY_HOST);
1079     columns = hypre_TAlloc(int,  nnz , HYPRE_MEMORY_HOST);
1080     values  = hypre_TAlloc(double,  nnz , HYPRE_MEMORY_HOST);
1081     rowptr[0] = 0;
1082     for ( i = 1; i <= localEqns; i++ )
1083        rowptr[i] = rowptr[i-1] + diagSize[i-1] + offdiagSize[i-1];
1084     hypre_TFree(diagSize, HYPRE_MEMORY_HOST);
1085     hypre_TFree(offdiagSize, HYPRE_MEMORY_HOST);
1086 
1087     /* -------------------------------------------------------- */
1088     /* put the matrix data in the CSR matrix                    */
1089     /* -------------------------------------------------------- */
1090 
1091     rowptr[0] = 0;
1092     ncnt      = 0;
1093     for ( i = startRow; i <= endRow; i++ )
1094     {
1095        HYPRE_ParCSRMatrixGetRow(A, i, &rowLeng, &colInd, &colVal);
1096        for (j = 0; j < rowLeng; j++)
1097        {
1098           index = colInd[j];
1099           if ( colVal[j] != 0.0 )
1100           {
1101              if ( index < startRow || index > endRow )
1102              {
1103                 columns[ncnt] = hypre_BinarySearch(externList,index,
1104                                                    externLeng );
1105                 columns[ncnt] += localEqns;
1106                 values [ncnt++] = colVal[j];
1107              }
1108              else
1109              {
1110                 columns[ncnt] = index - startRow;
1111                 values[ncnt++] = colVal[j];
1112              }
1113           }
1114        }
1115        rowptr[i-startRow+1] = ncnt;
1116        HYPRE_ParCSRMatrixRestoreRow(A, i, &rowLeng, &colInd, &colVal);
1117     }
1118     hypre_assert( ncnt == nnz );
1119 
1120     /* -------------------------------------------------------- */
1121     /* initialize the MH_Matrix data structure                  */
1122     /* -------------------------------------------------------- */
1123 
1124     mh_mat->Nrows       = localEqns;
1125     mh_mat->rowptr      = rowptr;
1126     mh_mat->colnum      = columns;
1127     mh_mat->values      = values;
1128     mh_mat->sendProcCnt = 0;
1129     mh_mat->recvProcCnt = 0;
1130     mh_mat->sendLeng    = NULL;
1131     mh_mat->recvLeng    = NULL;
1132     mh_mat->sendProc    = NULL;
1133     mh_mat->recvProc    = NULL;
1134     mh_mat->sendList    = NULL;
1135     mh_mat->map         = externList;
1136 
1137     /* -------------------------------------------------------- */
1138     /* form the remote portion of the matrix                    */
1139     /* -------------------------------------------------------- */
1140 
1141 #ifndef HYPRE_SEQUENTIAL
1142     if ( nprocs > 1 )
1143     {
1144        /* ----------------------------------------------------- */
1145        /* count number of elements to be received from each     */
1146        /* remote processor (assume sequential mapping)          */
1147        /* ----------------------------------------------------- */
1148 
1149        tempCnt = hypre_TAlloc(int,  nprocs , HYPRE_MEMORY_HOST);
1150        for ( i = 0; i < nprocs; i++ ) tempCnt[i] = 0;
1151        for ( i = 0; i < externLeng; i++ )
1152        {
1153           for ( j = 0; j < nprocs; j++ )
1154           {
1155              if ( externList[i] >= partition[j] &&
1156                   externList[i] < partition[j+1] )
1157              {
1158                 tempCnt[j]++;
1159                 break;
1160              }
1161           }
1162        }
1163 
1164        /* ----------------------------------------------------- */
1165        /* compile a list of processors to receive data from     */
1166        /* ----------------------------------------------------- */
1167 
1168        recvProcCnt = 0;
1169        for ( i = 0; i < nprocs; i++ )
1170           if ( tempCnt[i] > 0 ) recvProcCnt++;
1171        recvLeng = hypre_TAlloc(int,  recvProcCnt , HYPRE_MEMORY_HOST);
1172        recvProc = hypre_TAlloc(int,  recvProcCnt , HYPRE_MEMORY_HOST);
1173        recvProcCnt = 0;
1174        for ( i = 0; i < nprocs; i++ )
1175        {
1176           if ( tempCnt[i] > 0 )
1177           {
1178              recvProc[recvProcCnt]   = i;
1179              recvLeng[recvProcCnt++] = tempCnt[i];
1180           }
1181        }
1182 
1183        /* ----------------------------------------------------- */
1184        /* each processor has to find out how many processors it */
1185        /* has to send data to                                   */
1186        /* ----------------------------------------------------- */
1187 
1188        sendLeng = hypre_TAlloc(int,  nprocs , HYPRE_MEMORY_HOST);
1189        for ( i = 0; i < nprocs; i++ ) tempCnt[i] = 0;
1190        for ( i = 0; i < recvProcCnt; i++ ) tempCnt[recvProc[i]] = 1;
1191        MPI_Allreduce(tempCnt, sendLeng, nprocs, MPI_INT, MPI_SUM, comm );
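       /* after the sum-reduction, entry p counts how many processors    */
       /* expect to receive data from processor p                        */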
1192        sendProcCnt = sendLeng[my_id];
1193        hypre_TFree(sendLeng, HYPRE_MEMORY_HOST);
1194        if ( sendProcCnt > 0 )
1195        {
1196           sendLeng = hypre_TAlloc(int,  sendProcCnt , HYPRE_MEMORY_HOST);
1197           sendProc = hypre_TAlloc(int,  sendProcCnt , HYPRE_MEMORY_HOST);
1198           sendList = hypre_TAlloc(int*,  sendProcCnt , HYPRE_MEMORY_HOST);
1199        }
1200        else
1201        {
1202           sendLeng = sendProc = NULL;
1203           sendList = NULL;
1204        }
1205 
1206        /* ----------------------------------------------------- */
1207        /* each processor tells the processors it will receive   */
1208        /* data from how many entries it expects from each        */
1209        /* ----------------------------------------------------- */
1210 
1211        msgid = 539;
1212        for ( i = 0; i < recvProcCnt; i++ )
1213        {
1214           MPI_Send((void*) &recvLeng[i],1,MPI_INT,recvProc[i],msgid,comm);
1215        }
1216        for ( i = 0; i < sendProcCnt; i++ )
1217        {
1218           MPI_Recv((void*) &sendLeng[i],1,MPI_INT,MPI_ANY_SOURCE,msgid,
1219                    comm,&status);
1220           sendProc[i] = status.MPI_SOURCE;
1221           sendList[i] = hypre_TAlloc(int,  sendLeng[i] , HYPRE_MEMORY_HOST);
1222           if ( sendList[i] == NULL )
1223              printf("sendList allocation problem : length %d\n", sendLeng[i]);
1224        }
1225 
1226        /* ----------------------------------------------------- */
1227        /* locate the start of each owning processor's segment   */
1228        /* in the sorted external equation list                   */
1229        /* ----------------------------------------------------- */
1230 
1231        for ( i = 0; i < nprocs; i++ ) tempCnt[i] = 0;
1232        ncnt = 1;
1233        for ( i = 0; i < externLeng; i++ )
1234        {
1235           if ( externList[i] >= partition[ncnt] )
1236           {
1237              tempCnt[ncnt-1] = i;
1238              i--;
1239              ncnt++;
1240           }
1241        }
1242        for ( i = ncnt-1; i < nprocs; i++ ) tempCnt[i] = externLeng;
1243 
1244        /* ----------------------------------------------------- */
1245        /* send the global equation numbers                      */
1246        /* ----------------------------------------------------- */
1247 
1248        if ( sendProcCnt > 0 )
1249           Request = hypre_TAlloc(MPI_Request, sendProcCnt , HYPRE_MEMORY_HOST);
1250 
1251        msgid = 540;
1252        for ( i = 0; i < sendProcCnt; i++ )
1253        {
1254           MPI_Irecv((void*)sendList[i],sendLeng[i],MPI_INT,sendProc[i],
1255                     msgid,comm,&Request[i]);
1256        }
1257        for ( i = 0; i < recvProcCnt; i++ )
1258        {
1259           if ( recvProc[i] == 0 ) j = 0;
1260           else                    j = tempCnt[recvProc[i]-1];
1261           rowLeng = recvLeng[i];
1262           MPI_Send((void*) &externList[j], rowLeng, MPI_INT, recvProc[i],
1263                    msgid, comm);
1264        }
1265        for ( i = 0; i < sendProcCnt; i++ )
1266        {
1267           MPI_Wait( &Request[i], &status );
1268        }
1269        if ( sendProcCnt > 0 )
1270           hypre_TFree(Request, HYPRE_MEMORY_HOST);
1271 
1272        /* ----------------------------------------------------- */
1273        /* convert the send list from global to local numbers    */
1274        /* ----------------------------------------------------- */
1275 
1276        for ( i = 0; i < sendProcCnt; i++ )
1277        {
1278           for ( j = 0; j < sendLeng[i]; j++ )
1279           {
1280              index = sendList[i][j] - startRow;
1281              if ( index < 0 || index >= localEqns )
1282              {
1283                 printf("%d : Construct MH matrix Error - index out ",my_id);
1284                 printf("of range %d\n", index);
1285              }
1286              sendList[i][j] = index;
1287           }
1288        }
1289 
1290        /* ----------------------------------------------------- */
1291        /* store the communication pattern in the MH_Matrix      */
1292        /* ----------------------------------------------------- */
1293 
1294        mh_mat->sendProcCnt = sendProcCnt;
1295        mh_mat->recvProcCnt = recvProcCnt;
1296        mh_mat->sendLeng    = sendLeng;
1297        mh_mat->recvLeng    = recvLeng;
1298        mh_mat->sendProc    = sendProc;
1299        mh_mat->recvProc    = recvProc;
1300        mh_mat->sendList    = sendList;
1301 
1302        /* ----------------------------------------------------- */
1303        /* clean up                                              */
1304        /* ----------------------------------------------------- */
1305 
1306        hypre_TFree(tempCnt, HYPRE_MEMORY_HOST);
1307     }
1308     return 0;
1309 #else
1310     nprocs = 1;
1311     return (nprocs-1);
1312 #endif
1313 }
1314 
1315