      SUBROUTINE PSPTTRF( N, D, E, JA, DESCA, AF, LAF, WORK, LWORK,
     $                    INFO )
*
*  -- ScaLAPACK routine (version 1.7) --
*     University of Tennessee, Knoxville, Oak Ridge National Laboratory,
*     and University of California, Berkeley.
*     April 3, 2000
*
*     .. Scalar Arguments ..
      INTEGER            INFO, JA, LAF, LWORK, N
*     ..
*     .. Array Arguments ..
      INTEGER            DESCA( * )
      REAL               AF( * ), D( * ), E( * ), WORK( * )
*     ..
*
*
*  Purpose
*  =======
*
*  PSPTTRF computes a Cholesky factorization of an N-by-N real
*  tridiagonal symmetric positive definite distributed matrix
*  A(1:N, JA:JA+N-1).
*  Reordering is used to increase parallelism in the factorization.
*  This reordering results in factors that are DIFFERENT from those
*  produced by equivalent sequential codes. These factors cannot
*  be used directly by users; however, they can be used in
*  subsequent calls to PSPTTRS to solve linear systems.
*
*  The factorization has the form
*
*          P A(1:N, JA:JA+N-1) P^T = U' D U   or
*
*          P A(1:N, JA:JA+N-1) P^T = L D L',
*
*  where U is a tridiagonal upper triangular matrix and L is
*  tridiagonal lower triangular, and P is a permutation matrix.
*
*  =====================================================================
*
*  Arguments
*  =========
*
*  N       (global input) INTEGER
*          The number of rows and columns to be operated on, i.e. the
*          order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
*
*  D       (local input/local output) REAL pointer to local
*          part of global vector storing the main diagonal of the
*          matrix.
*          On exit, this array contains information on the factors
*          of the matrix.
*          Must be of size >= DESCA( NB_ ).
*
*  E       (local input/local output) REAL pointer to local
*          part of global vector storing the upper diagonal
*          (superdiagonal) of the matrix. Globally, E(n) is not
*          referenced, and E must be aligned with D.
*          On exit, this array contains information on the factors
*          of the matrix.
*          Must be of size >= DESCA( NB_ ).
*
*  JA      (global input) INTEGER
*          The index in the global array A that points to the start of
*          the matrix to be operated on (which may be either all of A
*          or a submatrix of A).
*
*  DESCA   (global and local input) INTEGER array of dimension DLEN.
*          if 1D type (DTYPE_A=501 or 502), DLEN >= 7;
*          if 2D type (DTYPE_A=1), DLEN >= 9.
*          The array descriptor for the distributed matrix A.
*          Contains information on the mapping of A to memory. Please
*          see NOTES below for a full description and options.
*
*  AF      (local output) REAL array, dimension LAF.
*          Auxiliary Fillin Space.
*          Fillin is created during the factorization routine
*          PSPTTRF and this is stored in AF. If a linear system
*          is to be solved using PSPTTRS after the factorization
*          routine, AF *must not be altered* after the factorization.
*
*  LAF     (local input) INTEGER
*          Size of user-input Auxiliary Fillin space AF. Must be >=
*          12*NPCOL + 3*NB (the value LAF_MIN checked by this
*          routine).
*          If LAF is not large enough, an error code is returned
*          and the minimum acceptable size is returned in AF( 1 ).
*
*  WORK    (local workspace/local output)
*          REAL temporary workspace. This space may
*          be overwritten in between calls to routines. WORK must be
*          the size given in LWORK.
*          On exit, WORK( 1 ) contains the minimal LWORK.
*
*  LWORK   (local input or global input) INTEGER
*          Size of user-input workspace WORK.
*          If LWORK is too small, the minimal acceptable size will be
*          returned in WORK( 1 ) and an error code is returned.
*          LWORK >= 8*NPCOL.
*          If LWORK = -1, a workspace query is assumed: the routine
*          returns the minimal LWORK in WORK( 1 ) and exits without
*          error.
*
*  INFO    (local output) INTEGER
*          = 0:  successful exit
*          < 0:  If the i-th argument is an array and the j-th entry
*                had an illegal value, then INFO = -(i*100+j); if the
*                i-th argument is a scalar and had an illegal value,
*                then INFO = -i.
*          > 0:  If INFO = K <= NPROCS, the submatrix stored on
*                processor K and factored locally was not positive
*                definite, and the factorization was not completed.
*                If INFO = K > NPROCS, the submatrix stored on
*                processor K-NPROCS representing interactions with
*                other processors was not positive definite, and the
*                factorization was not completed.
*
*  =====================================================================
*
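*  Example: a minimal calling sketch, illustrative only. The grid
*  setup, the choice of NPCOL = 4 processes, and all variable values
*  are assumptions, not requirements of this routine. It builds a 1D
*  descriptor of type 501 and invokes the factorization:
*
*        CALL BLACS_GET( -1, 0, ICTXT )
*        CALL BLACS_GRIDINIT( ICTXT, 'Row-major', 1, 4 )
*        DESCA( 1 ) = 501
*        DESCA( 2 ) = ICTXT
*        DESCA( 3 ) = N
*        DESCA( 4 ) = NB
*        DESCA( 5 ) = 0
*        DESCA( 6 ) = 0
*        DESCA( 7 ) = 0
*        LAF = 12*4 + 3*NB
*        LWORK = 8*4
*        CALL PSPTTRF( N, D, E, 1, DESCA, AF, LAF, WORK, LWORK, INFO )
*
*  A subsequent call to PSPTTRS may then reuse AF to solve systems
*  with the factored matrix.
*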
*
*  Restrictions
*  ============
*
*  The following are restrictions on the input parameters. Some of these
*    are temporary and will be removed in future releases, while others
*    may reflect fundamental technical limitations.
*
*    Non-cyclic restriction: VERY IMPORTANT!
*      P*NB >= mod(JA-1,NB)+N.
*      The mapping for matrices must be blocked, reflecting the nature
*      of the divide and conquer algorithm as a task-parallel algorithm.
*      This formula in words is: no processor may have more than one
*      chunk of the matrix (a worked example follows this list).
*
*    Blocksize cannot be too small:
*      If the matrix spans more than one processor, the following
*      restriction on NB, the size of each block on each processor,
*      must hold:
*      NB >= 2
*      The bulk of parallel computation is done on the matrix of size
*      O(NB) on each processor. If this is too small, divide and conquer
*      is a poor choice of algorithm.
*
*    Submatrix reference:
*      JA = IB
*      Alignment restriction that prevents unnecessary communication.
*
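*  Worked example of the non-cyclic restriction (an illustration, not
*    an additional requirement): with P = 4 processes, NB = 64, and
*    JA = 1, P*NB >= mod(JA-1,NB)+N reads 256 >= 0+N, so N may be at
*    most 256 and each process holds at most one chunk of the matrix.
*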
*  =====================================================================
*
*
*  Notes
*  =====
*
*  If the factorization routine and the solve routine are to be called
*    separately (to solve various sets of righthand sides using the same
*    coefficient matrix), the auxiliary space AF *must not be altered*
*    between calls to the factorization routine and the solve routine.
*
*  The best algorithm for solving banded and tridiagonal linear systems
*    depends on a variety of parameters, especially the bandwidth.
*    Currently, only algorithms designed for the case N/P >> bw are
*    implemented. These go by many names, including Divide and Conquer,
*    Partitioning, domain decomposition-type, etc.
*    For tridiagonal matrices, it is obvious: N/P >> bw(=1), and so D&C
*    algorithms are the appropriate choice.
*
*  Algorithm description: Divide and Conquer
*
*    The Divide and Conquer algorithm assumes the matrix is narrowly
*      banded compared with the number of equations. In this situation,
*      it is best to distribute the input matrix A one-dimensionally,
*      with columns atomic and rows divided amongst the processes.
*      The basic algorithm divides the tridiagonal matrix up into
*      P pieces with one stored on each processor,
*      and then proceeds in 2 phases for the factorization or 3 for the
*      solution of a linear system.
*      1) Local Phase:
*         The individual pieces are factored independently and in
*         parallel. These factors are applied to the matrix creating
*         fillin, which is stored in a non-inspectable way in auxiliary
*         space AF. Mathematically, this is equivalent to reordering
*         the matrix A as P A P^T and then factoring the principal
*         leading submatrix of size equal to the sum of the sizes of
*         the matrices factored on each processor. The factors of
*         these submatrices overwrite the corresponding parts of A
*         in memory.
*      2) Reduced System Phase:
*         A small ((P-1) x (P-1)) system is formed representing
*         interaction of the larger blocks, and is stored (as are its
*         factors) in the space AF. A parallel Block Cyclic Reduction
*         algorithm is used. For a linear system, a parallel front
*         solve followed by an analogous backsolve, both using the
*         structure of the factored matrix, are performed.
*      3) Backsubstitution Phase:
*         For a linear system, a local backsubstitution is performed
*         on each processor in parallel.
*
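*  For reference, Phase 1 applies the textbook LDL^T recurrence to
*    each local block. The sketch below is illustrative only (the
*    routine actually calls SPTTRF); M, D, E stand for the local
*    block size and its diagonals:
*
*        DO 100 I = 1, M - 1
*           EI = E( I )
*           E( I ) = EI / D( I )
*           D( I+1 ) = D( I+1 ) - E( I )*EI
*    100 CONTINUE
*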
*
*
*  Descriptors
*  ===========
*
*  Descriptors now have *types* and differ from ScaLAPACK 1.0.
*
*  Note: tridiagonal codes can use either the old two dimensional
*    or new one-dimensional descriptors, though the processor grid in
*    both cases *must be one-dimensional*. We describe both types below.
*
*  Each global data object is described by an associated description
*  vector.  This vector stores the information required to establish
*  the mapping between an object element and its corresponding process
*  and memory location.
*
*  Let A be a generic term for any 2D block cyclically distributed
*  array.  Such a global array has an associated description vector
*  DESCA.  In the following comments, the character _ should be read as
*  "of the global array".
*
*  NOTATION        STORED IN      EXPLANATION
*  --------------- -------------- --------------------------------------
*  DTYPE_A(global) DESCA( DTYPE_ )The descriptor type.  In this case,
*                                 DTYPE_A = 1.
*  CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
*                                 the BLACS process grid A is distribu-
*                                 ted over. The context itself is glo-
*                                 bal, but the handle (the integer
*                                 value) may vary.
*  M_A    (global) DESCA( M_ )    The number of rows in the global
*                                 array A.
*  N_A    (global) DESCA( N_ )    The number of columns in the global
*                                 array A.
*  MB_A   (global) DESCA( MB_ )   The blocking factor used to distribute
*                                 the rows of the array.
*  NB_A   (global) DESCA( NB_ )   The blocking factor used to distribute
*                                 the columns of the array.
*  RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
*                                 row of the array A is distributed.
*  CSRC_A (global) DESCA( CSRC_ ) The process column over which the
*                                 first column of the array A is
*                                 distributed.
*  LLD_A  (local)  DESCA( LLD_ )  The leading dimension of the local
*                                 array.  LLD_A >= MAX(1,LOCr(M_A)).
*
*  Let K be the number of rows or columns of a distributed matrix,
*  and assume that its process grid has dimension p x q.
*  LOCr( K ) denotes the number of elements of K that a process
*  would receive if K were distributed over the p processes of its
*  process column.
*  Similarly, LOCc( K ) denotes the number of elements of K that a
*  process would receive if K were distributed over the q processes of
*  its process row.
*  The values of LOCr() and LOCc() may be determined via a call to the
*  ScaLAPACK tool function, NUMROC:
*          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
*          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
*  An upper bound for these quantities may be computed by:
*          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
*          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
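*  For instance (values assumed purely for illustration): with
*  M = 1000, MB_A = 64, NPROW = 4, and RSRC_A = 0, process row 0
*  receives LOCr( M ) = NUMROC( 1000, 64, 0, 0, 4 ) = 256 entries
*  (four full blocks), matching the bound
*  ceil( ceil(1000/64)/4 )*64 = 256.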
*
*
*  One-dimensional descriptors:
*
*  One-dimensional descriptors are a new addition to ScaLAPACK since
*    version 1.0. They simplify and shorten the descriptor for 1D
*    arrays.
*
*  Since ScaLAPACK supports two-dimensional arrays as the fundamental
*    object, we allow 1D arrays to be distributed either over the
*    first dimension of the array (as if the grid were P-by-1) or the
*    2nd dimension (as if the grid were 1-by-P). This choice is
*    indicated by the descriptor type (501 or 502)
*    as described below.
*    However, for tridiagonal matrices, since the objects being
*    distributed are the individual vectors storing the diagonals, we
*    have adopted the convention that both the P-by-1 descriptor and
*    the 1-by-P descriptor are allowed and are equivalent for
*    tridiagonal matrices. Thus, for tridiagonal matrices,
*    DTYPE_A = 501 or 502 can be used interchangeably
*    without any other change.
*  We require that the distributed vectors storing the diagonals of a
*    tridiagonal matrix be aligned with each other. Because of this, a
*    single descriptor, DESCA, serves to describe the distribution
*    of all diagonals simultaneously.
*
*    IMPORTANT NOTE: the actual BLACS grid represented by the
*    CTXT entry in the descriptor may be *either*  P-by-1 or 1-by-P
*    irrespective of which one-dimensional descriptor type
*    (501 or 502) is input.
*    This routine will interpret the grid properly either way.
*    ScaLAPACK routines *do not support intercontext operations* so that
*    the grid passed to a single ScaLAPACK routine *must be the same*
*    for all array descriptors passed to that routine.
*
*    NOTE: In all cases where 1D descriptors are used, 2D descriptors
*    may also be used, since a one-dimensional array is a special case
*    of a two-dimensional array with one dimension of size unity.
*    The two-dimensional array used in this case *must* be of the
*    proper orientation:
*      If the appropriate one-dimensional descriptor is DTYPE_A=501
*      (1 by P type), then the two dimensional descriptor must
*      have a CTXT value that refers to a 1 by P BLACS grid;
*      If the appropriate one-dimensional descriptor is DTYPE_A=502
*      (P by 1 type), then the two dimensional descriptor must
*      have a CTXT value that refers to a P by 1 BLACS grid.
*
*
*  Summary of allowed descriptors, types, and BLACS grids:
*  DTYPE           501         502         1         1
*  BLACS grid      1xP or Px1  1xP or Px1  1xP       Px1
*  -----------------------------------------------------
*  A               OK          OK          OK        NO
*  B               NO          OK          NO        OK
*
*  Let A be a generic term for any 1D block cyclically distributed
*  array.  Such a global array has an associated description vector
*  DESCA.  In the following comments, the character _ should be read as
*  "of the global array".
*
*  NOTATION        STORED IN  EXPLANATION
*  --------------- ---------- ------------------------------------------
*  DTYPE_A(global) DESCA( 1 ) The descriptor type. For 1D grids,
*                                DTYPE_A = 501: 1-by-P grid.
*                                DTYPE_A = 502: P-by-1 grid.
*  CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
*                                the BLACS process grid A is distribu-
*                                ted over. The context itself is glo-
*                                bal, but the handle (the integer
*                                value) may vary.
*  N_A    (global) DESCA( 3 ) The size of the array dimension being
*                                distributed.
*  NB_A   (global) DESCA( 4 ) The blocking factor used to distribute
*                                the distributed dimension of the array.
*  SRC_A  (global) DESCA( 5 ) The process row or column over which the
*                                first row or column of the array
*                                is distributed.
*  Ignored         DESCA( 6 ) Ignored for tridiagonal matrices.
*  Reserved        DESCA( 7 ) Reserved for future use.
*
*
*
*  =====================================================================
*
*  Code Developer: Andrew J. Cleary, University of Tennessee.
*    Current address: Lawrence Livermore National Labs.
*
*  =====================================================================
*
*     .. Parameters ..
      REAL               ONE
      PARAMETER          ( ONE = 1.0E+0 )
      REAL               ZERO
      PARAMETER          ( ZERO = 0.0E+0 )
      INTEGER            INT_ONE
      PARAMETER          ( INT_ONE = 1 )
      INTEGER            DESCMULT, BIGNUM
      PARAMETER          ( DESCMULT = 100, BIGNUM = DESCMULT*DESCMULT )
      INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
     $                   LLD_, MB_, M_, NB_, N_, RSRC_
      PARAMETER          ( BLOCK_CYCLIC_2D = 1, DLEN_ = 9, DTYPE_ = 1,
     $                     CTXT_ = 2, M_ = 3, N_ = 4, MB_ = 5, NB_ = 6,
     $                     RSRC_ = 7, CSRC_ = 8, LLD_ = 9 )
*     ..
*     .. Local Scalars ..
      INTEGER            COMM_PROC, CSRC, FIRST_PROC, I, ICTXT,
     $                   ICTXT_NEW, ICTXT_SAVE, IDUM3, INT_TEMP, JA_NEW,
     $                   LAF_MIN, LEVEL_DIST, LLDA, MYCOL, MYROW,
     $                   MY_NUM_COLS, NB, NP, NPCOL, NPROW, NP_SAVE,
     $                   ODD_SIZE, PART_OFFSET, PART_SIZE, RETURN_CODE,
     $                   STORE_N_A, TEMP, WORK_SIZE_MIN
*     ..
*     .. Local Arrays ..
      INTEGER            DESCA_1XP( 7 ), PARAM_CHECK( 7, 3 )
*     ..
*     .. External Subroutines ..
      EXTERNAL           BLACS_GRIDEXIT, BLACS_GRIDINFO, DESC_CONVERT,
     $                   GLOBCHK, IGAMX2D, IGEBR2D, IGEBS2D, PXERBLA,
     $                   RESHAPE, SGERV2D, SGESD2D, SPTTRF, SPTTRSV,
     $                   STRRV2D, STRSD2D
*     ..
*     .. External Functions ..
      INTEGER            NUMROC
      EXTERNAL           NUMROC
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          MOD, REAL
*     ..
*     .. Executable Statements ..
*
*     Test the input parameters
*
      INFO = 0
*
*     Convert descriptor into standard form for easy access to
*        parameters, check that grid is of right shape.
*
      DESCA_1XP( 1 ) = 501
*
      TEMP = DESCA( DTYPE_ )
      IF( TEMP.EQ.502 ) THEN
*        Temporarily set the descriptor type to 1xP type
         DESCA( DTYPE_ ) = 501
      END IF
*
      CALL DESC_CONVERT( DESCA, DESCA_1XP, RETURN_CODE )
*
      DESCA( DTYPE_ ) = TEMP
*
      IF( RETURN_CODE.NE.0 ) THEN
         INFO = -( 5*100+2 )
      END IF
*
*     Get values out of descriptor for use in code.
*
      ICTXT = DESCA_1XP( 2 )
      CSRC = DESCA_1XP( 5 )
      NB = DESCA_1XP( 4 )
      LLDA = DESCA_1XP( 6 )
      STORE_N_A = DESCA_1XP( 3 )
*
*     Get grid parameters
*
      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
      NP = NPROW*NPCOL
*
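*     A negative LWORK other than -1 is illegal; LWORK = -1 signals a
*     workspace query. IDUM3 encodes this choice for the global
*     consistency check below.
*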
      IF( LWORK.LT.-1 ) THEN
         INFO = -9
      ELSE IF( LWORK.EQ.-1 ) THEN
         IDUM3 = -1
      ELSE
         IDUM3 = 1
      END IF
*
      IF( N.LT.0 ) THEN
         INFO = -1
      END IF
*
      IF( N+JA-1.GT.STORE_N_A ) THEN
         INFO = -( 5*100+6 )
      END IF
*
*     Argument checking that is specific to Divide & Conquer routine
*
      IF( NPROW.NE.1 ) THEN
         INFO = -( 5*100+2 )
      END IF
*
      IF( N.GT.NP*NB-MOD( JA-1, NB ) ) THEN
         INFO = -( 1 )
         CALL PXERBLA( ICTXT, 'PSPTTRF, D&C alg.: only 1 block per proc'
     $                 , -INFO )
         RETURN
      END IF
*
      IF( ( JA+N-1.GT.NB ) .AND. ( NB.LT.2*INT_ONE ) ) THEN
         INFO = -( 5*100+4 )
         CALL PXERBLA( ICTXT, 'PSPTTRF, D&C alg.: NB too small', -INFO )
         RETURN
      END IF
*
*     Check auxiliary storage size
*
      LAF_MIN = ( 12*NPCOL+3*NB )
*
      IF( LAF.LT.LAF_MIN ) THEN
         INFO = -7
*        Put minimum value of LAF into AF( 1 )
         AF( 1 ) = LAF_MIN
         CALL PXERBLA( ICTXT, 'PSPTTRF: auxiliary storage error ',
     $                 -INFO )
         RETURN
      END IF
*
*     Check worksize
*
      WORK_SIZE_MIN = 8*NPCOL
*
      WORK( 1 ) = WORK_SIZE_MIN
*
      IF( LWORK.LT.WORK_SIZE_MIN ) THEN
         IF( LWORK.NE.-1 ) THEN
            INFO = -9
            CALL PXERBLA( ICTXT, 'PSPTTRF: worksize error ', -INFO )
         END IF
         RETURN
      END IF
*
*     Pack params and positions into arrays for global consistency check
*
      PARAM_CHECK( 7, 1 ) = DESCA( 5 )
      PARAM_CHECK( 6, 1 ) = DESCA( 4 )
      PARAM_CHECK( 5, 1 ) = DESCA( 3 )
      PARAM_CHECK( 4, 1 ) = DESCA( 1 )
      PARAM_CHECK( 3, 1 ) = JA
      PARAM_CHECK( 2, 1 ) = N
      PARAM_CHECK( 1, 1 ) = IDUM3
*
      PARAM_CHECK( 7, 2 ) = 505
      PARAM_CHECK( 6, 2 ) = 504
      PARAM_CHECK( 5, 2 ) = 503
      PARAM_CHECK( 4, 2 ) = 501
      PARAM_CHECK( 3, 2 ) = 4
      PARAM_CHECK( 2, 2 ) = 1
      PARAM_CHECK( 1, 2 ) = 9
*
*     Want to find errors with MIN( ), so if no error, set it to a big
*     number. If there already is an error, multiply by the
*     descriptor multiplier.
*
      IF( INFO.GE.0 ) THEN
         INFO = BIGNUM
      ELSE IF( INFO.LT.-DESCMULT ) THEN
         INFO = -INFO
      ELSE
         INFO = -INFO*DESCMULT
      END IF
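*
*     For example: a scalar-argument error INFO = -9 is encoded as
*     900 and decoded back to -9 below, while a descriptor-entry
*     error such as INFO = -( 5*100+2 ) is encoded as 502 and, not
*     being a multiple of DESCMULT, is decoded back to -502.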
*
*     Check consistency across processors
*
      CALL GLOBCHK( ICTXT, 7, PARAM_CHECK, 7, PARAM_CHECK( 1, 3 ),
     $              INFO )
*
*     Prepare output: set INFO = 0 if no error, and divide by DESCMULT
*     if error is not in a descriptor entry.
*
      IF( INFO.EQ.BIGNUM ) THEN
         INFO = 0
      ELSE IF( MOD( INFO, DESCMULT ).EQ.0 ) THEN
         INFO = -INFO / DESCMULT
      ELSE
         INFO = -INFO
      END IF
*
      IF( INFO.LT.0 ) THEN
         CALL PXERBLA( ICTXT, 'PSPTTRF', -INFO )
         RETURN
      END IF
*
*     Quick return if possible
*
      IF( N.EQ.0 )
     $   RETURN
*
*     Adjust addressing into matrix space to properly get into
*        the beginning part of the relevant data
*
      PART_OFFSET = NB*( ( JA-1 ) / ( NPCOL*NB ) )
*
      IF( ( MYCOL-CSRC ).LT.( JA-PART_OFFSET-1 ) / NB ) THEN
         PART_OFFSET = PART_OFFSET + NB
      END IF
*
      IF( MYCOL.LT.CSRC ) THEN
         PART_OFFSET = PART_OFFSET - NB
      END IF
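*
*     Illustration of the adjustment above (values assumed): with
*        NB = 64, NPCOL = 4, CSRC = 0, and JA = 129, PART_OFFSET
*        starts as 64*( 128/256 ) = 0; the first test then adds NB on
*        process columns 0 and 1, which precede the block holding
*        column JA.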
*
*     Form a new BLACS grid (the "standard form" grid) with only procs
*        holding part of the matrix, of size 1xNP where NP is adjusted,
*        starting at csrc=0, with JA modified to reflect dropped procs.
*
*     First processor to hold part of the matrix:
*
      FIRST_PROC = MOD( ( JA-1 ) / NB+CSRC, NPCOL )
*
*     Calculate new JA while dropping off unused processors.
*
      JA_NEW = MOD( JA-1, NB ) + 1
*
*     Save and compute new value of NP
*
      NP_SAVE = NP
      NP = ( JA_NEW+N-2 ) / NB + 1
*
*     Call utility routine that forms "standard-form" grid
*
      CALL RESHAPE( ICTXT, INT_ONE, ICTXT_NEW, INT_ONE, FIRST_PROC,
     $              INT_ONE, NP )
*
*     Use new context from standard grid as context.
*
      ICTXT_SAVE = ICTXT
      ICTXT = ICTXT_NEW
      DESCA_1XP( 2 ) = ICTXT_NEW
*
*     Get information about new grid.
*
      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*
*     Drop out processors that do not have part of the matrix.
*
      IF( MYROW.LT.0 ) THEN
         GO TO 90
      END IF
*
*     ********************************
*     Values reused throughout routine
*
*     User-input value of partition size
*
      PART_SIZE = NB
*
*     Number of columns in each processor
*
      MY_NUM_COLS = NUMROC( N, PART_SIZE, MYCOL, 0, NPCOL )
*
*     Offset in columns to beginning of main partition in each proc
*
      IF( MYCOL.EQ.0 ) THEN
         PART_OFFSET = PART_OFFSET + MOD( JA_NEW-1, PART_SIZE )
         MY_NUM_COLS = MY_NUM_COLS - MOD( JA_NEW-1, PART_SIZE )
      END IF
*
*     Size of main (or odd) partition in each processor
*
      ODD_SIZE = MY_NUM_COLS
      IF( MYCOL.LT.NP-1 ) THEN
         ODD_SIZE = ODD_SIZE - INT_ONE
      END IF
*
*     Zero out space for fillin
*
      DO 10 I = 1, LAF_MIN
         AF( I ) = ZERO
   10 CONTINUE
*
*     Begin main code
*
********************************************************************
*       PHASE 1: Local computation phase.
********************************************************************
*
      IF( MYCOL.LT.NP-1 ) THEN
*         Transfer last triangle D_i of local matrix to next processor
*         which needs it to calculate fillin due to factorization of
*         its main (odd) block A_i.
*         Overlap the send with the factorization of A_i.
*
         CALL STRSD2D( ICTXT, 'U', 'N', 1, 1,
     $                 E( PART_OFFSET+ODD_SIZE+1 ), LLDA-1, 0, MYCOL+1 )
*
      END IF
*
*       Factor main partition A_i = L_i {L_i}^T in each processor,
*       or A_i = {U_i}^T {U_i} if E is the upper superdiagonal.
*
      CALL SPTTRF( ODD_SIZE, D( PART_OFFSET+1 ), E( PART_OFFSET+1 ),
     $             INFO )
*
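*     A nonzero INFO from SPTTRF means this process's local block was
*       not positive definite; the 1-based process index is reported,
*       per the INFO convention above (INFO = K <= NPROCS).
*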
      IF( INFO.NE.0 ) THEN
         INFO = MYCOL + 1
         GO TO 20
      END IF
*
      IF( MYCOL.LT.NP-1 ) THEN
*         Apply factorization to odd-even connection block B_i
*
*         Perform the triangular system solve {L_i}{{B'}_i}^T = {B_i}^T
*           by dividing B_i by the diagonal element
*
         E( PART_OFFSET+ODD_SIZE ) = E( PART_OFFSET+ODD_SIZE ) /
     $                               D( PART_OFFSET+ODD_SIZE )
*
*         Compute contribution to diagonal block(s) of reduced system,
*          {C'}_i = {C_i}-{{B'}_i}{{B'}_i}^T
*
         D( PART_OFFSET+ODD_SIZE+1 ) = D( PART_OFFSET+ODD_SIZE+1 ) -
     $                                 D( PART_OFFSET+ODD_SIZE )*
     $                                 ( E( PART_OFFSET+ODD_SIZE )*
     $                                 ( E( PART_OFFSET+ODD_SIZE ) ) )
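*
*         (Since E( PART_OFFSET+ODD_SIZE ) now holds {B'}_i = B_i/D,
*          the update above subtracts B_i**2/D, the usual Schur
*          complement correction.)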
*
      END IF
*       End of "if ( MYCOL .lt. NP-1 )..." block
*
   20 CONTINUE
*       If the processor could not locally factor, it jumps here.
*
      IF( MYCOL.NE.0 ) THEN
*
*         Receive previously transmitted matrix section, which forms
*         the right-hand-side for the triangular solve that calculates
*         the "spike" fillin.
*
         CALL STRRV2D( ICTXT, 'U', 'N', 1, 1, AF( 1 ), ODD_SIZE, 0,
     $                 MYCOL-1 )
*
         IF( INFO.EQ.0 ) THEN
*
*         Calculate the "spike" fillin, ${L_i} {{G}_i}^T = {D_i}$ .
*
            CALL SPTTRSV( 'N', ODD_SIZE, INT_ONE, D( PART_OFFSET+1 ),
     $                    E( PART_OFFSET+1 ), AF( 1 ), ODD_SIZE, INFO )
*
*         Divide by D
*
            DO 30 I = 1, ODD_SIZE
               AF( I ) = AF( I ) / D( PART_OFFSET+I )
   30       CONTINUE
*
*         Calculate the update block for previous proc, E_i = G_i{G_i}^T
*
*         Since there is no element-by-element vector multiplication in
*           the BLAS, this loop must be hardwired in without a BLAS call
*
            INT_TEMP = ODD_SIZE*INT_ONE + 2 + 1
            AF( INT_TEMP ) = 0
*
            DO 40 I = 1, ODD_SIZE
               AF( INT_TEMP ) = AF( INT_TEMP ) -
     $                          D( PART_OFFSET+I )*( AF( I )*
     $                          ( AF( I ) ) )
   40       CONTINUE
*
*         Initiate send of E_i to previous processor to overlap
*           with next computation.
*
            CALL SGESD2D( ICTXT, INT_ONE, INT_ONE, AF( ODD_SIZE+3 ),
     $                    INT_ONE, 0, MYCOL-1 )
*
            IF( MYCOL.LT.NP-1 ) THEN
*
*           Calculate off-diagonal block(s) of reduced system.
*           Note: for ease of use in solution of reduced system, store
*           L's off-diagonal block in transpose form.
*           {F_i}^T =  {H_i}{{B'}_i}^T
*
               AF( ODD_SIZE+1 ) = -D( PART_OFFSET+ODD_SIZE )*
     $                            ( E( PART_OFFSET+ODD_SIZE )*
     $                            AF( ODD_SIZE ) )
*
            END IF
*
         END IF
*       End of "if ( INFO.eq.0 ) then"
*
      END IF
*       End of "if ( MYCOL .ne. 0 )..."
*
*
*       Check to make sure no processors have found errors
*
      CALL IGAMX2D( ICTXT, 'A', ' ', 1, 1, INFO, 1, INFO, INFO, -1, 0,
     $              0 )
*
      IF( MYCOL.EQ.0 ) THEN
         CALL IGEBS2D( ICTXT, 'A', ' ', 1, 1, INFO, 1 )
      ELSE
         CALL IGEBR2D( ICTXT, 'A', ' ', 1, 1, INFO, 1, 0, 0 )
      END IF
*
      IF( INFO.NE.0 ) THEN
         GO TO 80
      END IF
*       No errors found, continue
*
********************************************************************
*       PHASE 2: Formation and factorization of Reduced System.
********************************************************************
*
*       Gather up local sections of reduced system
*
*     The last processor does not participate in the factorization of
*       the reduced system, having sent its E_i already.
      IF( MYCOL.EQ.NPCOL-1 ) THEN
         GO TO 70
      END IF
*
*       Initiate send of off-diag block(s) to overlap with next part.
*       Off-diagonal block needed on neighboring processor to start
*       algorithm.
*
      IF( ( MOD( MYCOL+1, 2 ).EQ.0 ) .AND. ( MYCOL.GT.0 ) ) THEN
*
         CALL SGESD2D( ICTXT, INT_ONE, INT_ONE, AF( ODD_SIZE+1 ),
     $                 INT_ONE, 0, MYCOL-1 )
*
      END IF
*
*       Copy last diagonal block into AF storage for subsequent
*         operations.
*
      AF( ODD_SIZE+2 ) = REAL( D( PART_OFFSET+ODD_SIZE+1 ) )
*
*       Receive contribution to the diagonal block that is stored
*         on this processor.
*
      IF( MYCOL.LT.NPCOL-1 ) THEN
*
         CALL SGERV2D( ICTXT, INT_ONE, INT_ONE, AF( ODD_SIZE+2+1 ),
     $                 INT_ONE, 0, MYCOL+1 )
*
*          Add contribution to diagonal block
*
         AF( ODD_SIZE+2 ) = AF( ODD_SIZE+2 ) + AF( ODD_SIZE+3 )
*
      END IF
*
*       *************************************
*       Modification Loop
*
*       The distance for sending and receiving for each level starts
*         at 1 for the first level.
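*       For illustration (NPCOL = 8 assumed; process column 7, the
*         last, has already branched to 70): at LEVEL_DIST = 1,
*         columns 0, 2, 4, and 6 leave this loop to modify others; at
*         LEVEL_DIST = 2, columns 1 and 5 leave; column 3 leaves at
*         LEVEL_DIST = 4.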
      LEVEL_DIST = 1
*
*       Do until this proc is needed to modify other procs' equations
*
   50 CONTINUE
      IF( MOD( ( MYCOL+1 ) / LEVEL_DIST, 2 ).NE.0 )
     $   GO TO 60
*
*         Receive and add contribution to diagonal block from the left
*
      IF( MYCOL-LEVEL_DIST.GE.0 ) THEN
         CALL SGERV2D( ICTXT, INT_ONE, INT_ONE, WORK( 1 ), INT_ONE, 0,
     $                 MYCOL-LEVEL_DIST )
*
         AF( ODD_SIZE+2 ) = AF( ODD_SIZE+2 ) + WORK( 1 )
*
      END IF
*
*         Receive and add contribution to diagonal block from the right
*
      IF( MYCOL+LEVEL_DIST.LT.NPCOL-1 ) THEN
         CALL SGERV2D( ICTXT, INT_ONE, INT_ONE, WORK( 1 ), INT_ONE, 0,
     $                 MYCOL+LEVEL_DIST )
*
         AF( ODD_SIZE+2 ) = AF( ODD_SIZE+2 ) + WORK( 1 )
*
      END IF
*
      LEVEL_DIST = LEVEL_DIST*2
*
      GO TO 50
   60 CONTINUE
*       [End of GOTO Loop]
*
*
*       *********************************
*       Calculate and use this proc's blocks to modify other procs'...
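*       A zero pivot here means the reduced system is not positive
*         definite; NPCOL plus this process's column index is
*         reported, per the INFO convention above (INFO = K > NPROCS).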
      IF( AF( ODD_SIZE+2 ).EQ.ZERO ) THEN
         INFO = NPCOL + MYCOL
      END IF
*
*       ****************************************************************
*       Receive offdiagonal block from processor to right.
*         If this is the first group of processors, the receive comes
*         from a different processor than otherwise.
*
      IF( LEVEL_DIST.EQ.1 ) THEN
         COMM_PROC = MYCOL + 1
*
*           Move block into place that it will be expected to be for
*             calcs.
*
         AF( ODD_SIZE+3 ) = AF( ODD_SIZE+1 )
*
      ELSE
         COMM_PROC = MYCOL + LEVEL_DIST / 2
      END IF
*
      IF( MYCOL / LEVEL_DIST.LE.( NPCOL-1 ) / LEVEL_DIST-2 ) THEN
*
         CALL SGERV2D( ICTXT, INT_ONE, INT_ONE, AF( ODD_SIZE+1 ),
     $                 INT_ONE, 0, COMM_PROC )
*
         IF( INFO.EQ.0 ) THEN
*
*         Modify upper off_diagonal block with diagonal block
*
            AF( ODD_SIZE+1 ) = AF( ODD_SIZE+1 ) / AF( ODD_SIZE+2 )
*
         END IF
*         End of "if ( info.eq.0 ) then"
*
*         Calculate contribution from this block to next diagonal block
*
         WORK( 1 ) = -ONE*AF( ODD_SIZE+1 )*AF( ODD_SIZE+2 )*
     $               ( AF( ODD_SIZE+1 ) )
*
*         Send contribution to diagonal block's owning processor.
*
         CALL SGESD2D( ICTXT, INT_ONE, INT_ONE, WORK( 1 ), INT_ONE, 0,
     $                 MYCOL+LEVEL_DIST )
*
      END IF
*       End of "if( mycol/level_dist .le. (npcol-1)/level_dist-2 )..."
*
*       ****************************************************************
*       Receive off_diagonal block from left and use to finish with this
*         processor.
*
      IF( ( MYCOL / LEVEL_DIST.GT.0 ) .AND.
     $    ( MYCOL / LEVEL_DIST.LE.( NPCOL-1 ) / LEVEL_DIST-1 ) ) THEN
*
         IF( LEVEL_DIST.GT.1 ) THEN
*
*           Receive offdiagonal block(s) from proc level_dist/2 to the
*           left
*
            CALL SGERV2D( ICTXT, INT_ONE, INT_ONE, AF( ODD_SIZE+2+1 ),
     $                    INT_ONE, 0, MYCOL-LEVEL_DIST / 2 )
*
         END IF
*
         IF( INFO.EQ.0 ) THEN
*
*         Use diagonal block(s) to modify this offdiagonal block
*
            AF( ODD_SIZE+3 ) = ( AF( ODD_SIZE+3 ) ) / AF( ODD_SIZE+2 )
*
         END IF
*         End of "if( info.eq.0 ) then"
*
*         Use offdiag block(s) to calculate modification to diag block
*           of processor to the left
*
         WORK( 1 ) = -ONE*AF( ODD_SIZE+3 )*AF( ODD_SIZE+2 )*
     $               ( AF( ODD_SIZE+3 ) )
*
*         Send contribution to diagonal block's owning processor.
*
         CALL SGESD2D( ICTXT, INT_ONE, INT_ONE, WORK( 1 ), INT_ONE, 0,
     $                 MYCOL-LEVEL_DIST )
*
*         *******************************************************
*
         IF( MYCOL / LEVEL_DIST.LE.( NPCOL-1 ) / LEVEL_DIST-2 ) THEN
*
*           Decide which processor offdiagonal block(s) goes to
*
            IF( ( MOD( MYCOL / ( 2*LEVEL_DIST ), 2 ) ).EQ.0 ) THEN
               COMM_PROC = MYCOL + LEVEL_DIST
            ELSE
               COMM_PROC = MYCOL - LEVEL_DIST
            END IF
*
*           Use offdiagonal blocks to calculate offdiag
*             block to send to neighboring processor. Depending
*             on circumstances, may need to transpose the matrix.
*
            WORK( 1 ) = -ONE*AF( ODD_SIZE+3 )*AF( ODD_SIZE+2 )*
     $                  AF( ODD_SIZE+1 )
*
*           Send contribution to offdiagonal block's owning processor.
*
            CALL SGESD2D( ICTXT, INT_ONE, INT_ONE, WORK( 1 ), INT_ONE,
     $                    0, COMM_PROC )
*
         END IF
*
      END IF
*       End of "if( mycol/level_dist.le. (npcol-1)/level_dist -1 )..."
*
   70 CONTINUE
*
   80 CONTINUE
*
*     Free BLACS space used to hold standard-form grid.
*
      IF( ICTXT_SAVE.NE.ICTXT_NEW ) THEN
         CALL BLACS_GRIDEXIT( ICTXT_NEW )
      END IF
*
   90 CONTINUE
*
*     Restore saved input parameters
*
      ICTXT = ICTXT_SAVE
      NP = NP_SAVE
*
*     Output minimum worksize
*
      WORK( 1 ) = WORK_SIZE_MIN
*
*     Make INFO consistent across processors
*
      CALL IGAMX2D( ICTXT, 'A', ' ', 1, 1, INFO, 1, INFO, INFO, -1, 0,
     $              0 )
*
      IF( MYCOL.EQ.0 ) THEN
         CALL IGEBS2D( ICTXT, 'A', ' ', 1, 1, INFO, 1 )
      ELSE
         CALL IGEBR2D( ICTXT, 'A', ' ', 1, 1, INFO, 1, 0, 0 )
      END IF
*
      RETURN
*
*     End of PSPTTRF
*
      END