1      SUBROUTINE PZPTTRF( N, D, E, JA, DESCA, AF, LAF, WORK, LWORK,
2     $                    INFO )
3*
4*  -- ScaLAPACK routine (version 2.0.2) --
5*     Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver
6*     May 1 2012
7*
8*     .. Scalar Arguments ..
9      INTEGER            INFO, JA, LAF, LWORK, N
10*     ..
11*     .. Array Arguments ..
12      INTEGER            DESCA( * )
13      COMPLEX*16         AF( * ), E( * ), WORK( * )
14      DOUBLE PRECISION   D( * )
15*     ..
16*
17*
18*  Purpose
19*  =======
20*
*  PZPTTRF computes a Cholesky factorization
*  of an N-by-N complex tridiagonal
*  Hermitian positive definite distributed matrix
*  A(1:N, JA:JA+N-1).
25*  Reordering is used to increase parallelism in the factorization.
26*  This reordering results in factors that are DIFFERENT from those
27*  produced by equivalent sequential codes. These factors cannot
28*  be used directly by users; however, they can be used in
29*  subsequent calls to PZPTTRS to solve linear systems.
30*
31*  The factorization has the form
32*
*          P A(1:N, JA:JA+N-1) P^T = U**H D U  or
*
*          P A(1:N, JA:JA+N-1) P^T = L D L**H,
36*
37*  where U is a tridiagonal upper triangular matrix and L is tridiagonal
38*  lower triangular, and P is a permutation matrix.
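*
*  As an illustration only (not part of the library specification), a
*  caller on a one-dimensional process grid might invoke the routine
*  as sketched below; DLOC, ELOC, AF, WORK, LAF and LWORK are names
*  assumed by the sketch, and DESCA is a type 501 descriptor set up
*  as described in the Notes further down.
*
*        DOUBLE PRECISION   DLOC( * )
*        COMPLEX*16         ELOC( * ), AF( * ), WORK( * )
*        INTEGER            DESCA( 7 ), INFO
*        ...
*        CALL PZPTTRF( N, DLOC, ELOC, 1, DESCA, AF, LAF,
*       $              WORK, LWORK, INFO )
*
*  On return with INFO = 0, the factors held in DLOC, ELOC and AF may
*  be passed, unmodified, to PZPTTRS in order to solve linear systems.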
39*
40*  =====================================================================
41*
42*  Arguments
43*  =========
44*
45*
46*  N       (global input) INTEGER
47*          The number of rows and columns to be operated on, i.e. the
48*          order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
49*
*  D       (local input/local output) DOUBLE PRECISION pointer to local
*          part of global vector storing the main diagonal of the
*          matrix.
*          On exit, this array contains information on the factors
*            of the matrix.
*          Must be of size >= DESCA( NB_ ).
56*
57*  E       (local input/local output) COMPLEX*16 pointer to local
58*          part of global vector storing the upper diagonal of the
*          matrix. Globally, E(n) is not referenced, and E must be
*          aligned with D.
*          On exit, this array contains information on the factors
*            of the matrix.
63*          Must be of size >= DESCA( NB_ ).
64*
65*  JA      (global input) INTEGER
66*          The index in the global array A that points to the start of
67*          the matrix to be operated on (which may be either all of A
68*          or a submatrix of A).
69*
70*  DESCA   (global and local input) INTEGER array of dimension DLEN.
71*          if 1D type (DTYPE_A=501 or 502), DLEN >= 7;
72*          if 2D type (DTYPE_A=1), DLEN >= 9.
73*          The array descriptor for the distributed matrix A.
*          Contains information on the mapping of A to memory. Please
75*          see NOTES below for full description and options.
76*
77*  AF      (local output) COMPLEX*16 array, dimension LAF.
78*          Auxiliary Fillin Space.
79*          Fillin is created during the factorization routine
80*          PZPTTRF and this is stored in AF. If a linear system
81*          is to be solved using PZPTTRS after the factorization
82*          routine, AF *must not be altered* after the factorization.
83*
84*  LAF     (local input) INTEGER
85*          Size of user-input Auxiliary Fillin space AF. Must be >=
*          12*NPCOL + 3*NB.
*          If LAF is not large enough, an error code will be returned
*          and the minimum acceptable size will be returned in AF( 1 ).
89*
90*  WORK    (local workspace/local output)
91*          COMPLEX*16 temporary workspace. This space may
92*          be overwritten in between calls to routines. WORK must be
93*          the size given in LWORK.
94*          On exit, WORK( 1 ) contains the minimal LWORK.
95*
96*  LWORK   (local input or global input) INTEGER
97*          Size of user-input workspace WORK.
98*          If LWORK is too small, the minimal acceptable size will be
99*          returned in WORK(1) and an error code is returned. LWORK>=
100*          8*NPCOL
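*
*          As a sketch (not mandated by the library; NPCOL is the
*          width of the process grid and NB the blocking factor used
*          by the caller), the sizes required for AF and WORK may be
*          computed directly as
*
*             LAF   = 12*NPCOL + 3*NB
*             LWORK = 8*NPCOL
*
*          Alternatively, calling the routine with LWORK = -1 acts as
*          a workspace query: provided the remaining arguments pass
*          their checks, the minimal LWORK is returned in WORK( 1 )
*          and no factorization is performed.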
101*
102*  INFO    (local output) INTEGER
103*          = 0:  successful exit
*          < 0:  If the i-th argument is an array and the j-th entry
*                had an illegal value, then INFO = -(i*100+j); if the
*                i-th argument is a scalar and had an illegal value,
*                then INFO = -i.
*          > 0:  If INFO = K <= NPROCS, the submatrix stored on
*                processor K and factored locally was not positive
*                definite, and the factorization was not completed.
*                If INFO = K > NPROCS, the submatrix stored on
*                processor K-NPROCS, representing interactions with
*                other processors, was not positive definite, and the
*                factorization was not completed.
117*
118*  =====================================================================
119*
120*
121*  Restrictions
122*  ============
123*
124*  The following are restrictions on the input parameters. Some of these
125*    are temporary and will be removed in future releases, while others
126*    may reflect fundamental technical limitations.
127*
128*    Non-cyclic restriction: VERY IMPORTANT!
129*      P*NB>= mod(JA-1,NB)+N.
130*      The mapping for matrices must be blocked, reflecting the nature
131*      of the divide and conquer algorithm as a task-parallel algorithm.
132*      This formula in words is: no processor may have more than one
133*      chunk of the matrix.
134*
135*    Blocksize cannot be too small:
136*      If the matrix spans more than one processor, the following
137*      restriction on NB, the size of each block on each processor,
138*      must hold:
139*      NB >= 2
140*      The bulk of parallel computation is done on the matrix of size
141*      O(NB) on each processor. If this is too small, divide and conquer
142*      is a poor choice of algorithm.
143*
144*    Submatrix reference:
*      JA = IB, where IB is the row index of the right-hand side in
*        the subsequent call to PZPTTRS.
*      Alignment restriction that prevents unnecessary communication.
147*
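*    The following sketch (illustrative only) chooses a blocking
*      factor NB that satisfies the restrictions above for a matrix
*      of order N distributed over NPCOL processes, assuming JA = 1:
*
*        NB = MAX( 2, ( N + NPCOL - 1 ) / NPCOL )
*
*      With this choice MOD( JA-1, NB ) + N = N <= NPCOL*NB, so no
*      process holds more than one chunk of the matrix, and NB >= 2.
*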
148*
149*  =====================================================================
150*
151*
152*  Notes
153*  =====
154*
155*  If the factorization routine and the solve routine are to be called
156*    separately (to solve various sets of righthand sides using the same
157*    coefficient matrix), the auxiliary space AF *must not be altered*
158*    between calls to the factorization routine and the solve routine.
159*
160*  The best algorithm for solving banded and tridiagonal linear systems
161*    depends on a variety of parameters, especially the bandwidth.
162*    Currently, only algorithms designed for the case N/P >> bw are
163*    implemented. These go by many names, including Divide and Conquer,
164*    Partitioning, domain decomposition-type, etc.
165*    For tridiagonal matrices, it is obvious: N/P >> bw(=1), and so D&C
166*    algorithms are the appropriate choice.
167*
168*  Algorithm description: Divide and Conquer
169*
*    The Divide and Conquer algorithm assumes the matrix is narrowly
171*      banded compared with the number of equations. In this situation,
172*      it is best to distribute the input matrix A one-dimensionally,
173*      with columns atomic and rows divided amongst the processes.
174*      The basic algorithm divides the tridiagonal matrix up into
175*      P pieces with one stored on each processor,
176*      and then proceeds in 2 phases for the factorization or 3 for the
177*      solution of a linear system.
178*      1) Local Phase:
179*         The individual pieces are factored independently and in
180*         parallel. These factors are applied to the matrix creating
181*         fillin, which is stored in a non-inspectable way in auxiliary
182*         space AF. Mathematically, this is equivalent to reordering
183*         the matrix A as P A P^T and then factoring the principal
184*         leading submatrix of size equal to the sum of the sizes of
185*         the matrices factored on each processor. The factors of
186*         these submatrices overwrite the corresponding parts of A
187*         in memory.
188*      2) Reduced System Phase:
*         A small system of order (P-1) is formed representing
*         interaction of the larger blocks, and is stored (as are its
*         factors) in the space AF. A parallel Block Cyclic Reduction
*         algorithm is used. For a linear system, a parallel front solve
*         followed by an analogous backsolve, both using the structure
*         of the factored matrix, are performed.
*      3) Backsubstitution Phase:
196*         For a linear system, a local backsubstitution is performed on
197*         each processor in parallel.
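*
*      (In this routine, phases 1 and 2 correspond to the code
*        sections labelled "PHASE 1: Local computation phase" and
*        "PHASE 2: Formation and factorization of Reduced System"
*        below; phase 3 is carried out by the solve routine PZPTTRS.)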
198*
199*
200*  Descriptors
201*  ===========
202*
203*  Descriptors now have *types* and differ from ScaLAPACK 1.0.
204*
205*  Note: tridiagonal codes can use either the old two dimensional
206*    or new one-dimensional descriptors, though the processor grid in
207*    both cases *must be one-dimensional*. We describe both types below.
208*
209*  Each global data object is described by an associated description
210*  vector.  This vector stores the information required to establish
211*  the mapping between an object element and its corresponding process
212*  and memory location.
213*
*  Let A be a generic term for any 2D block cyclically distributed
*  array.  Such a global array has an associated description vector
*  DESCA.
216*  In the following comments, the character _ should be read as
217*  "of the global array".
218*
219*  NOTATION        STORED IN      EXPLANATION
220*  --------------- -------------- --------------------------------------
221*  DTYPE_A(global) DESCA( DTYPE_ )The descriptor type.  In this case,
222*                                 DTYPE_A = 1.
223*  CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
224*                                 the BLACS process grid A is distribu-
225*                                 ted over. The context itself is glo-
226*                                 bal, but the handle (the integer
227*                                 value) may vary.
228*  M_A    (global) DESCA( M_ )    The number of rows in the global
229*                                 array A.
230*  N_A    (global) DESCA( N_ )    The number of columns in the global
231*                                 array A.
232*  MB_A   (global) DESCA( MB_ )   The blocking factor used to distribute
233*                                 the rows of the array.
234*  NB_A   (global) DESCA( NB_ )   The blocking factor used to distribute
235*                                 the columns of the array.
236*  RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
237*                                 row of the array A is distributed.
238*  CSRC_A (global) DESCA( CSRC_ ) The process column over which the
239*                                 first column of the array A is
240*                                 distributed.
241*  LLD_A  (local)  DESCA( LLD_ )  The leading dimension of the local
242*                                 array.  LLD_A >= MAX(1,LOCr(M_A)).
243*
244*  Let K be the number of rows or columns of a distributed matrix,
245*  and assume that its process grid has dimension p x q.
246*  LOCr( K ) denotes the number of elements of K that a process
247*  would receive if K were distributed over the p processes of its
248*  process column.
249*  Similarly, LOCc( K ) denotes the number of elements of K that a
250*  process would receive if K were distributed over the q processes of
251*  its process row.
252*  The values of LOCr() and LOCc() may be determined via a call to the
253*  ScaLAPACK tool function, NUMROC:
254*          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
255*          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
256*  An upper bound for these quantities may be computed by:
257*          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
258*          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
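*
*  For example (a sketch; MYROW, MYCOL, NPROW and NPCOL are assumed to
*  have been obtained from BLACS_GRIDINFO, and LOCQ and LLDA are names
*  local to the sketch):
*
*          INTEGER            LLDA, LOCQ, NUMROC
*          EXTERNAL           NUMROC
*          ...
*          LOCQ = NUMROC( N_A, NB_A, MYCOL, CSRC_A, NPCOL )
*          LLDA = MAX( 1, NUMROC( M_A, MB_A, MYROW, RSRC_A, NPROW ) )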
259*
260*
261*  One-dimensional descriptors:
262*
263*  One-dimensional descriptors are a new addition to ScaLAPACK since
264*    version 1.0. They simplify and shorten the descriptor for 1D
265*    arrays.
266*
267*  Since ScaLAPACK supports two-dimensional arrays as the fundamental
268*    object, we allow 1D arrays to be distributed either over the
269*    first dimension of the array (as if the grid were P-by-1) or the
270*    2nd dimension (as if the grid were 1-by-P). This choice is
271*    indicated by the descriptor type (501 or 502)
272*    as described below.
273*    However, for tridiagonal matrices, since the objects being
274*    distributed are the individual vectors storing the diagonals, we
275*    have adopted the convention that both the P-by-1 descriptor and
276*    the 1-by-P descriptor are allowed and are equivalent for
277*    tridiagonal matrices. Thus, for tridiagonal matrices,
278*    DTYPE_A = 501 or 502 can be used interchangeably
279*    without any other change.
280*  We require that the distributed vectors storing the diagonals of a
281*    tridiagonal matrix be aligned with each other. Because of this, a
*    single descriptor, DESCA, serves to describe the distribution
*    of all diagonals simultaneously.
284*
285*    IMPORTANT NOTE: the actual BLACS grid represented by the
286*    CTXT entry in the descriptor may be *either*  P-by-1 or 1-by-P
287*    irrespective of which one-dimensional descriptor type
288*    (501 or 502) is input.
289*    This routine will interpret the grid properly either way.
290*    ScaLAPACK routines *do not support intercontext operations* so that
291*    the grid passed to a single ScaLAPACK routine *must be the same*
292*    for all array descriptors passed to that routine.
293*
294*    NOTE: In all cases where 1D descriptors are used, 2D descriptors
295*    may also be used, since a one-dimensional array is a special case
296*    of a two-dimensional array with one dimension of size unity.
297*    The two-dimensional array used in this case *must* be of the
298*    proper orientation:
*      If the appropriate one-dimensional descriptor is DTYPE_A=501
*      (1 by P type), then the two dimensional descriptor must
*      have a CTXT value that refers to a 1 by P BLACS grid;
*      If the appropriate one-dimensional descriptor is DTYPE_A=502
303*      (P by 1 type), then the two dimensional descriptor must
304*      have a CTXT value that refers to a P by 1 BLACS grid.
305*
306*
307*  Summary of allowed descriptors, types, and BLACS grids:
308*  DTYPE           501         502         1         1
309*  BLACS grid      1xP or Px1  1xP or Px1  1xP       Px1
310*  -----------------------------------------------------
311*  A               OK          OK          OK        NO
*  B               NO          OK          NO        OK
*
*    (Here B refers to the right-hand-side matrix of the companion
*    solve routine PZPTTRS; only A is an argument of PZPTTRF.)
*
*  Let A be a generic term for any 1D block cyclically distributed
*  array.  Such a global array has an associated description vector
*  DESCA.
316*  In the following comments, the character _ should be read as
317*  "of the global array".
318*
319*  NOTATION        STORED IN  EXPLANATION
320*  --------------- ---------- ------------------------------------------
321*  DTYPE_A(global) DESCA( 1 ) The descriptor type. For 1D grids,
322*                                TYPE_A = 501: 1-by-P grid.
323*                                TYPE_A = 502: P-by-1 grid.
324*  CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
325*                                the BLACS process grid A is distribu-
326*                                ted over. The context itself is glo-
327*                                bal, but the handle (the integer
328*                                value) may vary.
329*  N_A    (global) DESCA( 3 ) The size of the array dimension being
330*                                distributed.
331*  NB_A   (global) DESCA( 4 ) The blocking factor used to distribute
332*                                the distributed dimension of the array.
333*  SRC_A  (global) DESCA( 5 ) The process row or column over which the
334*                                first row or column of the array
335*                                is distributed.
336*  Ignored         DESCA( 6 ) Ignored for tridiagonal matrices.
337*  Reserved        DESCA( 7 ) Reserved for future use.
338*
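*
*  As an illustration (a sketch only; ICTXT, N and NB stand for the
*  caller's BLACS context, global problem size and blocking factor),
*  a type 501 descriptor for the distributed diagonals D and E could
*  be filled in as
*
*          INTEGER            DESCA( 7 )
*          ...
*          DESCA( 1 ) = 501
*          DESCA( 2 ) = ICTXT
*          DESCA( 3 ) = N
*          DESCA( 4 ) = NB
*          DESCA( 5 ) = 0
*          DESCA( 6 ) = 0
*          DESCA( 7 ) = 0
*
*  Entries 6 and 7 are ignored/reserved for tridiagonal matrices and
*  are simply set to zero here.  The same DESCA describes both D and
*  E, since the diagonals must be aligned with each other.
*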
339*
340*
341*  =====================================================================
342*
343*  Code Developer: Andrew J. Cleary, University of Tennessee.
344*    Current address: Lawrence Livermore National Labs.
345*  This version released: August, 2001.
346*
347*  =====================================================================
348*
349*     ..
350*     .. Parameters ..
351      DOUBLE PRECISION   ONE, ZERO
352      PARAMETER          ( ONE = 1.0D+0 )
353      PARAMETER          ( ZERO = 0.0D+0 )
354      COMPLEX*16         CONE, CZERO
355      PARAMETER          ( CONE = ( 1.0D+0, 0.0D+0 ) )
356      PARAMETER          ( CZERO = ( 0.0D+0, 0.0D+0 ) )
357      INTEGER            INT_ONE
358      PARAMETER          ( INT_ONE = 1 )
359      INTEGER            DESCMULT, BIGNUM
360      PARAMETER          (DESCMULT = 100, BIGNUM = DESCMULT * DESCMULT)
361      INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
362     $                   LLD_, MB_, M_, NB_, N_, RSRC_
363      PARAMETER          ( BLOCK_CYCLIC_2D = 1, DLEN_ = 9, DTYPE_ = 1,
364     $                     CTXT_ = 2, M_ = 3, N_ = 4, MB_ = 5, NB_ = 6,
365     $                     RSRC_ = 7, CSRC_ = 8, LLD_ = 9 )
366*     ..
367*     .. Local Scalars ..
368      INTEGER            COMM_PROC, CSRC, FIRST_PROC, I, ICTXT,
369     $                   ICTXT_NEW, ICTXT_SAVE, IDUM3, INT_TEMP, JA_NEW,
370     $                   LAF_MIN, LEVEL_DIST, LLDA, MYCOL, MYROW,
371     $                   MY_NUM_COLS, NB, NP, NPCOL, NPROW, NP_SAVE,
372     $                   ODD_SIZE, PART_OFFSET, PART_SIZE, RETURN_CODE,
373     $                   STORE_N_A, TEMP, WORK_SIZE_MIN
374*     ..
375*     .. Local Arrays ..
376      INTEGER            DESCA_1XP( 7 ), PARAM_CHECK( 7, 3 )
377*     ..
378*     .. External Subroutines ..
379      EXTERNAL           BLACS_GET, BLACS_GRIDEXIT, BLACS_GRIDINFO,
380     $                   DESC_CONVERT, GLOBCHK, PXERBLA, RESHAPE, ZAXPY,
381     $                   ZGEMM, ZGERV2D, ZGESD2D, ZLAMOV, ZLATCPY,
382     $                   ZPBTRF, ZPOTRF, ZSYRK, ZTBTRS, ZTRMM, ZTRRV2D,
383     $                   ZTRSD2D, ZTRSM, ZTRTRS
384*     ..
385*     .. External Functions ..
386      LOGICAL            LSAME
387      INTEGER            NUMROC
388      EXTERNAL           LSAME, NUMROC
389*     ..
390*     .. Intrinsic Functions ..
391      INTRINSIC          ICHAR, MIN, MOD
392*     ..
393*     .. Executable Statements ..
394*
395*     Test the input parameters
396*
397      INFO = 0
398*
399*     Convert descriptor into standard form for easy access to
400*        parameters, check that grid is of right shape.
401*
402      DESCA_1XP( 1 ) = 501
403*
404      TEMP = DESCA( DTYPE_ )
405      IF( TEMP .EQ. 502 ) THEN
406*        Temporarily set the descriptor type to 1xP type
407         DESCA( DTYPE_ ) = 501
408      ENDIF
409*
410      CALL DESC_CONVERT( DESCA, DESCA_1XP, RETURN_CODE )
411*
412      DESCA( DTYPE_ ) = TEMP
413*
414      IF( RETURN_CODE .NE. 0) THEN
415         INFO = -( 5*100 + 2 )
416      ENDIF
417*
418*     Get values out of descriptor for use in code.
419*
420      ICTXT = DESCA_1XP( 2 )
421      CSRC = DESCA_1XP( 5 )
422      NB = DESCA_1XP( 4 )
423      LLDA = DESCA_1XP( 6 )
424      STORE_N_A = DESCA_1XP( 3 )
425*
426*     Get grid parameters
427*
428*
429      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
430      NP = NPROW * NPCOL
431*
432*
433*
434      IF( LWORK .LT. -1) THEN
435         INFO = -9
436      ELSE IF ( LWORK .EQ. -1 ) THEN
437         IDUM3 = -1
438      ELSE
439         IDUM3 = 1
440      ENDIF
441*
442      IF( N .LT. 0 ) THEN
443         INFO = -1
444      ENDIF
445*
446      IF( N+JA-1 .GT. STORE_N_A ) THEN
447         INFO = -( 5*100 + 6 )
448      ENDIF
449*
450*     Argument checking that is specific to Divide & Conquer routine
451*
452      IF( NPROW .NE. 1 ) THEN
453         INFO = -( 5*100+2 )
454      ENDIF
455*
456      IF( N .GT. NP*NB-MOD( JA-1, NB )) THEN
457         INFO = -( 1 )
458         CALL PXERBLA( ICTXT,
459     $      'PZPTTRF, D&C alg.: only 1 block per proc',
460     $      -INFO )
461         RETURN
462      ENDIF
463*
464      IF((JA+N-1.GT.NB) .AND. ( NB.LT.2*INT_ONE )) THEN
465         INFO = -( 5*100+4 )
466         CALL PXERBLA( ICTXT,
467     $      'PZPTTRF, D&C alg.: NB too small',
468     $      -INFO )
469         RETURN
470      ENDIF
471*
472*
473*     Check auxiliary storage size
474*
475      LAF_MIN = (12*NPCOL + 3*NB)
476*
477      IF( LAF .LT. LAF_MIN ) THEN
478         INFO = -7
479*        put minimum value of laf into AF( 1 )
480         AF( 1 ) = LAF_MIN
481         CALL PXERBLA( ICTXT,
482     $      'PZPTTRF: auxiliary storage error ',
483     $      -INFO )
484         RETURN
485      ENDIF
486*
487*     Check worksize
488*
489      WORK_SIZE_MIN = 8*NPCOL
490*
491      WORK( 1 ) = WORK_SIZE_MIN
492*
493      IF( LWORK .LT. WORK_SIZE_MIN ) THEN
494         IF( LWORK .NE. -1 ) THEN
495           INFO = -9
496           CALL PXERBLA( ICTXT,
497     $      'PZPTTRF: worksize error ',
498     $      -INFO )
499         ENDIF
500         RETURN
501      ENDIF
502*
503*     Pack params and positions into arrays for global consistency check
504*
505      PARAM_CHECK(  7, 1 ) = DESCA(5)
506      PARAM_CHECK(  6, 1 ) = DESCA(4)
507      PARAM_CHECK(  5, 1 ) = DESCA(3)
508      PARAM_CHECK(  4, 1 ) = DESCA(1)
509      PARAM_CHECK(  3, 1 ) = JA
510      PARAM_CHECK(  2, 1 ) = N
511      PARAM_CHECK(  1, 1 ) = IDUM3
512*
513      PARAM_CHECK(  7, 2 ) = 505
514      PARAM_CHECK(  6, 2 ) = 504
515      PARAM_CHECK(  5, 2 ) = 503
516      PARAM_CHECK(  4, 2 ) = 501
517      PARAM_CHECK(  3, 2 ) = 4
518      PARAM_CHECK(  2, 2 ) = 1
519      PARAM_CHECK(  1, 2 ) = 9
520*
521*     Want to find errors with MIN( ), so if no error, set it to a big
*     number. If there already is an error, multiply by the
523*     descriptor multiplier.
524*
525      IF( INFO.GE.0 ) THEN
526         INFO = BIGNUM
527      ELSE IF( INFO.LT.-DESCMULT ) THEN
528         INFO = -INFO
529      ELSE
530         INFO = -INFO * DESCMULT
531      END IF
532*
533*     Check consistency across processors
534*
535      CALL GLOBCHK( ICTXT, 7, PARAM_CHECK, 7,
536     $              PARAM_CHECK( 1, 3 ), INFO )
537*
538*     Prepare output: set info = 0 if no error, and divide by DESCMULT
539*     if error is not in a descriptor entry.
540*
541      IF( INFO.EQ.BIGNUM ) THEN
542         INFO = 0
543      ELSE IF( MOD( INFO, DESCMULT ) .EQ. 0 ) THEN
544         INFO = -INFO / DESCMULT
545      ELSE
546         INFO = -INFO
547      END IF
548*
549      IF( INFO.LT.0 ) THEN
550         CALL PXERBLA( ICTXT, 'PZPTTRF', -INFO )
551         RETURN
552      END IF
553*
554*     Quick return if possible
555*
556      IF( N.EQ.0 )
557     $   RETURN
558*
559*
560*     Adjust addressing into matrix space to properly get into
561*        the beginning part of the relevant data
562*
563      PART_OFFSET = NB*( (JA-1)/(NPCOL*NB) )
564*
565      IF ( (MYCOL-CSRC) .LT. (JA-PART_OFFSET-1)/NB ) THEN
566         PART_OFFSET = PART_OFFSET + NB
567      ENDIF
568*
569      IF ( MYCOL .LT. CSRC ) THEN
570         PART_OFFSET = PART_OFFSET - NB
571      ENDIF
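*
*     (PART_OFFSET is the offset, within this process's local pieces
*        of D and E, of the first column of A(1:N, JA:JA+N-1) owned
*        by this process; it receives a final adjustment below on the
*        first process of the new grid.)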
572*
573*     Form a new BLACS grid (the "standard form" grid) with only procs
574*        holding part of the matrix, of size 1xNP where NP is adjusted,
575*        starting at csrc=0, with JA modified to reflect dropped procs.
576*
577*     First processor to hold part of the matrix:
578*
579      FIRST_PROC = MOD( ( JA-1 )/NB+CSRC, NPCOL )
580*
*     Calculate new JA while dropping off unused processors.
582*
583      JA_NEW = MOD( JA-1, NB ) + 1
584*
585*     Save and compute new value of NP
586*
587      NP_SAVE = NP
588      NP = ( JA_NEW+N-2 )/NB + 1
589*
590*     Call utility routine that forms "standard-form" grid
591*
592      CALL RESHAPE( ICTXT, INT_ONE, ICTXT_NEW, INT_ONE,
593     $              FIRST_PROC, INT_ONE, NP )
594*
595*     Use new context from standard grid as context.
596*
597      ICTXT_SAVE = ICTXT
598      ICTXT = ICTXT_NEW
599      DESCA_1XP( 2 ) = ICTXT_NEW
600*
601*     Get information about new grid.
602*
603      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
604*
605*     Drop out processors that do not have part of the matrix.
606*
607      IF( MYROW .LT. 0 ) THEN
608         GOTO 1234
609      ENDIF
610*
611*     ********************************
612*     Values reused throughout routine
613*
614*     User-input value of partition size
615*
616      PART_SIZE = NB
617*
618*     Number of columns in each processor
619*
620      MY_NUM_COLS = NUMROC( N, PART_SIZE, MYCOL, 0, NPCOL )
621*
622*     Offset in columns to beginning of main partition in each proc
623*
624      IF ( MYCOL .EQ. 0 ) THEN
625        PART_OFFSET = PART_OFFSET+MOD( JA_NEW-1, PART_SIZE )
626        MY_NUM_COLS = MY_NUM_COLS - MOD(JA_NEW-1, PART_SIZE )
627      ENDIF
628*
629*     Size of main (or odd) partition in each processor
630*
631      ODD_SIZE = MY_NUM_COLS
632      IF ( MYCOL .LT. NP-1 ) THEN
633         ODD_SIZE = ODD_SIZE - INT_ONE
634      ENDIF
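*
*     (Each process except the last reserves its final equation for
*        the reduced system; ODD_SIZE is the order of the block A_i
*        that is factored locally in Phase 1.)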
635*
636*
637*       Zero out space for fillin
638*
639        DO 10  I=1, LAF_MIN
640          AF( I ) = CZERO
641   10   CONTINUE
642*
643*     Begin main code
644*
645*
646********************************************************************
647*       PHASE 1: Local computation phase.
648********************************************************************
649*
650*
651        IF ( MYCOL .LT. NP-1 ) THEN
652*         Transfer last triangle D_i of local matrix to next processor
653*         which needs it to calculate fillin due to factorization of
654*         its main (odd) block A_i.
655*         Overlap the send with the factorization of A_i.
656*
657          CALL ZTRSD2D( ICTXT, 'U', 'N', 1, 1,
658     $                  E( PART_OFFSET+ODD_SIZE+1 ), LLDA-1, 0,
659     $                  MYCOL+1 )
660*
661        ENDIF
662*
663*
664*       Factor main partition A_i = L_i {L_i}^C in each processor
665*       Or A_i = {U_i}^C {U_i} if E is the upper superdiagonal
666*
667        CALL ZPTTRF( ODD_SIZE, D( PART_OFFSET+1 ), E( PART_OFFSET+1 ),
668     $                    INFO )
669*
670        IF( INFO.NE.0 ) THEN
671           INFO = MYCOL+1
672           GOTO 1500
673        ENDIF
674*
675*
676        IF ( MYCOL .LT. NP-1 ) THEN
677*         Apply factorization to odd-even connection block B_i
678*
679*
680*         Perform the triangular system solve {L_i}{{B'}_i}^C = {B_i}^C
681*           by dividing B_i by diagonal element
682*
683          E( PART_OFFSET+ODD_SIZE ) = E( PART_OFFSET+ODD_SIZE )/
684     $                                D( PART_OFFSET+ODD_SIZE )
685*
686*
687*
688*         Compute contribution to diagonal block(s) of reduced system.
689*          {C'}_i = {C_i}-{{B'}_i}{{B'}_i}^C
690*
691          D( PART_OFFSET+ODD_SIZE+1 ) = D( PART_OFFSET+ODD_SIZE+1 )-
692     $       D( PART_OFFSET+ODD_SIZE )*DBLE( E( PART_OFFSET+ODD_SIZE )*
693     $       DCONJG( E( PART_OFFSET+ODD_SIZE ) ) )
694*
695        ENDIF
*       End of "if ( MYCOL .lt. NP-1 )..."
697*
698*
699 1500   CONTINUE
700*       If the processor could not locally factor, it jumps here.
701*
702        IF ( MYCOL .NE. 0 ) THEN
703*
704*         Receive previously transmitted matrix section, which forms
705*         the right-hand-side for the triangular solve that calculates
706*         the "spike" fillin.
707*
708*
709          CALL ZTRRV2D( ICTXT, 'U', 'N', 1, 1, AF( 1 ), ODD_SIZE, 0,
710     $                  MYCOL-1 )
711*
712          IF (INFO.EQ.0) THEN
713*
714*         Calculate the "spike" fillin, ${L_i} {{G}_i}^C = {D_i}$ .
715*
716          CALL ZPTTRSV( 'L', 'N', ODD_SIZE, INT_ONE, D( PART_OFFSET+1 ),
717     $                  E( PART_OFFSET+1 ), AF( 1 ), ODD_SIZE, INFO )
718*
719*         Divide by D
720*
721        DO 20  I=1, ODD_SIZE
722            AF( I ) = AF( I )/D( PART_OFFSET+I )
723   20     CONTINUE
724*
725*
726*         Calculate the update block for previous proc, E_i = G_i{G_i}^C
727*
728*
729*         Since there is no element-by-element vector multiplication in
730*           the BLAS, this loop must be hardwired in without a BLAS call
731*
732          INT_TEMP = ODD_SIZE*INT_ONE+2+1
733          AF( INT_TEMP ) = 0
734*
735        DO 30  I=1, ODD_SIZE
736            AF( INT_TEMP ) = AF( INT_TEMP )-D( PART_OFFSET+I )*
737     $        ( AF( I )*DCONJG( AF( I ) ) )
738   30     CONTINUE
739*
740*
741*         Initiate send of E_i to previous processor to overlap
742*           with next computation.
743*
744          CALL ZGESD2D( ICTXT, INT_ONE, INT_ONE, AF( ODD_SIZE+3 ),
745     $                  INT_ONE, 0, MYCOL-1 )
746*
747*
748          IF ( MYCOL .LT. NP-1 ) THEN
749*
750*           Calculate off-diagonal block(s) of reduced system.
751*           Note: for ease of use in solution of reduced system, store
752*           L's off-diagonal block in conjugate transpose form.
753*           {F_i}^C =  {H_i}{{B'}_i}^C
754*
755            AF( ODD_SIZE+1 ) =
756     $                         - D( PART_OFFSET+ODD_SIZE )
757     $                         * DCONJG( E( PART_OFFSET+ODD_SIZE )
758     $                         * AF( ODD_SIZE ) )
759*
760*
761          ENDIF
762*
763        ENDIF
*       End of "if (info.eq.0) then"
765*
766        ENDIF
*       End of "if ( MYCOL .ne. 0 )..."
768*
769*
770*       Check to make sure no processors have found errors
771*
772        CALL IGAMX2D( ICTXT, 'A', ' ', 1, 1, INFO, 1, INFO, INFO,
773     $              -1, 0, 0 )
774*
775        IF( MYCOL.EQ.0 ) THEN
776           CALL IGEBS2D( ICTXT, 'A', ' ', 1, 1, INFO, 1 )
777        ELSE
778           CALL IGEBR2D( ICTXT, 'A', ' ', 1, 1, INFO, 1, 0, 0 )
779        ENDIF
780*
781        IF ( INFO.NE.0 ) THEN
782           GOTO 1000
783        ENDIF
784*       No errors found, continue
785*
786*
787********************************************************************
788*       PHASE 2: Formation and factorization of Reduced System.
789********************************************************************
790*
791*       Gather up local sections of reduced system
792*
793*
794*     The last processor does not participate in the factorization of
795*       the reduced system, having sent its E_i already.
796      IF( MYCOL .EQ. NPCOL-1 ) THEN
797        GOTO 14
798      ENDIF
799*
800*       Initiate send of off-diag block(s) to overlap with next part.
801*       Off-diagonal block needed on neighboring processor to start
802*       algorithm.
803*
804        IF( (MOD( MYCOL+1, 2 ) .EQ. 0) .AND. ( MYCOL .GT. 0 ) ) THEN
805*
806          CALL ZGESD2D( ICTXT, INT_ONE, INT_ONE,
807     $                       AF( ODD_SIZE+1 ),
808     $                       INT_ONE, 0, MYCOL-1 )
809*
810        ENDIF
811*
812*       Copy last diagonal block into AF storage for subsequent
813*         operations.
814*
815        AF( ODD_SIZE+2 ) =
816     $        DCMPLX( D( PART_OFFSET+ODD_SIZE+1 ) )
817*
*       Receive contribution to diagonal block stored on this proc.
819*
820        IF( MYCOL.LT. NPCOL-1 ) THEN
821*
822           CALL ZGERV2D( ICTXT, INT_ONE, INT_ONE,
823     $               AF( ODD_SIZE+2+1 ),
824     $               INT_ONE, 0,
825     $               MYCOL+1 )
826*
827*          Add contribution to diagonal block
828*
829           AF( ODD_SIZE+2 ) = AF( ODD_SIZE+2 )+AF( ODD_SIZE+3 )
830*
831        ENDIF
832*
833*
834*       *************************************
835*       Modification Loop
836*
837*       The distance for sending and receiving for each level starts
838*         at 1 for the first level.
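*         (LEVEL_DIST doubles after each pass, implementing the
*           elimination tree of the block cyclic reduction applied to
*           the (P-1)-equation reduced system described in the header
*           comments.)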
839        LEVEL_DIST = 1
840*
841*       Do until this proc is needed to modify other procs' equations
842*
843   12   CONTINUE
844        IF( MOD( (MYCOL+1)/LEVEL_DIST, 2) .NE. 0 ) GOTO 11
845*
846*         Receive and add contribution to diagonal block from the left
847*
848          IF( MYCOL-LEVEL_DIST .GE. 0 ) THEN
849            CALL ZGERV2D( ICTXT, INT_ONE, INT_ONE, WORK( 1 ),
850     $                     INT_ONE, 0, MYCOL-LEVEL_DIST )
851*
852            AF( ODD_SIZE+2 ) = AF( ODD_SIZE+2 )+WORK( 1 )
853*
854          ENDIF
855*
856*         Receive and add contribution to diagonal block from the right
857*
858          IF( MYCOL+LEVEL_DIST .LT. NPCOL-1 ) THEN
859            CALL ZGERV2D( ICTXT, INT_ONE, INT_ONE, WORK( 1 ),
860     $                        INT_ONE, 0, MYCOL+LEVEL_DIST )
861*
862            AF( ODD_SIZE+2 ) = AF( ODD_SIZE+2 )+WORK( 1 )
863*
864          ENDIF
865*
866          LEVEL_DIST = LEVEL_DIST*2
867*
868        GOTO 12
869   11   CONTINUE
870*       [End of GOTO Loop]
871*
872*
873*       *********************************
874*       Calculate and use this proc's blocks to modify other procs'...
875        IF( AF( ODD_SIZE+2 ) .EQ. CZERO ) THEN
876               INFO = NPCOL + MYCOL
877        ENDIF
878*
879*       ****************************************************************
880*       Receive offdiagonal block from processor to right.
881*         If this is the first group of processors, the receive comes
882*         from a different processor than otherwise.
883*
884        IF( LEVEL_DIST .EQ. 1 )THEN
885          COMM_PROC = MYCOL + 1
886*
887*           Move block into place that it will be expected to be for
888*             calcs.
889*
890          AF( ODD_SIZE+3 ) = AF( ODD_SIZE+1 )
891*
892        ELSE
893          COMM_PROC = MYCOL + LEVEL_DIST/2
894        ENDIF
895*
896        IF( MYCOL/LEVEL_DIST .LE. (NPCOL-1)/LEVEL_DIST-2 )THEN
897*
898          CALL ZGERV2D( ICTXT, INT_ONE, INT_ONE,
899     $        AF( ODD_SIZE+1 ),
900     $                       INT_ONE, 0, COMM_PROC )
901*
902          IF( INFO .EQ. 0 ) THEN
903*
904*
905*         Modify upper off_diagonal block with diagonal block
906*
907*
908          AF( ODD_SIZE+1 ) = AF( ODD_SIZE+1 )/AF( ODD_SIZE+2 )
909*
910          ENDIF
911*         End of "if ( info.eq.0 ) then"
912*
913*         Calculate contribution from this block to next diagonal block
914*
915          WORK( 1 ) = -ONE*AF( ODD_SIZE+1 )*AF( ODD_SIZE+2 )
916     $                  *DCONJG( AF( ODD_SIZE+1 ) )
917*
918*         Send contribution to diagonal block's owning processor.
919*
920          CALL ZGESD2D( ICTXT, INT_ONE, INT_ONE, WORK( 1 ), INT_ONE,
921     $                     0, MYCOL+LEVEL_DIST )
922*
923        ENDIF
924*       End of "if( mycol/level_dist .le. (npcol-1)/level_dist-2 )..."
925*
926*
927*       ****************************************************************
928*       Receive off_diagonal block from left and use to finish with this
929*         processor.
930*
931        IF( (MYCOL/LEVEL_DIST .GT. 0 ).AND.
932     $      ( MYCOL/LEVEL_DIST .LE. (NPCOL-1)/LEVEL_DIST-1 ) ) THEN
933*
934          IF( LEVEL_DIST .GT. 1)THEN
935*
936*           Receive offdiagonal block(s) from proc level_dist/2 to the
937*           left
938*
939           CALL ZGERV2D( ICTXT, INT_ONE, INT_ONE,
940     $            AF( ODD_SIZE+2+1 ),
941     $            INT_ONE, 0, MYCOL-LEVEL_DIST/2 )
942*
943          ENDIF
944*
945*
946          IF( INFO.EQ.0 ) THEN
947*
948*         Use diagonal block(s) to modify this offdiagonal block
949*
950          AF( ODD_SIZE+3 ) = ( AF( ODD_SIZE+3 ) )
951     $                         /AF( ODD_SIZE+2 )
952*
953          ENDIF
954*         End of "if( info.eq.0 ) then"
955*
956*         Use offdiag block(s) to calculate modification to diag block
957*           of processor to the left
958*
959          WORK( 1 ) = -ONE*AF( ODD_SIZE+3 )*AF( ODD_SIZE+2 )
960     $                  *DCONJG( AF( ODD_SIZE+3 ) )
961*
962*         Send contribution to diagonal block's owning processor.
963*
964          CALL ZGESD2D( ICTXT, INT_ONE, INT_ONE, WORK( 1 ), INT_ONE,
965     $                     0, MYCOL-LEVEL_DIST )
966*
967*         *******************************************************
968*
969          IF( MYCOL/LEVEL_DIST .LE. (NPCOL-1)/LEVEL_DIST-2 ) THEN
970*
971*           Decide which processor offdiagonal block(s) goes to
972*
973            IF( ( MOD( MYCOL/( 2*LEVEL_DIST ),2 )) .EQ.0 ) THEN
974              COMM_PROC = MYCOL + LEVEL_DIST
975            ELSE
976              COMM_PROC = MYCOL - LEVEL_DIST
977            ENDIF
978*
979*           Use offdiagonal blocks to calculate offdiag
980*             block to send to neighboring processor. Depending
981*             on circumstances, may need to transpose the matrix.
982*
983              WORK( 1 ) = -ONE*AF( ODD_SIZE+3 )
984     $                  * AF( ODD_SIZE+2 )
985     $                  * AF( ODD_SIZE+1 )
986*
987*           Send contribution to offdiagonal block's owning processor.
988*
989            CALL ZGESD2D( ICTXT, INT_ONE, INT_ONE, WORK( 1 ), INT_ONE,
990     $                         0, COMM_PROC )
991*
992          ENDIF
993*
994        ENDIF
995*       End of "if( mycol/level_dist.le. (npcol-1)/level_dist -1 )..."
996*
997   14   CONTINUE
998*
999*
1000 1000 CONTINUE
1001*
1002*
1003*     Free BLACS space used to hold standard-form grid.
1004*
1005      IF( ICTXT_SAVE .NE. ICTXT_NEW ) THEN
1006         CALL BLACS_GRIDEXIT( ICTXT_NEW )
1007      ENDIF
1008*
1009 1234 CONTINUE
1010*
1011*     Restore saved input parameters
1012*
1013      ICTXT = ICTXT_SAVE
1014      NP = NP_SAVE
1015*
1016*     Output minimum worksize
1017*
1018      WORK( 1 ) = WORK_SIZE_MIN
1019*
1020*         Make INFO consistent across processors
1021*
1022          CALL IGAMX2D( ICTXT, 'A', ' ', 1, 1, INFO, 1, INFO, INFO,
1023     $              -1, 0, 0 )
1024*
1025          IF( MYCOL.EQ.0 ) THEN
1026             CALL IGEBS2D( ICTXT, 'A', ' ', 1, 1, INFO, 1 )
1027          ELSE
1028             CALL IGEBR2D( ICTXT, 'A', ' ', 1, 1, INFO, 1, 0, 0 )
1029          ENDIF
1030*
1031*
1032      RETURN
1033*
1034*     End of PZPTTRF
1035*
1036      END