1      SUBROUTINE PDPTTRSV( UPLO, N, NRHS, D, E, JA, DESCA, B, IB, DESCB,
2     $                     AF, LAF, WORK, LWORK, INFO )
3*
4*  -- ScaLAPACK routine (version 1.7) --
5*     University of Tennessee, Knoxville, Oak Ridge National Laboratory,
6*     and University of California, Berkeley.
7*     April 3, 2000
8*
9*     .. Scalar Arguments ..
10      CHARACTER          UPLO
11      INTEGER            IB, INFO, JA, LAF, LWORK, N, NRHS
12*     ..
13*     .. Array Arguments ..
14      INTEGER            DESCA( * ), DESCB( * )
15      DOUBLE PRECISION   AF( * ), B( * ), D( * ), E( * ), WORK( * )
16*     ..
17*
18*
19*  Purpose
20*  =======
21*
22*  PDPTTRSV solves a tridiagonal triangular system of linear equations
23*
24*                      A(1:N, JA:JA+N-1) * X = B(IB:IB+N-1, 1:NRHS)
25*                                 or
26*                      A(1:N, JA:JA+N-1)^T * X = B(IB:IB+N-1, 1:NRHS)
27*
*  where A(1:N, JA:JA+N-1) is a tridiagonal triangular matrix factor
*  produced by the Cholesky factorization code PDPTTRF and is stored
*  in A(1:N, JA:JA+N-1) and AF.  The matrix stored in
*  A(1:N, JA:JA+N-1) is either upper or lower triangular according to
*  UPLO, and the choice of solving A(1:N, JA:JA+N-1) or
*  A(1:N, JA:JA+N-1)^T is likewise dictated by the user through the
*  parameter UPLO.
36*
37*  Routine PDPTTRF MUST be called first.
38*
39*  =====================================================================
40*
41*  Arguments
42*  =========
43*
44*  UPLO    (global input) CHARACTER
45*          = 'U':  Upper triangle of A(1:N, JA:JA+N-1) is stored;
46*          = 'L':  Lower triangle of A(1:N, JA:JA+N-1) is stored.
47*
48*  N       (global input) INTEGER
49*          The number of rows and columns to be operated on, i.e. the
50*          order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
51*
52*  NRHS    (global input) INTEGER
53*          The number of right hand sides, i.e., the number of columns
54*          of the distributed submatrix B(IB:IB+N-1, 1:NRHS).
55*          NRHS >= 0.
56*
57*  D       (local input/local output) DOUBLE PRECISION pointer to local
58*          part of global vector storing the main diagonal of the
59*          matrix.
*          On entry, this array contains the factors of the matrix,
*            as computed by PDPTTRF.
62*          Must be of size >= DESCA( NB_ ).
63*
*  E       (local input/local output) DOUBLE PRECISION pointer to local
*          part of global vector storing the offdiagonal of the
*          matrix. Globally, E(N) is not referenced, and E must be
*          aligned with D.
*          On entry, this array contains the factors of the matrix,
*            as computed by PDPTTRF.
70*          Must be of size >= DESCA( NB_ ).
71*
72*  JA      (global input) INTEGER
73*          The index in the global array A that points to the start of
74*          the matrix to be operated on (which may be either all of A
75*          or a submatrix of A).
76*
77*  DESCA   (global and local input) INTEGER array of dimension DLEN.
78*          if 1D type (DTYPE_A=501 or 502), DLEN >= 7;
79*          if 2D type (DTYPE_A=1), DLEN >= 9.
80*          The array descriptor for the distributed matrix A.
81*          Contains information of mapping of A to memory. Please
82*          see NOTES below for full description and options.
83*
84*  B       (local input/local output) DOUBLE PRECISION pointer into
85*          local memory to an array of local lead dimension lld_b>=NB.
*          On entry, this array contains the local pieces of the
*          right hand sides B(IB:IB+N-1, 1:NRHS).
*          On exit, it contains the local pieces of the distributed
*          solution matrix X.
91*
92*  IB      (global input) INTEGER
93*          The row index in the global array B that points to the first
94*          row of the matrix to be operated on (which may be either
95*          all of B or a submatrix of B).
96*
97*  DESCB   (global and local input) INTEGER array of dimension DLEN.
98*          if 1D type (DTYPE_B=502), DLEN >=7;
99*          if 2D type (DTYPE_B=1), DLEN >= 9.
100*          The array descriptor for the distributed matrix B.
101*          Contains information of mapping of B to memory. Please
102*          see NOTES below for full description and options.
103*
*  AF      (local input) DOUBLE PRECISION array, dimension LAF.
*          Auxiliary Fillin Space.
*          Fillin is created during the factorization routine
*          PDPTTRF and is stored in AF. If a linear system
*          is to be solved with this routine after the factorization,
*          AF *must not be altered* after the factorization.
110*
111*  LAF     (local input) INTEGER
112*          Size of user-input Auxiliary Fillin space AF. Must be >=
113*          (NB+2)
114*          If LAF is not large enough, an error code will be returned
115*          and the minimum acceptable size will be returned in AF( 1 )
116*
117*  WORK    (local workspace/local output)
118*          DOUBLE PRECISION temporary workspace. This space may
119*          be overwritten in between calls to routines. WORK must be
120*          the size given in LWORK.
121*          On exit, WORK( 1 ) contains the minimal LWORK.
122*
123*  LWORK   (local input or global input) INTEGER
124*          Size of user-input workspace WORK.
125*          If LWORK is too small, the minimal acceptable size will be
126*          returned in WORK(1) and an error code is returned. LWORK>=
127*          (10+2*min(100,NRHS))*NPCOL+4*NRHS
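*          This routine also accepts LWORK = -1 as a workspace query:
*          it then returns immediately with the minimal workspace size
*          in WORK( 1 ) and performs no computation. A sketch of the
*          query pattern (the second call uses the re-sized WORK):
*
*            LWORK = -1
*            CALL PDPTTRSV( UPLO, N, NRHS, D, E, JA, DESCA, B, IB,
*     $                     DESCB, AF, LAF, WORK, LWORK, INFO )
*            LWORK = INT( WORK( 1 ) )
*            ( re-allocate WORK to LWORK entries, then call again )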
128*
129*  INFO    (local output) INTEGER
130*          = 0:  successful exit
*          < 0:  If the i-th argument is an array and the j-th entry
*                had an illegal value, then INFO = -(i*100+j); if the
*                i-th argument is a scalar and had an illegal value,
*                then INFO = -i.
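*          For example, INFO = -( 7*100+2 ) = -702 indicates that
*          entry 2 of the 7-th argument (DESCA) had an illegal value,
*          while INFO = -1 indicates an illegal value of UPLO.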
135*
136*  =====================================================================
137*
138*
139*  Restrictions
140*  ============
141*
142*  The following are restrictions on the input parameters. Some of these
143*    are temporary and will be removed in future releases, while others
144*    may reflect fundamental technical limitations.
145*
146*    Non-cyclic restriction: VERY IMPORTANT!
147*      P*NB>= mod(JA-1,NB)+N.
148*      The mapping for matrices must be blocked, reflecting the nature
149*      of the divide and conquer algorithm as a task-parallel algorithm.
150*      This formula in words is: no processor may have more than one
151*      chunk of the matrix.
152*
153*    Blocksize cannot be too small:
154*      If the matrix spans more than one processor, the following
155*      restriction on NB, the size of each block on each processor,
156*      must hold:
157*      NB >= 2
158*      The bulk of parallel computation is done on the matrix of size
159*      O(NB) on each processor. If this is too small, divide and conquer
160*      is a poor choice of algorithm.
161*
162*    Submatrix reference:
163*      JA = IB
164*      Alignment restriction that prevents unnecessary communication.
165*
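*    As an illustration, the restrictions above can be checked before
*      the call along the following lines (a sketch only; NPCOL is the
*      number of processes in the one-dimensional grid, NB = DESCA( 4 ),
*      OK is an illustrative local LOGICAL, and the NB >= 2 condition
*      is only required when the matrix spans more than one process):
*
*        OK = ( NPCOL*NB .GE. MOD( JA-1, NB )+N ) .AND.
*     $       ( NB .GE. 2 ) .AND. ( JA .EQ. IB )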
166*
167*  =====================================================================
168*
169*
170*  Notes
171*  =====
172*
173*  If the factorization routine and the solve routine are to be called
174*    separately (to solve various sets of righthand sides using the same
175*    coefficient matrix), the auxiliary space AF *must not be altered*
176*    between calls to the factorization routine and the solve routine.
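*
*  For example, one factorization may be followed by several solves
*    that reuse the same AF. The sketch below is schematic only:
*    descriptor and workspace setup are omitted, NRHS2, B2 and DESCB2
*    are placeholder names for a second set of right hand sides, and
*    the PDPTTRF and PDPTTRS headers remain the authoritative
*    reference for those argument lists.
*
*      CALL PDPTTRF( N, D, E, JA, DESCA, AF, LAF, WORK, LWORK, INFO )
*      CALL PDPTTRS( N, NRHS, D, E, JA, DESCA, B, IB, DESCB,
*     $              AF, LAF, WORK, LWORK, INFO )
*      CALL PDPTTRS( N, NRHS2, D, E, JA, DESCA, B2, IB, DESCB2,
*     $              AF, LAF, WORK, LWORK, INFO )
*
*    PDPTTRS carries out its forward and backward triangular solves
*    by calling this routine (PDPTTRSV).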
177*
178*  The best algorithm for solving banded and tridiagonal linear systems
179*    depends on a variety of parameters, especially the bandwidth.
180*    Currently, only algorithms designed for the case N/P >> bw are
181*    implemented. These go by many names, including Divide and Conquer,
182*    Partitioning, domain decomposition-type, etc.
*    For tridiagonal matrices, N/P >> bw(=1) holds trivially, and so
*    D&C algorithms are the appropriate choice.
185*
186*  Algorithm description: Divide and Conquer
187*
*    The Divide and Conquer algorithm assumes the matrix is narrowly
189*      banded compared with the number of equations. In this situation,
190*      it is best to distribute the input matrix A one-dimensionally,
191*      with columns atomic and rows divided amongst the processes.
192*      The basic algorithm divides the tridiagonal matrix up into
193*      P pieces with one stored on each processor,
194*      and then proceeds in 2 phases for the factorization or 3 for the
195*      solution of a linear system.
196*      1) Local Phase:
197*         The individual pieces are factored independently and in
198*         parallel. These factors are applied to the matrix creating
199*         fillin, which is stored in a non-inspectable way in auxiliary
200*         space AF. Mathematically, this is equivalent to reordering
201*         the matrix A as P A P^T and then factoring the principal
202*         leading submatrix of size equal to the sum of the sizes of
203*         the matrices factored on each processor. The factors of
204*         these submatrices overwrite the corresponding parts of A
205*         in memory.
206*      2) Reduced System Phase:
*         A small system of order (P-1) is formed representing the
*         interaction of the larger blocks, and is stored (as are its
209*         factors) in the space AF. A parallel Block Cyclic Reduction
210*         algorithm is used. For a linear system, a parallel front solve
*         followed by an analogous backsolve, both using the structure
212*         of the factored matrix, are performed.
*      3) Backsubstitution Phase:
214*         For a linear system, a local backsubstitution is performed on
215*         each processor in parallel.
216*
217*
218*  Descriptors
219*  ===========
220*
221*  Descriptors now have *types* and differ from ScaLAPACK 1.0.
222*
223*  Note: tridiagonal codes can use either the old two dimensional
224*    or new one-dimensional descriptors, though the processor grid in
225*    both cases *must be one-dimensional*. We describe both types below.
226*
227*  Each global data object is described by an associated description
228*  vector.  This vector stores the information required to establish
229*  the mapping between an object element and its corresponding process
230*  and memory location.
231*
*  Let A be a generic term for any 2D block cyclically distributed
*  array.
233*  Such a global array has an associated description vector DESCA.
234*  In the following comments, the character _ should be read as
235*  "of the global array".
236*
237*  NOTATION        STORED IN      EXPLANATION
238*  --------------- -------------- --------------------------------------
239*  DTYPE_A(global) DESCA( DTYPE_ )The descriptor type.  In this case,
240*                                 DTYPE_A = 1.
241*  CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
242*                                 the BLACS process grid A is distribu-
243*                                 ted over. The context itself is glo-
244*                                 bal, but the handle (the integer
245*                                 value) may vary.
246*  M_A    (global) DESCA( M_ )    The number of rows in the global
247*                                 array A.
248*  N_A    (global) DESCA( N_ )    The number of columns in the global
249*                                 array A.
250*  MB_A   (global) DESCA( MB_ )   The blocking factor used to distribute
251*                                 the rows of the array.
252*  NB_A   (global) DESCA( NB_ )   The blocking factor used to distribute
253*                                 the columns of the array.
254*  RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
255*                                 row of the array A is distributed.
256*  CSRC_A (global) DESCA( CSRC_ ) The process column over which the
257*                                 first column of the array A is
258*                                 distributed.
259*  LLD_A  (local)  DESCA( LLD_ )  The leading dimension of the local
260*                                 array.  LLD_A >= MAX(1,LOCr(M_A)).
261*
262*  Let K be the number of rows or columns of a distributed matrix,
263*  and assume that its process grid has dimension p x q.
264*  LOCr( K ) denotes the number of elements of K that a process
265*  would receive if K were distributed over the p processes of its
266*  process column.
267*  Similarly, LOCc( K ) denotes the number of elements of K that a
268*  process would receive if K were distributed over the q processes of
269*  its process row.
270*  The values of LOCr() and LOCc() may be determined via a call to the
271*  ScaLAPACK tool function, NUMROC:
272*          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
273*          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
274*  An upper bound for these quantities may be computed by:
275*          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
276*          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
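*  For example, with M = 10, MB_A = 3, RSRC_A = 0 and NPROW = 3, the
*  process row MYROW = 1 owns LOCr( 10 ) = NUMROC( 10, 3, 1, 0, 3 ) = 3
*  rows, and the upper bound gives ceil( ceil(10/3)/3 )*3 = 6.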
277*
278*
279*  One-dimensional descriptors:
280*
281*  One-dimensional descriptors are a new addition to ScaLAPACK since
282*    version 1.0. They simplify and shorten the descriptor for 1D
283*    arrays.
284*
285*  Since ScaLAPACK supports two-dimensional arrays as the fundamental
286*    object, we allow 1D arrays to be distributed either over the
287*    first dimension of the array (as if the grid were P-by-1) or the
288*    2nd dimension (as if the grid were 1-by-P). This choice is
289*    indicated by the descriptor type (501 or 502)
290*    as described below.
291*    However, for tridiagonal matrices, since the objects being
292*    distributed are the individual vectors storing the diagonals, we
293*    have adopted the convention that both the P-by-1 descriptor and
294*    the 1-by-P descriptor are allowed and are equivalent for
295*    tridiagonal matrices. Thus, for tridiagonal matrices,
296*    DTYPE_A = 501 or 502 can be used interchangeably
297*    without any other change.
298*  We require that the distributed vectors storing the diagonals of a
299*    tridiagonal matrix be aligned with each other. Because of this, a
*    single descriptor, DESCA, serves to describe the distribution
*    of all diagonals simultaneously.
302*
303*    IMPORTANT NOTE: the actual BLACS grid represented by the
304*    CTXT entry in the descriptor may be *either*  P-by-1 or 1-by-P
305*    irrespective of which one-dimensional descriptor type
306*    (501 or 502) is input.
307*    This routine will interpret the grid properly either way.
308*    ScaLAPACK routines *do not support intercontext operations* so that
309*    the grid passed to a single ScaLAPACK routine *must be the same*
310*    for all array descriptors passed to that routine.
311*
312*    NOTE: In all cases where 1D descriptors are used, 2D descriptors
313*    may also be used, since a one-dimensional array is a special case
314*    of a two-dimensional array with one dimension of size unity.
315*    The two-dimensional array used in this case *must* be of the
316*    proper orientation:
317*      If the appropriate one-dimensional descriptor is DTYPEA=501
318*      (1 by P type), then the two dimensional descriptor must
319*      have a CTXT value that refers to a 1 by P BLACS grid;
320*      If the appropriate one-dimensional descriptor is DTYPEA=502
321*      (P by 1 type), then the two dimensional descriptor must
322*      have a CTXT value that refers to a P by 1 BLACS grid.
323*
324*
325*  Summary of allowed descriptors, types, and BLACS grids:
326*  DTYPE           501         502         1         1
327*  BLACS grid      1xP or Px1  1xP or Px1  1xP       Px1
328*  -----------------------------------------------------
329*  A               OK          OK          OK        NO
330*  B               NO          OK          NO        OK
331*
332*  Note that a consequence of this chart is that it is not possible
333*    for *both* DTYPE_A and DTYPE_B to be 2D_type(1), as these lead
334*    to opposite requirements for the orientation of the BLACS grid,
335*    and as noted before, the *same* BLACS context must be used in
336*    all descriptors in a single ScaLAPACK subroutine call.
337*
*  Let A be a generic term for any 1D block cyclically distributed
*  array.
339*  Such a global array has an associated description vector DESCA.
340*  In the following comments, the character _ should be read as
341*  "of the global array".
342*
343*  NOTATION        STORED IN  EXPLANATION
344*  --------------- ---------- ------------------------------------------
*  DTYPE_A(global) DESCA( 1 ) The descriptor type. For 1D grids,
*                                DTYPE_A = 501: 1-by-P grid.
*                                DTYPE_A = 502: P-by-1 grid.
348*  CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
349*                                the BLACS process grid A is distribu-
350*                                ted over. The context itself is glo-
351*                                bal, but the handle (the integer
352*                                value) may vary.
353*  N_A    (global) DESCA( 3 ) The size of the array dimension being
354*                                distributed.
355*  NB_A   (global) DESCA( 4 ) The blocking factor used to distribute
356*                                the distributed dimension of the array.
357*  SRC_A  (global) DESCA( 5 ) The process row or column over which the
358*                                first row or column of the array
359*                                is distributed.
360*  Ignored         DESCA( 6 ) Ignored for tridiagonal matrices.
361*  Reserved        DESCA( 7 ) Reserved for future use.
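*
*  As an illustration, a minimal descriptor setup for this routine on
*  a one-dimensional BLACS grid with context ICTXT might look as
*  follows (a sketch only, assuming JA = IB = 1; NB and LLD_B are
*  values chosen by the caller, with LLD_B >= NB, and DESCA could
*  equally use type 502, as noted above):
*
*      DESCA( 1 ) = 501
*      DESCA( 2 ) = ICTXT
*      DESCA( 3 ) = N
*      DESCA( 4 ) = NB
*      DESCA( 5 ) = 0
*      DESCA( 6 ) = 0
*      DESCA( 7 ) = 0
*
*      DESCB( 1 ) = 502
*      DESCB( 2 ) = ICTXT
*      DESCB( 3 ) = N
*      DESCB( 4 ) = NB
*      DESCB( 5 ) = 0
*      DESCB( 6 ) = LLD_B
*      DESCB( 7 ) = 0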
362*
363*
364*
365*  =====================================================================
366*
367*  Code Developer: Andrew J. Cleary, University of Tennessee.
368*    Current address: Lawrence Livermore National Labs.
369*
370*  =====================================================================
371*
372*     .. Parameters ..
373      DOUBLE PRECISION   ONE
374      PARAMETER          ( ONE = 1.0D+0 )
375      DOUBLE PRECISION   ZERO
376      PARAMETER          ( ZERO = 0.0D+0 )
377      INTEGER            INT_ONE
378      PARAMETER          ( INT_ONE = 1 )
379      INTEGER            DESCMULT, BIGNUM
380      PARAMETER          ( DESCMULT = 100, BIGNUM = DESCMULT*DESCMULT )
381      INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
382     $                   LLD_, MB_, M_, NB_, N_, RSRC_
383      PARAMETER          ( BLOCK_CYCLIC_2D = 1, DLEN_ = 9, DTYPE_ = 1,
384     $                     CTXT_ = 2, M_ = 3, N_ = 4, MB_ = 5, NB_ = 6,
385     $                     RSRC_ = 7, CSRC_ = 8, LLD_ = 9 )
386*     ..
387*     .. Local Scalars ..
388      INTEGER            CSRC, FIRST_PROC, ICTXT, ICTXT_NEW, ICTXT_SAVE,
389     $                   IDUM1, IDUM3, JA_NEW, LEVEL_DIST, LLDA, LLDB,
390     $                   MYCOL, MYROW, MY_NUM_COLS, NB, NP, NPCOL,
391     $                   NPROW, NP_SAVE, ODD_SIZE, PART_OFFSET,
392     $                   PART_SIZE, RETURN_CODE, STORE_M_B, STORE_N_A,
393     $                   TEMP, WORK_SIZE_MIN
394*     ..
395*     .. Local Arrays ..
396      INTEGER            DESCA_1XP( 7 ), DESCB_PX1( 7 ),
397     $                   PARAM_CHECK( 15, 3 )
398*     ..
399*     .. External Subroutines ..
400      EXTERNAL           BLACS_GRIDEXIT, BLACS_GRIDINFO, DAXPY,
401     $                   DESC_CONVERT, DGEMM, DGERV2D, DGESD2D, DMATADD,
402     $                   DPTTRSV, DTRTRS, GLOBCHK, PXERBLA, RESHAPE
403*     ..
404*     .. External Functions ..
405      LOGICAL            LSAME
406      INTEGER            NUMROC
407      EXTERNAL           LSAME, NUMROC
408*     ..
409*     .. Intrinsic Functions ..
410      INTRINSIC          ICHAR, MOD
411*     ..
412*     .. Executable Statements ..
413*
414*     Test the input parameters
415*
416      INFO = 0
417*
418*     Convert descriptor into standard form for easy access to
419*        parameters, check that grid is of right shape.
420*
421      DESCA_1XP( 1 ) = 501
422      DESCB_PX1( 1 ) = 502
423*
424      TEMP = DESCA( DTYPE_ )
425      IF( TEMP.EQ.502 ) THEN
426*        Temporarily set the descriptor type to 1xP type
427         DESCA( DTYPE_ ) = 501
428      END IF
429*
430      CALL DESC_CONVERT( DESCA, DESCA_1XP, RETURN_CODE )
431*
432      DESCA( DTYPE_ ) = TEMP
433*
434      IF( RETURN_CODE.NE.0 ) THEN
435         INFO = -( 7*100+2 )
436      END IF
437*
438      CALL DESC_CONVERT( DESCB, DESCB_PX1, RETURN_CODE )
439*
440      IF( RETURN_CODE.NE.0 ) THEN
441         INFO = -( 10*100+2 )
442      END IF
443*
444*     Consistency checks for DESCA and DESCB.
445*
446*     Context must be the same
447      IF( DESCA_1XP( 2 ).NE.DESCB_PX1( 2 ) ) THEN
448         INFO = -( 10*100+2 )
449      END IF
450*
451*        These are alignment restrictions that may or may not be removed
452*        in future releases. -Andy Cleary, April 14, 1996.
453*
454*     Block sizes must be the same
455      IF( DESCA_1XP( 4 ).NE.DESCB_PX1( 4 ) ) THEN
456         INFO = -( 10*100+4 )
457      END IF
458*
459*     Source processor must be the same
460*
461      IF( DESCA_1XP( 5 ).NE.DESCB_PX1( 5 ) ) THEN
462         INFO = -( 10*100+5 )
463      END IF
464*
465*     Get values out of descriptor for use in code.
466*
467      ICTXT = DESCA_1XP( 2 )
468      CSRC = DESCA_1XP( 5 )
469      NB = DESCA_1XP( 4 )
470      LLDA = DESCA_1XP( 6 )
471      STORE_N_A = DESCA_1XP( 3 )
472      LLDB = DESCB_PX1( 6 )
473      STORE_M_B = DESCB_PX1( 3 )
474*
475*     Get grid parameters
476*
477*
478      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
479      NP = NPROW*NPCOL
480*
481*
482*
483      IF( LSAME( UPLO, 'U' ) ) THEN
484         IDUM1 = ICHAR( 'U' )
485      ELSE IF( LSAME( UPLO, 'L' ) ) THEN
486         IDUM1 = ICHAR( 'L' )
487      ELSE
488         INFO = -1
489      END IF
490*
491      IF( LWORK.LT.-1 ) THEN
492         INFO = -14
493      ELSE IF( LWORK.EQ.-1 ) THEN
494         IDUM3 = -1
495      ELSE
496         IDUM3 = 1
497      END IF
498*
499      IF( N.LT.0 ) THEN
500         INFO = -2
501      END IF
502*
503      IF( N+JA-1.GT.STORE_N_A ) THEN
504         INFO = -( 7*100+6 )
505      END IF
506*
507      IF( N+IB-1.GT.STORE_M_B ) THEN
508         INFO = -( 10*100+3 )
509      END IF
510*
511      IF( LLDB.LT.NB ) THEN
512         INFO = -( 10*100+6 )
513      END IF
514*
515      IF( NRHS.LT.0 ) THEN
516         INFO = -3
517      END IF
518*
519*     Current alignment restriction
520*
521      IF( JA.NE.IB ) THEN
522         INFO = -6
523      END IF
524*
525*     Argument checking that is specific to Divide & Conquer routine
526*
527      IF( NPROW.NE.1 ) THEN
528         INFO = -( 7*100+2 )
529      END IF
530*
531      IF( N.GT.NP*NB-MOD( JA-1, NB ) ) THEN
532         INFO = -( 2 )
533         CALL PXERBLA( ICTXT,
534     $                 'PDPTTRSV, D&C alg.: only 1 block per proc',
535     $                 -INFO )
536         RETURN
537      END IF
538*
539      IF( ( JA+N-1.GT.NB ) .AND. ( NB.LT.2*INT_ONE ) ) THEN
540         INFO = -( 7*100+4 )
541         CALL PXERBLA( ICTXT, 'PDPTTRSV, D&C alg.: NB too small',
542     $                 -INFO )
543         RETURN
544      END IF
545*
546*
547      WORK_SIZE_MIN = INT_ONE*NRHS
548*
549      WORK( 1 ) = WORK_SIZE_MIN
550*
551      IF( LWORK.LT.WORK_SIZE_MIN ) THEN
552         IF( LWORK.NE.-1 ) THEN
553            INFO = -14
554            CALL PXERBLA( ICTXT, 'PDPTTRSV: worksize error', -INFO )
555         END IF
556         RETURN
557      END IF
558*
559*     Pack params and positions into arrays for global consistency check
560*
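*     Column 1 holds the local values and column 2 the corresponding
*     argument positions, encoded as for INFO: plain argument numbers
*     for scalars (e.g. 1 = UPLO, 14 = LWORK) and argument*100 + entry
*     for descriptor entries (e.g. 705 = DESCA( 5 ), 1003 = DESCB( 3 )).
*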
561      PARAM_CHECK( 15, 1 ) = DESCB( 5 )
562      PARAM_CHECK( 14, 1 ) = DESCB( 4 )
563      PARAM_CHECK( 13, 1 ) = DESCB( 3 )
564      PARAM_CHECK( 12, 1 ) = DESCB( 2 )
565      PARAM_CHECK( 11, 1 ) = DESCB( 1 )
566      PARAM_CHECK( 10, 1 ) = IB
567      PARAM_CHECK( 9, 1 ) = DESCA( 5 )
568      PARAM_CHECK( 8, 1 ) = DESCA( 4 )
569      PARAM_CHECK( 7, 1 ) = DESCA( 3 )
570      PARAM_CHECK( 6, 1 ) = DESCA( 1 )
571      PARAM_CHECK( 5, 1 ) = JA
572      PARAM_CHECK( 4, 1 ) = NRHS
573      PARAM_CHECK( 3, 1 ) = N
574      PARAM_CHECK( 2, 1 ) = IDUM3
575      PARAM_CHECK( 1, 1 ) = IDUM1
576*
577      PARAM_CHECK( 15, 2 ) = 1005
578      PARAM_CHECK( 14, 2 ) = 1004
579      PARAM_CHECK( 13, 2 ) = 1003
580      PARAM_CHECK( 12, 2 ) = 1002
581      PARAM_CHECK( 11, 2 ) = 1001
582      PARAM_CHECK( 10, 2 ) = 9
583      PARAM_CHECK( 9, 2 ) = 705
584      PARAM_CHECK( 8, 2 ) = 704
585      PARAM_CHECK( 7, 2 ) = 703
586      PARAM_CHECK( 6, 2 ) = 701
587      PARAM_CHECK( 5, 2 ) = 6
588      PARAM_CHECK( 4, 2 ) = 3
589      PARAM_CHECK( 3, 2 ) = 2
590      PARAM_CHECK( 2, 2 ) = 14
591      PARAM_CHECK( 1, 2 ) = 1
592*
593*     Want to find errors with MIN( ), so if no error, set it to a big
*     number. If there already is an error, multiply by the
595*     descriptor multiplier.
596*
597      IF( INFO.GE.0 ) THEN
598         INFO = BIGNUM
599      ELSE IF( INFO.LT.-DESCMULT ) THEN
600         INFO = -INFO
601      ELSE
602         INFO = -INFO*DESCMULT
603      END IF
604*
605*     Check consistency across processors
606*
607      CALL GLOBCHK( ICTXT, 15, PARAM_CHECK, 15, PARAM_CHECK( 1, 3 ),
608     $              INFO )
609*
610*     Prepare output: set info = 0 if no error, and divide by DESCMULT
611*     if error is not in a descriptor entry.
612*
613      IF( INFO.EQ.BIGNUM ) THEN
614         INFO = 0
615      ELSE IF( MOD( INFO, DESCMULT ).EQ.0 ) THEN
616         INFO = -INFO / DESCMULT
617      ELSE
618         INFO = -INFO
619      END IF
620*
621      IF( INFO.LT.0 ) THEN
622         CALL PXERBLA( ICTXT, 'PDPTTRSV', -INFO )
623         RETURN
624      END IF
625*
626*     Quick return if possible
627*
628      IF( N.EQ.0 )
629     $   RETURN
630*
631      IF( NRHS.EQ.0 )
632     $   RETURN
633*
634*
635*     Adjust addressing into matrix space to properly get into
636*        the beginning part of the relevant data
637*
638      PART_OFFSET = NB*( ( JA-1 ) / ( NPCOL*NB ) )
639*
640      IF( ( MYCOL-CSRC ).LT.( JA-PART_OFFSET-1 ) / NB ) THEN
641         PART_OFFSET = PART_OFFSET + NB
642      END IF
643*
644      IF( MYCOL.LT.CSRC ) THEN
645         PART_OFFSET = PART_OFFSET - NB
646      END IF
647*
648*     Form a new BLACS grid (the "standard form" grid) with only procs
649*        holding part of the matrix, of size 1xNP where NP is adjusted,
650*        starting at csrc=0, with JA modified to reflect dropped procs.
651*
652*     First processor to hold part of the matrix:
653*
654      FIRST_PROC = MOD( ( JA-1 ) / NB+CSRC, NPCOL )
655*
*     Calculate new JA while dropping off unused processors.
657*
658      JA_NEW = MOD( JA-1, NB ) + 1
659*
660*     Save and compute new value of NP
661*
662      NP_SAVE = NP
663      NP = ( JA_NEW+N-2 ) / NB + 1
664*
665*     Call utility routine that forms "standard-form" grid
666*
667      CALL RESHAPE( ICTXT, INT_ONE, ICTXT_NEW, INT_ONE, FIRST_PROC,
668     $              INT_ONE, NP )
669*
670*     Use new context from standard grid as context.
671*
672      ICTXT_SAVE = ICTXT
673      ICTXT = ICTXT_NEW
674      DESCA_1XP( 2 ) = ICTXT_NEW
675      DESCB_PX1( 2 ) = ICTXT_NEW
676*
677*     Get information about new grid.
678*
679      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
680*
681*     Drop out processors that do not have part of the matrix.
682*
683      IF( MYROW.LT.0 ) THEN
684         GO TO 100
685      END IF
686*
687*     ********************************
688*     Values reused throughout routine
689*
690*     User-input value of partition size
691*
692      PART_SIZE = NB
693*
694*     Number of columns in each processor
695*
696      MY_NUM_COLS = NUMROC( N, PART_SIZE, MYCOL, 0, NPCOL )
697*
698*     Offset in columns to beginning of main partition in each proc
699*
700      IF( MYCOL.EQ.0 ) THEN
701         PART_OFFSET = PART_OFFSET + MOD( JA_NEW-1, PART_SIZE )
702         MY_NUM_COLS = MY_NUM_COLS - MOD( JA_NEW-1, PART_SIZE )
703      END IF
704*
705*     Size of main (or odd) partition in each processor
706*
707      ODD_SIZE = MY_NUM_COLS
708      IF( MYCOL.LT.NP-1 ) THEN
709         ODD_SIZE = ODD_SIZE - INT_ONE
710      END IF
711*
712*
713*
714*     Begin main code
715*
716      IF( LSAME( UPLO, 'L' ) ) THEN
717*
718*
719*        Frontsolve
720*
721*
722******************************************
723*       Local computation phase
724******************************************
725*
726*       Use main partition in each processor to solve locally
727*
728         CALL DPTTRSV( 'N', ODD_SIZE, NRHS, D( PART_OFFSET+1 ),
729     $                 E( PART_OFFSET+1 ), B( PART_OFFSET+1 ), LLDB,
730     $                 INFO )
731*
732*
733         IF( MYCOL.LT.NP-1 ) THEN
734*         Use factorization of odd-even connection block to modify
735*           locally stored portion of right hand side(s)
736*
737            CALL DAXPY( NRHS, -E( PART_OFFSET+ODD_SIZE ),
738     $                  B( PART_OFFSET+ODD_SIZE ), LLDB,
739     $                  B( PART_OFFSET+ODD_SIZE+1 ), LLDB )
740*
741         END IF
742*
743*
744         IF( MYCOL.NE.0 ) THEN
745*         Use the "spike" fillin to calculate contribution to previous
746*           processor's righthand-side.
747*
748            CALL DGEMM( 'T', 'N', 1, NRHS, ODD_SIZE, -ONE, AF( 1 ),
749     $                  ODD_SIZE, B( PART_OFFSET+1 ), LLDB, ZERO,
750     $                  WORK( 1+INT_ONE-1 ), INT_ONE )
751         END IF
752*
753*
754************************************************
755*       Formation and solution of reduced system
756************************************************
757*
758*
759*       Send modifications to prior processor's right hand sides
760*
761         IF( MYCOL.GT.0 ) THEN
762*
763            CALL DGESD2D( ICTXT, INT_ONE, NRHS, WORK( 1 ), INT_ONE, 0,
764     $                    MYCOL-1 )
765*
766         END IF
767*
768*       Receive modifications to processor's right hand sides
769*
770         IF( MYCOL.LT.NPCOL-1 ) THEN
771*
772            CALL DGERV2D( ICTXT, INT_ONE, NRHS, WORK( 1 ), INT_ONE, 0,
773     $                    MYCOL+1 )
774*
775*         Combine contribution to locally stored right hand sides
776*
777            CALL DMATADD( INT_ONE, NRHS, ONE, WORK( 1 ), INT_ONE, ONE,
778     $                    B( PART_OFFSET+ODD_SIZE+1 ), LLDB )
779*
780         END IF
781*
782*
783*       The last processor does not participate in the solution of the
784*       reduced system, having sent its contribution already.
785         IF( MYCOL.EQ.NPCOL-1 ) THEN
786            GO TO 30
787         END IF
788*
789*
790*       *************************************
791*       Modification Loop
792*
793*       The distance for sending and receiving for each level starts
794*         at 1 for the first level.
795         LEVEL_DIST = 1
796*
797*       Do until this proc is needed to modify other procs' equations
798*
799   10    CONTINUE
800         IF( MOD( ( MYCOL+1 ) / LEVEL_DIST, 2 ).NE.0 )
801     $      GO TO 20
802*
803*         Receive and add contribution to righthand sides from left
804*
805         IF( MYCOL-LEVEL_DIST.GE.0 ) THEN
806*
807            CALL DGERV2D( ICTXT, INT_ONE, NRHS, WORK( 1 ), INT_ONE, 0,
808     $                    MYCOL-LEVEL_DIST )
809*
810            CALL DMATADD( INT_ONE, NRHS, ONE, WORK( 1 ), INT_ONE, ONE,
811     $                    B( PART_OFFSET+ODD_SIZE+1 ), LLDB )
812*
813         END IF
814*
815*         Receive and add contribution to righthand sides from right
816*
817         IF( MYCOL+LEVEL_DIST.LT.NPCOL-1 ) THEN
818*
819            CALL DGERV2D( ICTXT, INT_ONE, NRHS, WORK( 1 ), INT_ONE, 0,
820     $                    MYCOL+LEVEL_DIST )
821*
822            CALL DMATADD( INT_ONE, NRHS, ONE, WORK( 1 ), INT_ONE, ONE,
823     $                    B( PART_OFFSET+ODD_SIZE+1 ), LLDB )
824*
825         END IF
826*
827         LEVEL_DIST = LEVEL_DIST*2
828*
829         GO TO 10
830   20    CONTINUE
831*       [End of GOTO Loop]
832*
833*
834*
835*       *********************************
836*       Calculate and use this proc's blocks to modify other procs
837*
838*       Solve with diagonal block
839*
840         CALL DTRTRS( 'L', 'N', 'U', INT_ONE, NRHS, AF( ODD_SIZE+2 ),
841     $                INT_ONE, B( PART_OFFSET+ODD_SIZE+1 ), LLDB, INFO )
842*
843         IF( INFO.NE.0 ) THEN
844            GO TO 90
845         END IF
846*
847*
848*
849*       *********
850         IF( MYCOL / LEVEL_DIST.LE.( NPCOL-1 ) / LEVEL_DIST-2 ) THEN
851*
852*         Calculate contribution from this block to next diagonal block
853*
854            CALL DGEMM( 'T', 'N', INT_ONE, NRHS, INT_ONE, -ONE,
855     $                  AF( ( ODD_SIZE )*1+1 ), INT_ONE,
856     $                  B( PART_OFFSET+ODD_SIZE+1 ), LLDB, ZERO,
857     $                  WORK( 1 ), INT_ONE )
858*
859*         Send contribution to diagonal block's owning processor.
860*
861            CALL DGESD2D( ICTXT, INT_ONE, NRHS, WORK( 1 ), INT_ONE, 0,
862     $                    MYCOL+LEVEL_DIST )
863*
864         END IF
865*       End of "if( mycol/level_dist .le. (npcol-1)/level_dist-2 )..."
866*
867*       ************
868         IF( ( MYCOL / LEVEL_DIST.GT.0 ) .AND.
869     $       ( MYCOL / LEVEL_DIST.LE.( NPCOL-1 ) / LEVEL_DIST-1 ) ) THEN
870*
871*
872*         Use offdiagonal block to calculate modification to diag block
873*           of processor to the left
874*
875            CALL DGEMM( 'N', 'N', INT_ONE, NRHS, INT_ONE, -ONE,
876     $                  AF( ODD_SIZE*1+2+1 ), INT_ONE,
877     $                  B( PART_OFFSET+ODD_SIZE+1 ), LLDB, ZERO,
878     $                  WORK( 1 ), INT_ONE )
879*
880*         Send contribution to diagonal block's owning processor.
881*
882            CALL DGESD2D( ICTXT, INT_ONE, NRHS, WORK( 1 ), INT_ONE, 0,
883     $                    MYCOL-LEVEL_DIST )
884*
885         END IF
886*       End of "if( mycol/level_dist.le. (npcol-1)/level_dist -1 )..."
887*
888   30    CONTINUE
889*
890      ELSE
891*
892******************** BACKSOLVE *************************************
893*
894********************************************************************
895*     .. Begin reduced system phase of algorithm ..
896********************************************************************
897*
898*
899*
900*       The last processor does not participate in the solution of the
901*       reduced system and just waits to receive its solution.
902         IF( MYCOL.EQ.NPCOL-1 ) THEN
903            GO TO 80
904         END IF
905*
906*       Determine number of steps in tree loop
907*
908         LEVEL_DIST = 1
909   40    CONTINUE
910         IF( MOD( ( MYCOL+1 ) / LEVEL_DIST, 2 ).NE.0 )
911     $      GO TO 50
912*
913         LEVEL_DIST = LEVEL_DIST*2
914*
915         GO TO 40
916   50    CONTINUE
917*
918*
919         IF( ( MYCOL / LEVEL_DIST.GT.0 ) .AND.
920     $       ( MYCOL / LEVEL_DIST.LE.( NPCOL-1 ) / LEVEL_DIST-1 ) ) THEN
921*
922*         Receive solution from processor to left
923*
924            CALL DGERV2D( ICTXT, INT_ONE, NRHS, WORK( 1 ), INT_ONE, 0,
925     $                    MYCOL-LEVEL_DIST )
926*
927*
928*         Use offdiagonal block to calculate modification to RHS stored
929*           on this processor
930*
931            CALL DGEMM( 'T', 'N', INT_ONE, NRHS, INT_ONE, -ONE,
932     $                  AF( ODD_SIZE*1+2+1 ), INT_ONE, WORK( 1 ),
933     $                  INT_ONE, ONE, B( PART_OFFSET+ODD_SIZE+1 ),
934     $                  LLDB )
935         END IF
936*       End of "if( mycol/level_dist.le. (npcol-1)/level_dist -1 )..."
937*
938*
939         IF( MYCOL / LEVEL_DIST.LE.( NPCOL-1 ) / LEVEL_DIST-2 ) THEN
940*
941*         Receive solution from processor to right
942*
943            CALL DGERV2D( ICTXT, INT_ONE, NRHS, WORK( 1 ), INT_ONE, 0,
944     $                    MYCOL+LEVEL_DIST )
945*
946*         Calculate contribution from this block to next diagonal block
947*
948            CALL DGEMM( 'N', 'N', INT_ONE, NRHS, INT_ONE, -ONE,
949     $                  AF( ( ODD_SIZE )*1+1 ), INT_ONE, WORK( 1 ),
950     $                  INT_ONE, ONE, B( PART_OFFSET+ODD_SIZE+1 ),
951     $                  LLDB )
952*
953         END IF
954*       End of "if( mycol/level_dist .le. (npcol-1)/level_dist-2 )..."
955*
956*
957*       Solve with diagonal block
958*
959         CALL DTRTRS( 'L', 'T', 'U', INT_ONE, NRHS, AF( ODD_SIZE+2 ),
960     $                INT_ONE, B( PART_OFFSET+ODD_SIZE+1 ), LLDB, INFO )
961*
962         IF( INFO.NE.0 ) THEN
963            GO TO 90
964         END IF
965*
966*
967*
968***Modification Loop *******
969*
970   60    CONTINUE
971         IF( LEVEL_DIST.EQ.1 )
972     $      GO TO 70
973*
974         LEVEL_DIST = LEVEL_DIST / 2
975*
976*         Send solution to the right
977*
978         IF( MYCOL+LEVEL_DIST.LT.NPCOL-1 ) THEN
979*
980            CALL DGESD2D( ICTXT, INT_ONE, NRHS,
981     $                    B( PART_OFFSET+ODD_SIZE+1 ), LLDB, 0,
982     $                    MYCOL+LEVEL_DIST )
983*
984         END IF
985*
986*         Send solution to left
987*
988         IF( MYCOL-LEVEL_DIST.GE.0 ) THEN
989*
990            CALL DGESD2D( ICTXT, INT_ONE, NRHS,
991     $                    B( PART_OFFSET+ODD_SIZE+1 ), LLDB, 0,
992     $                    MYCOL-LEVEL_DIST )
993*
994         END IF
995*
996         GO TO 60
997   70    CONTINUE
998*       [End of GOTO Loop]
999*
1000   80    CONTINUE
1001*          [Processor npcol - 1 jumped to here to await next stage]
1002*
1003*******************************
1004*       Reduced system has been solved, communicate solutions to nearest
1005*         neighbors in preparation for local computation phase.
1006*
1007*
1008*       Send elements of solution to next proc
1009*
1010         IF( MYCOL.LT.NPCOL-1 ) THEN
1011*
1012            CALL DGESD2D( ICTXT, INT_ONE, NRHS,
1013     $                    B( PART_OFFSET+ODD_SIZE+1 ), LLDB, 0,
1014     $                    MYCOL+1 )
1015*
1016         END IF
1017*
1018*       Receive modifications to processor's right hand sides
1019*
1020         IF( MYCOL.GT.0 ) THEN
1021*
1022            CALL DGERV2D( ICTXT, INT_ONE, NRHS, WORK( 1 ), INT_ONE, 0,
1023     $                    MYCOL-1 )
1024*
1025         END IF
1026*
1027*
1028*
1029**********************************************
1030*       Local computation phase
1031**********************************************
1032*
1033         IF( MYCOL.NE.0 ) THEN
1034*         Use the "spike" fillin to calculate contribution from previous
1035*           processor's solution.
1036*
1037            CALL DGEMM( 'N', 'N', ODD_SIZE, NRHS, 1, -ONE, AF( 1 ),
1038     $                  ODD_SIZE, WORK( 1+INT_ONE-1 ), INT_ONE, ONE,
1039     $                  B( PART_OFFSET+1 ), LLDB )
1040*
1041         END IF
1042*
1043*
1044         IF( MYCOL.LT.NP-1 ) THEN
1045*         Use factorization of odd-even connection block to modify
1046*           locally stored portion of right hand side(s)
1047*
1048            CALL DAXPY( NRHS, -( E( PART_OFFSET+ODD_SIZE ) ),
1049     $                  B( PART_OFFSET+ODD_SIZE+1 ), LLDB,
1050     $                  B( PART_OFFSET+ODD_SIZE ), LLDB )
1051*
1052         END IF
1053*
1054*       Use main partition in each processor to solve locally
1055*
1056         CALL DPTTRSV( 'T', ODD_SIZE, NRHS, D( PART_OFFSET+1 ),
1057     $                 E( PART_OFFSET+1 ), B( PART_OFFSET+1 ), LLDB,
1058     $                 INFO )
1059*
1060*
1061      END IF
1062*     End of "IF( LSAME( UPLO, 'L' ) )"...
1063   90 CONTINUE
1064*
1065*
1066*     Free BLACS space used to hold standard-form grid.
1067*
1068      IF( ICTXT_SAVE.NE.ICTXT_NEW ) THEN
1069         CALL BLACS_GRIDEXIT( ICTXT_NEW )
1070      END IF
1071*
1072  100 CONTINUE
1073*
1074*     Restore saved input parameters
1075*
1076      ICTXT = ICTXT_SAVE
1077      NP = NP_SAVE
1078*
1079*     Output minimum worksize
1080*
1081      WORK( 1 ) = WORK_SIZE_MIN
1082*
1083*
1084      RETURN
1085*
1086*     End of PDPTTRSV
1087*
1088      END
1089