      SUBROUTINE PCDBSV( N, BWL, BWU, NRHS, A, JA, DESCA, B, IB, DESCB,
     $                   WORK, LWORK, INFO )
*
*
*
*  -- ScaLAPACK routine (version 1.7) --
*     University of Tennessee, Knoxville, Oak Ridge National Laboratory,
*     and University of California, Berkeley.
*     November 15, 1997
*
*     .. Scalar Arguments ..
      INTEGER            BWL, BWU, IB, INFO, JA, LWORK, N, NRHS
*     ..
*     .. Array Arguments ..
      INTEGER            DESCA( * ), DESCB( * )
      COMPLEX            A( * ), B( * ), WORK( * )
*     ..
*
*
*  Purpose
*  =======
*
*  PCDBSV solves a system of linear equations
*
*                      A(1:N, JA:JA+N-1) * X = B(IB:IB+N-1, 1:NRHS)
*
*  where A(1:N, JA:JA+N-1) is an N-by-N complex banded
*  diagonally dominant-like distributed matrix with lower
*  bandwidth BWL and upper bandwidth BWU.
*
*  Gaussian elimination without pivoting is used to factor
*  a reordering of the matrix into L U.
*
*  See PCDBTRF and PCDBTRS for details.
*
*  =====================================================================
*
*  Arguments
*  =========
*
*
*  N       (global input) INTEGER
*          The number of rows and columns to be operated on, i.e. the
*          order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
*
*  BWL     (global input) INTEGER
*          Number of subdiagonals. 0 <= BWL <= N-1
*
*  BWU     (global input) INTEGER
*          Number of superdiagonals. 0 <= BWU <= N-1
*
*  NRHS    (global input) INTEGER
*          The number of right hand sides, i.e., the number of columns
*          of the distributed submatrix B(IB:IB+N-1, 1:NRHS).
*          NRHS >= 0.
*
*  A       (local input/local output) COMPLEX pointer into
*          local memory to an array with first dimension
*          LLD_A >= (bwl+bwu+1) (stored in DESCA).
*          On entry, this array contains the local pieces of the
*            distributed matrix. This local portion is stored in the
*            packed banded format used in LAPACK. Please see the Notes
*            below and the ScaLAPACK manual for more detail on the
*            format of distributed matrices.
*          On exit, this array contains details of the factorization.
*          Note that permutations are performed on the matrix, so that
*            the factors returned are different from those returned
*            by LAPACK.
*
*  JA      (global input) INTEGER
*          The index in the global array A that points to the start of
*          the matrix to be operated on (which may be either all of A
*          or a submatrix of A).
*
*  DESCA   (global and local input) INTEGER array of dimension DLEN.
*          if 1D type (DTYPE_A=501), DLEN >= 7;
*          if 2D type (DTYPE_A=1), DLEN >= 9.
*          The array descriptor for the distributed matrix A.
*          Contains information on the mapping of A to memory. Please
*          see NOTES below for full description and options.
*
*  B       (local input/local output) COMPLEX pointer into
*          local memory to an array of local leading dimension
*          lld_b >= NB.
*          On entry, this array contains the local pieces of the
*          right hand sides B(IB:IB+N-1, 1:NRHS).
*          On exit, this array contains the local pieces of the
*          distributed solution matrix X.
*
*  IB      (global input) INTEGER
*          The row index in the global array B that points to the first
*          row of the matrix to be operated on (which may be either
*          all of B or a submatrix of B).
*
*  DESCB   (global and local input) INTEGER array of dimension DLEN.
*          if 1D type (DTYPE_B=502), DLEN >= 7;
*          if 2D type (DTYPE_B=1), DLEN >= 9.
*          The array descriptor for the distributed matrix B.
*          Contains information on the mapping of B to memory. Please
*          see NOTES below for full description and options.
*
*  WORK    (local workspace/local output)
*          COMPLEX temporary workspace. This space may
*          be overwritten in between calls to routines. WORK must be
*          the size given in LWORK.
*          On exit, WORK( 1 ) contains the minimal LWORK.
*
*  LWORK   (local input or global input) INTEGER
*          Size of user-input workspace WORK.
*          If LWORK is too small, the minimal acceptable size will be
*          returned in WORK( 1 ) and an error code is returned.
*          LWORK >= NB*(bwl+bwu) + 6*max(bwl,bwu)*max(bwl,bwu)
*          + max( max(bwl,bwu)*NRHS, max(bwl,bwu)*max(bwl,bwu) )
*          (A commented sizing sketch appears after the argument
*          descriptions below.)
*
*  INFO    (global output) INTEGER
*          = 0:  successful exit
*          < 0:  If the i-th argument is an array and the j-th entry
*                had an illegal value, then INFO = -(i*100+j); if the
*                i-th argument is a scalar and had an illegal value,
*                then INFO = -i.
*          > 0:  If INFO = K <= NPROCS, the submatrix stored on
*                processor INFO and factored locally was not
*                diagonally dominant-like, and
*                the factorization was not completed.
*                If INFO = K > NPROCS, the submatrix stored on
*                processor INFO-NPROCS representing interactions with
*                other processors was not
*                stably factorable without interchanges,
*                and the factorization was not completed.
*
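*  Example (illustrative only, not part of this routine): a caller
*    might size WORK from the LWORK formula above and then call
*    PCDBSV directly.  Here JA = IB = 1, NB is the block size from
*    DESCA, and the matrix A, right hand sides B and descriptors
*    DESCA, DESCB are assumed to have been set up already.
*
*       LWORK = NB*( BWL+BWU ) + 6*MAX( BWL, BWU )*MAX( BWL, BWU ) +
*    $          MAX( MAX( BWL, BWU )*NRHS,
*    $               MAX( BWL, BWU )*MAX( BWL, BWU ) )
*       CALL PCDBSV( N, BWL, BWU, NRHS, A, 1, DESCA, B, 1, DESCB,
*    $               WORK, LWORK, INFO )
*       IF( INFO.NE.0 )
*    $     ... handle the error ...
*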
*  =====================================================================
*
*
*  Restrictions
*  ============
*
*  The following are restrictions on the input parameters. Some of
*    these are temporary and will be removed in future releases, while
*    others may reflect fundamental technical limitations.  (A
*    commented sketch that checks these restrictions follows this
*    section.)
*
*    Non-cyclic restriction: VERY IMPORTANT!
*      P*NB >= mod(JA-1,NB)+N.
*      The mapping for matrices must be blocked, reflecting the nature
*      of the divide and conquer algorithm as a task-parallel algorithm.
*      This formula in words is: no processor may have more than one
*      chunk of the matrix.
*
*    Blocksize cannot be too small:
*      If the matrix spans more than one processor, the following
*      restriction on NB, the size of each block on each processor,
*      must hold:
*      NB >= 2*MAX(BWL,BWU)
*      The bulk of parallel computation is done on the matrix of size
*      O(NB) on each processor. If this is too small, divide and conquer
*      is a poor choice of algorithm.
*
*    Submatrix reference:
*      JA = IB
*      Alignment restriction that prevents unnecessary communication.
*
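*  Example (illustrative only): a caller could verify the three
*    restrictions above before the call.  P denotes the number of
*    processes in the one-dimensional grid (NPROW*NPCOL), and NB is
*    DESCA( NB_ ) for a 2D descriptor or DESCA( 4 ) for a 1D one.
*
*       CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*       P = NPROW*NPCOL
*       IF( P*NB .LT. MOD( JA-1, NB )+N )
*    $     ... non-cyclic restriction violated ...
*       IF( P.GT.1 .AND. NB.LT.2*MAX( BWL, BWU ) )
*    $     ... blocksize too small ...
*       IF( JA.NE.IB )
*    $     ... alignment restriction violated ...
*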
*
*  =====================================================================
*
*
*  Notes
*  =====
*
*  If the factorization routine and the solve routine are to be called
*    separately (to solve various sets of right-hand sides using the
*    same coefficient matrix), the auxiliary space AF *must not be
*    altered* between calls to the factorization routine and the solve
*    routine.
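*
*    A sketch of that separate-call usage (illustrative only): AF is
*    an auxiliary array of length LAF supplied by the caller, with LAF
*    as documented in PCDBTRF; B1, B2 and NRHS1, NRHS2 are placeholder
*    names for two successive sets of right-hand sides.
*
*       CALL PCDBTRF( N, BWL, BWU, A, JA, DESCA, AF, LAF, WORK,
*    $                LWORK, INFO )
*       ... AF must not be modified from here on ...
*       CALL PCDBTRS( 'N', N, BWL, BWU, NRHS1, A, JA, DESCA, B1, IB,
*    $                DESCB, AF, LAF, WORK, LWORK, INFO )
*       CALL PCDBTRS( 'N', N, BWL, BWU, NRHS2, A, JA, DESCA, B2, IB,
*    $                DESCB, AF, LAF, WORK, LWORK, INFO )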
*
*  The best algorithm for solving banded and tridiagonal linear systems
*    depends on a variety of parameters, especially the bandwidth.
*    Currently, only algorithms designed for the case N/P >> bw are
*    implemented. These go by many names, including Divide and Conquer,
*    Partitioning, domain decomposition-type, etc.
*
*  Algorithm description: Divide and Conquer
*
*    The Divide and Conquer algorithm assumes the matrix is narrowly
*      banded compared with the number of equations. In this situation,
*      it is best to distribute the input matrix A one-dimensionally,
*      with columns atomic and rows divided amongst the processes.
*      The basic algorithm divides the banded matrix up into
*      P pieces with one stored on each processor,
*      and then proceeds in 2 phases for the factorization or 3 for the
*      solution of a linear system.
*      1) Local Phase:
*         The individual pieces are factored independently and in
*         parallel. These factors are applied to the matrix creating
*         fill-in, which is stored in a non-inspectable way in auxiliary
*         space AF. Mathematically, this is equivalent to reordering
*         the matrix A as P A P^T and then factoring the principal
*         leading submatrix of size equal to the sum of the sizes of
*         the matrices factored on each processor. The factors of
*         these submatrices overwrite the corresponding parts of A
*         in memory.
*      2) Reduced System Phase:
*         A small (max(bwl,bwu)*(P-1)) system is formed representing
*         interaction of the larger blocks, and is stored (as are its
*         factors) in the space AF. A parallel Block Cyclic Reduction
*         algorithm is used. For a linear system, a parallel front solve
*         followed by an analogous backsolve, both using the structure
*         of the factored matrix, are performed.
*      3) Backsubstitution Phase:
*         For a linear system, a local backsubstitution is performed on
*         each processor in parallel.
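*
*      As a concrete illustration of the sizes involved (the numbers
*      are chosen only for this example): with P = 4 processes,
*      N = 4000, NB = 1000 and BWL = BWU = 10, phase 1 factors a band
*      of about NB = 1000 rows on each process, while the reduced
*      system of phase 2 has order max(BWL,BWU)*(P-1) = 10*3 = 30.
*      This is why the method is intended for the case N/P >> bw.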
*
*
*  Descriptors
*  ===========
*
*  Descriptors now have *types* and differ from ScaLAPACK 1.0.
*
*  Note: banded codes can use either the old two-dimensional
*    or new one-dimensional descriptors, though the processor grid in
*    both cases *must be one-dimensional*. We describe both types below.
*
*  Each global data object is described by an associated description
*  vector.  This vector stores the information required to establish
*  the mapping between an object element and its corresponding process
*  and memory location.
*
*  Let A be a generic term for any 2D block cyclically distributed
*  array.
*  Such a global array has an associated description vector DESCA.
*  In the following comments, the character _ should be read as
*  "of the global array".
*
*  NOTATION        STORED IN      EXPLANATION
*  --------------- -------------- --------------------------------------
*  DTYPE_A(global) DESCA( DTYPE_ )The descriptor type.  In this case,
*                                 DTYPE_A = 1.
*  CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
*                                 the BLACS process grid A is distribu-
*                                 ted over. The context itself is glo-
*                                 bal, but the handle (the integer
*                                 value) may vary.
*  M_A    (global) DESCA( M_ )    The number of rows in the global
*                                 array A.
*  N_A    (global) DESCA( N_ )    The number of columns in the global
*                                 array A.
*  MB_A   (global) DESCA( MB_ )   The blocking factor used to distribute
*                                 the rows of the array.
*  NB_A   (global) DESCA( NB_ )   The blocking factor used to distribute
*                                 the columns of the array.
*  RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
*                                 row of the array A is distributed.
*  CSRC_A (global) DESCA( CSRC_ ) The process column over which the
*                                 first column of the array A is
*                                 distributed.
*  LLD_A  (local)  DESCA( LLD_ )  The leading dimension of the local
*                                 array.  LLD_A >= MAX(1,LOCr(M_A)).
*
*  Let K be the number of rows or columns of a distributed matrix,
*  and assume that its process grid has dimension p x q.
*  LOCr( K ) denotes the number of elements of K that a process
*  would receive if K were distributed over the p processes of its
*  process column.
*  Similarly, LOCc( K ) denotes the number of elements of K that a
*  process would receive if K were distributed over the q processes of
*  its process row.
*  The values of LOCr() and LOCc() may be determined via a call to the
*  ScaLAPACK tool function, NUMROC:
*          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
*          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
*  An upper bound for these quantities may be computed by:
*          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
*          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
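*
*  Example (illustrative only): the NUMROC calls above in context.
*    NUMROC is the ScaLAPACK tool function named above; the local
*    variable names LOCRM and LOCCN are just placeholders for this
*    sketch.
*
*       CALL BLACS_GRIDINFO( DESCA( CTXT_ ), NPROW, NPCOL, MYROW,
*    $                       MYCOL )
*       LOCRM = NUMROC( M, DESCA( MB_ ), MYROW, DESCA( RSRC_ ),
*    $                  NPROW )
*       LOCCN = NUMROC( N, DESCA( NB_ ), MYCOL, DESCA( CSRC_ ),
*    $                  NPCOL )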
*
*
*  One-dimensional descriptors:
*
*  One-dimensional descriptors are a new addition to ScaLAPACK since
*    version 1.0. They simplify and shorten the descriptor for 1D
*    arrays.
*
*  Since ScaLAPACK supports two-dimensional arrays as the fundamental
*    object, we allow 1D arrays to be distributed either over the
*    first dimension of the array (as if the grid were P-by-1) or the
*    second dimension (as if the grid were 1-by-P). This choice is
*    indicated by the descriptor type (501 or 502)
*    as described below.
*
*    IMPORTANT NOTE: the actual BLACS grid represented by the
*    CTXT entry in the descriptor may be *either* P-by-1 or 1-by-P
*    irrespective of which one-dimensional descriptor type
*    (501 or 502) is input.
*    This routine will interpret the grid properly either way.
*    ScaLAPACK routines *do not support intercontext operations* so that
*    the grid passed to a single ScaLAPACK routine *must be the same*
*    for all array descriptors passed to that routine.
*
*    NOTE: In all cases where 1D descriptors are used, 2D descriptors
*    may also be used, since a one-dimensional array is a special case
*    of a two-dimensional array with one dimension of size unity.
*    The two-dimensional array used in this case *must* be of the
*    proper orientation:
*      If the appropriate one-dimensional descriptor is DTYPE_A=501
*      (1 by P type), then the two-dimensional descriptor must
*      have a CTXT value that refers to a 1 by P BLACS grid;
*      If the appropriate one-dimensional descriptor is DTYPE_A=502
*      (P by 1 type), then the two-dimensional descriptor must
*      have a CTXT value that refers to a P by 1 BLACS grid.
*
*
*  Summary of allowed descriptors, types, and BLACS grids:
*  DTYPE           501         502         1         1
*  BLACS grid      1xP or Px1  1xP or Px1  1xP       Px1
*  -----------------------------------------------------
*  A               OK          NO          OK        NO
*  B               NO          OK          NO        OK
*
*  Note that a consequence of this chart is that it is not possible
*    for *both* DTYPE_A and DTYPE_B to be 2D_type(1), as these lead
*    to opposite requirements for the orientation of the BLACS grid,
*    and as noted before, the *same* BLACS context must be used in
*    all descriptors in a single ScaLAPACK subroutine call.
*
*  Let A be a generic term for any 1D block cyclically distributed
*  array.
*  Such a global array has an associated description vector DESCA.
*  In the following comments, the character _ should be read as
*  "of the global array".
*
*  NOTATION        STORED IN  EXPLANATION
*  --------------- ---------- ------------------------------------------
*  DTYPE_A(global) DESCA( 1 ) The descriptor type. For 1D grids,
*                                TYPE_A = 501: 1-by-P grid.
*                                TYPE_A = 502: P-by-1 grid.
*  CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
*                                the BLACS process grid A is distribu-
*                                ted over. The context itself is glo-
*                                bal, but the handle (the integer
*                                value) may vary.
*  N_A    (global) DESCA( 3 ) The size of the array dimension being
*                                distributed.
*  NB_A   (global) DESCA( 4 ) The blocking factor used to distribute
*                                the distributed dimension of the array.
*  SRC_A  (global) DESCA( 5 ) The process row or column over which the
*                                first row or column of the array
*                                is distributed.
*  LLD_A  (local)  DESCA( 6 ) The leading dimension of the local array
*                                storing the local blocks of the distri-
*                                buted array A. Minimum value of LLD_A
*                                depends on TYPE_A.
*                                TYPE_A = 501: LLD_A >=
*                                   max( size of undistributed
*                                   dimension, 1 ).
*                                TYPE_A = 502: LLD_A >= max( NB_A, 1 ).
*  Reserved        DESCA( 7 ) Reserved for future use.
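*
*  Example (illustrative only): putting the chart and the table above
*    together, a caller using 1D descriptors might fill DESCA (type
*    501) and DESCB (type 502) as below.  ICTXT is the common BLACS
*    context, NB the block size, the source process is taken to be 0
*    here, and LLDB >= NB is the local leading dimension of B.
*
*       DESCA( 1 ) = 501
*       DESCA( 2 ) = ICTXT
*       DESCA( 3 ) = N
*       DESCA( 4 ) = NB
*       DESCA( 5 ) = 0
*       DESCA( 6 ) = BWL+BWU+1
*       DESCA( 7 ) = 0
*
*       DESCB( 1 ) = 502
*       DESCB( 2 ) = ICTXT
*       DESCB( 3 ) = N
*       DESCB( 4 ) = NB
*       DESCB( 5 ) = 0
*       DESCB( 6 ) = LLDB
*       DESCB( 7 ) = 0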
*
*
*
*  =====================================================================
*
*  Code Developer: Andrew J. Cleary, University of Tennessee.
*    Current address: Lawrence Livermore National Labs.
*  This version released: August, 2001.
*
*  =====================================================================
*
*     ..
*     .. Parameters ..
      REAL               ONE, ZERO
      PARAMETER          ( ONE = 1.0E+0 )
      PARAMETER          ( ZERO = 0.0E+0 )
      COMPLEX            CONE, CZERO
      PARAMETER          ( CONE = ( 1.0E+0, 0.0E+0 ) )
      PARAMETER          ( CZERO = ( 0.0E+0, 0.0E+0 ) )
      INTEGER            INT_ONE
      PARAMETER          ( INT_ONE = 1 )
      INTEGER            DESCMULT, BIGNUM
      PARAMETER          (DESCMULT = 100, BIGNUM = DESCMULT * DESCMULT)
      INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
     $                   LLD_, MB_, M_, NB_, N_, RSRC_
      PARAMETER          ( BLOCK_CYCLIC_2D = 1, DLEN_ = 9, DTYPE_ = 1,
     $                     CTXT_ = 2, M_ = 3, N_ = 4, MB_ = 5, NB_ = 6,
     $                     RSRC_ = 7, CSRC_ = 8, LLD_ = 9 )
*     ..
*     .. Local Scalars ..
      INTEGER            ICTXT, MYCOL, MYROW, NB, NPCOL, NPROW,
     $                   WS_FACTOR
*     ..
*     .. External Subroutines ..
      EXTERNAL           PCDBTRF, PCDBTRS, PXERBLA
*     ..
*     .. Executable Statements ..
*
*     Note: to avoid duplication, most error checking is not performed
*           in this routine and is left to routines
*           PCDBTRF and PCDBTRS.
*
*     Begin main code
*
      INFO = 0
*
*     Get block size to calculate workspace requirements
*
      IF( DESCA( DTYPE_ ) .EQ. BLOCK_CYCLIC_2D ) THEN
         NB = DESCA( NB_ )
         ICTXT = DESCA( CTXT_ )
      ELSEIF( DESCA( DTYPE_ ) .EQ. 501 ) THEN
         NB = DESCA( 4 )
         ICTXT = DESCA( 2 )
      ELSE
         INFO = -( 7*100 + DTYPE_ )
         CALL PXERBLA( ICTXT, 'PCDBSV', -INFO )
         RETURN
      ENDIF
*
      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*
*
*     Size needed for AF in factorization
*
      WS_FACTOR = NB*(BWL+BWU)+6*MAX(BWL,BWU)*MAX(BWL,BWU)
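*
*     WORK( 1:WS_FACTOR ) holds the auxiliary fill-in space AF created
*     by PCDBTRF and reused by PCDBTRS; the remainder of WORK, from
*     WORK( 1+WS_FACTOR ) onward, is passed to those routines as
*     ordinary scratch workspace.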
*
*     Factor the matrix
*
      CALL PCDBTRF( N, BWL, BWU, A, JA, DESCA, WORK,
     $              MIN( LWORK, WS_FACTOR ), WORK( 1+WS_FACTOR ),
     $              LWORK-WS_FACTOR, INFO )
*
*     Check info for error conditions
*
      IF( INFO.NE.0 ) THEN
         IF( INFO .LT. 0 ) THEN
            CALL PXERBLA( ICTXT, 'PCDBSV', -INFO )
         ENDIF
         RETURN
      END IF
*
*     Solve the system using the factorization
*
      CALL PCDBTRS( 'N', N, BWL, BWU, NRHS, A, JA, DESCA, B, IB, DESCB,
     $              WORK, MIN( LWORK, WS_FACTOR ), WORK( 1+WS_FACTOR ),
     $              LWORK-WS_FACTOR, INFO )
*
*     Check info for error conditions
*
      IF( INFO.NE.0 ) THEN
         CALL PXERBLA( ICTXT, 'PCDBSV', -INFO )
         RETURN
      END IF
*
      RETURN
*
*     End of PCDBSV
*
      END