!
! Copyright (C) 2013 Quantum ESPRESSO group
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
! or http://www.gnu.org/copyleft/gpl.txt .
!
!----------------------------------------------------------------------------
MODULE mp_pools
  !----------------------------------------------------------------------------
  !
  USE mp, ONLY : mp_barrier, mp_size, mp_rank, mp_comm_split
  USE parallel_include
  !
  IMPLICIT NONE
  SAVE
  !
  ! ... Pool groups (processors within a pool of k-points)
  ! ... Subdivision of image group, used for k-point parallelization
  !
  INTEGER :: npool       = 1  ! number of "k-points"-pools
  INTEGER :: nproc_pool  = 1  ! number of processors within a pool
  INTEGER :: me_pool     = 0  ! index of the processor within a pool
  INTEGER :: root_pool   = 0  ! index of the root processor within a pool
  INTEGER :: my_pool_id  = 0  ! index of my pool
  INTEGER :: inter_pool_comm  = 0  ! inter pool communicator
  INTEGER :: intra_pool_comm  = 0  ! intra pool communicator
  !
  INTEGER :: kunit = 1  ! granularity of k-point distribution
                        ! kunit=1 is the standard case; in phonon
                        ! calculations k and k+q must be in the same
                        ! pool, so kunit=2
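                        ! (i.e. k-points are assigned to pools in
                        ! blocks of kunit consecutive points, so with
                        ! kunit=2 each (k, k+q) pair stays together)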
  !
CONTAINS
  !
  !----------------------------------------------------------------------------
  SUBROUTINE mp_start_pools( npool_, parent_comm )
    !---------------------------------------------------------------------------
    !
    ! ... Divide processors (of the "parent_comm" group) into "pools"
    ! ... Requires: npool_, read from command line
    ! ...           parent_comm, typically world_comm = group of all processors
    !
    IMPLICIT NONE
    !
    INTEGER, INTENT(IN) :: npool_, parent_comm
    !
    INTEGER :: parent_nproc = 1, parent_mype  = 0
    !
#if defined (__MPI)
    !
    parent_nproc = mp_size( parent_comm )
    parent_mype  = mp_rank( parent_comm )
    !
    ! ... npool_ must have been previously read from the command line
    ! ... by a call to routine get_command_line
    !
    npool = npool_
    IF ( npool < 1 .OR. npool > parent_nproc ) CALL errore( 'mp_start_pools',&
                          'invalid number of pools, out of range', 1 )

    IF ( MOD( parent_nproc, npool ) /= 0 ) CALL errore( 'mp_start_pools', &
           'invalid number of pools, parent_nproc /= nproc_pool * npool', 1 )
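    !
    ! ... together the two checks require npool to divide parent_nproc
    ! ... evenly (e.g. parent_nproc = 8 allows npool = 1, 2, 4 or 8)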
    !
    ! ... number of cpus per pool of k-points (created inside each parent group)
    !
    nproc_pool = parent_nproc / npool
    !
    !
    ! ... my_pool_id  =  pool index for this processor    ( 0 : npool - 1 )
    ! ... me_pool     =  processor index within the pool  ( 0 : nproc_pool - 1 )
    !
    my_pool_id = parent_mype / nproc_pool
    me_pool    = MOD( parent_mype, nproc_pool )
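    !
    ! ... e.g. with parent_nproc = 8 and npool = 2 (so nproc_pool = 4),
    ! ... rank 5 gets my_pool_id = 5/4 = 1 and me_pool = MOD(5,4) = 1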
    !
    CALL mp_barrier( parent_comm )
    !
    ! ... the intra_pool_comm communicator is created
    !
    CALL mp_comm_split ( parent_comm, my_pool_id, parent_mype, intra_pool_comm )
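    !
    ! ... (my_pool_id is the split color and parent_mype the ordering key:
    ! ... ranks with the same my_pool_id share the new communicator)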
    !
    CALL mp_barrier( parent_comm )
    !
    ! ... the inter_pool_comm communicator is created
    !
    CALL mp_comm_split ( parent_comm, me_pool, parent_mype, inter_pool_comm )
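    !
    ! ... (here the color is me_pool, so processors holding the same
    ! ... local index across different pools are grouped together)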
    !
#endif
    !
    RETURN
  END SUBROUTINE mp_start_pools
  !
END MODULE mp_pools



!----------------------------------------------------------------------------
MODULE mp_orthopools
  !----------------------------------------------------------------------------
  !
  USE mp, ONLY : mp_barrier, mp_size, mp_rank, mp_comm_split
  USE mp_pools
  USE parallel_include
  !
  IMPLICIT NONE
  SAVE
  !
  ! ... Ortho-pool groups: the n-th orthopool collects the (n+1)-th CPU of each pool,
  ! ... i.e. orthopool 0 -> first CPU of each pool
  ! ...      orthopool 1 -> second CPU of each pool
  !
  INTEGER :: northopool       = 1  ! number of orthopools, must be equal to nproc_pool
  INTEGER :: nproc_orthopool  = 1  ! number of processors within an orthopool, must be equal to npool
  INTEGER :: me_orthopool     = 0  ! index of the processor within an orthopool,
                                   ! must be equal to the pool id of that cpu
  INTEGER :: root_orthopool   = 0  ! index of the root processor within an orthopool
  INTEGER :: my_orthopool_id  = 0  ! index of my orthopool
  INTEGER :: inter_orthopool_comm  = 0  ! inter orthopool communicator
  INTEGER :: intra_orthopool_comm  = 0  ! intra orthopool communicator
  !
  LOGICAL,PRIVATE :: init_orthopools = .false.
  !
CONTAINS
  !
  !----------------------------------------------------------------------------
  SUBROUTINE mp_stop_orthopools( )
    USE mp, ONLY : mp_comm_free
    IMPLICIT NONE
    ! Free the orthopool communicators (if they have been set up)
    IF(init_orthopools) THEN
      CALL mp_comm_free ( inter_orthopool_comm )
      CALL mp_comm_free ( intra_orthopool_comm )
      init_orthopools = .false.
    ENDIF
    !
    RETURN
  END SUBROUTINE mp_stop_orthopools
  !
  !----------------------------------------------------------------------------
  SUBROUTINE mp_start_orthopools( parent_comm )
    !---------------------------------------------------------------------------
    !
    ! ... Divide processors (of the "parent_comm" group) into "orthopools"
    ! ... Requires: pools must already be initialized
    ! ...           parent_comm, typically world_comm = group of all processors
    !
    IMPLICIT NONE
    !
    INTEGER, INTENT(IN) :: parent_comm
    !
    INTEGER :: parent_nproc = 1, parent_mype  = 0
    !
    ! Only initialise this once (the check is needed because orthopools
    ! are set up later, during the EXX bootstrap, not at startup)
    IF(init_orthopools) RETURN
    init_orthopools = .true.
    !
#if defined (__MPI)
    !
    parent_nproc = mp_size( parent_comm )
    parent_mype  = mp_rank( parent_comm )
    !
    northopool = nproc_pool
    !
    ! ... number of cpus per orthopool
    nproc_orthopool = npool
    !
    !
    ! ... my_orthopool_id  =  orthopool index for this processor    ( 0 : northopool - 1 )
    ! ... me_orthopool     =  processor index within the orthopool  ( 0 : nproc_orthopool - 1 )
    my_orthopool_id = MOD(parent_mype, northopool)
    me_orthopool    = my_pool_id
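    !
    ! ... e.g. with npool = 2 and nproc_pool = 4 as above: northopool = 4,
    ! ... nproc_orthopool = 2, and rank 5 gets my_orthopool_id = MOD(5,4) = 1
    ! ... and me_orthopool = my_pool_id = 1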
    !
    CALL mp_barrier( parent_comm )
    !
    ! ... the intra_orthopool_comm communicator is created
    !
    CALL mp_comm_split ( parent_comm, my_orthopool_id, parent_mype, intra_orthopool_comm )
    !
    CALL mp_barrier( parent_comm )
    !
    ! ... the inter_orthopool_comm communicator is created
    !
    CALL mp_comm_split ( parent_comm, me_orthopool, parent_mype, inter_orthopool_comm )
    !
#endif
    !
    RETURN
  END SUBROUTINE mp_start_orthopools
  !
END MODULE mp_orthopools
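!
! A minimal usage sketch (illustration only, not part of the modules;
! "world_comm" stands for the global communicator, as provided e.g. by
! the mp_world module, and npool_ for the value parsed from the command
! line):
!
!   CALL mp_start_pools( npool_, world_comm )   ! at program startup
!   CALL mp_start_orthopools( world_comm )      ! later, e.g. for EXX
!   ...
!   CALL mp_stop_orthopools( )                  ! free orthopool communicators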