/*
   CheMPS2: a spin-adapted implementation of DMRG for ab initio quantum chemistry
   Copyright (C) 2013-2018 Sebastian Wouters

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#ifndef MPICHEMPS2_CHEMPS2_H
#define MPICHEMPS2_CHEMPS2_H

//#define CHEMPS2_MPI_COMPILATION

#ifdef CHEMPS2_MPI_COMPILATION

   #include <mpi.h>
   #include <assert.h>
   #include "Tensor.h"

   #define MPI_CHEMPS2_MASTER   0

   // Diagrams which are not specifically owned are assigned round-robin over the MPI processes (see the usage sketch after the macro list below)

   #define MPI_CHEMPS2_4D1AB    1
   #define MPI_CHEMPS2_4D2AB    2
   #define MPI_CHEMPS2_4I1AB    3
   #define MPI_CHEMPS2_4I2AB    4
   #define MPI_CHEMPS2_4F1AB    5
   #define MPI_CHEMPS2_4F2AB    6
   #define MPI_CHEMPS2_4G1AB    7
   #define MPI_CHEMPS2_4G2AB    8

   #define MPI_CHEMPS2_4D3ABCD  9
   #define MPI_CHEMPS2_4D4ABCD  10
   #define MPI_CHEMPS2_4I3ABCD  11
   #define MPI_CHEMPS2_4I4ABCD  12
   #define MPI_CHEMPS2_4F3ABCD  13
   #define MPI_CHEMPS2_4F4ABCD  14
   #define MPI_CHEMPS2_4G3ABCD  15
   #define MPI_CHEMPS2_4G4ABCD  16

   #define MPI_CHEMPS2_4E1      17
   #define MPI_CHEMPS2_4E2      18
   #define MPI_CHEMPS2_4H1      19
   #define MPI_CHEMPS2_4H2      20

   #define MPI_CHEMPS2_4E3A     21
   #define MPI_CHEMPS2_4E3B     22
   #define MPI_CHEMPS2_4E4A     23
   #define MPI_CHEMPS2_4E4B     24
   #define MPI_CHEMPS2_4H3A     25
   #define MPI_CHEMPS2_4H3B     26
   #define MPI_CHEMPS2_4H4A     27
   #define MPI_CHEMPS2_4H4B     28

   #define MPI_CHEMPS2_5A1      29
   #define MPI_CHEMPS2_5A2      30
   #define MPI_CHEMPS2_5A3      31
   #define MPI_CHEMPS2_5A4      32

   #define MPI_CHEMPS2_5B1      33
   #define MPI_CHEMPS2_5B2      34
   #define MPI_CHEMPS2_5B3      35
   #define MPI_CHEMPS2_5B4      36

   #define MPI_CHEMPS2_5C1      37
   #define MPI_CHEMPS2_5C2      38
   #define MPI_CHEMPS2_5C3      39
   #define MPI_CHEMPS2_5C4      40

   #define MPI_CHEMPS2_5D1      41
   #define MPI_CHEMPS2_5D2      42
   #define MPI_CHEMPS2_5D3      43
   #define MPI_CHEMPS2_5D4      44

   #define MPI_CHEMPS2_5E1      45
   #define MPI_CHEMPS2_5E2      46
   #define MPI_CHEMPS2_5E3      47
   #define MPI_CHEMPS2_5E4      48

   #define MPI_CHEMPS2_5F1      49
   #define MPI_CHEMPS2_5F2      50
   #define MPI_CHEMPS2_5F3      51
   #define MPI_CHEMPS2_5F4      52

   #define MPI_CHEMPS2_OFFSET   53
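
   /* Usage sketch (illustration only, not part of the header): each macro above is meant
      to be passed as `macro` to MPIchemps2::owner_specific_diagram( L, macro ), which
      spreads these diagrams round-robin over the MPI processes. Assuming L = 10 active
      orbitals and 4 processes:

         const int owner = CheMPS2::MPIchemps2::owner_specific_diagram( 10, MPI_CHEMPS2_4E1 );
         // owner == ( 17 + 10 * ( 10 + 2 ) ) % 4 == 137 % 4 == 1
   */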

#endif

namespace CheMPS2{
/** MPIchemps2 class
    \author Sebastian Wouters <sebastianwouters@gmail.com>
    \date May 27, 2015

    The MPIchemps2 class contains the bookkeeping for MPI */
   class MPIchemps2{

      public:

         //! Constructor
         MPIchemps2(){}

         //! Destructor
         virtual ~MPIchemps2(){}

         //! Get the number of MPI processes
         /** \return The number of MPI processes */
         static int mpi_size(){
            #ifdef CHEMPS2_MPI_COMPILATION
               int size;
               MPI_Comm_size( MPI_COMM_WORLD, &size );
               return size;
            #else
               return 1;
            #endif
         }

         //! Get the rank of this MPI process
         /** \return The rank of this MPI process */
         static int mpi_rank(){
            #ifdef CHEMPS2_MPI_COMPILATION
               int rank;
               MPI_Comm_rank( MPI_COMM_WORLD, &rank );
               return rank;
            #else
               return 0;
            #endif
         }
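
         /* Usage sketch (illustration only): mpi_size() and mpi_rank() also work in a
            serial build, where they simply return 1 and 0. A typical guard so that only
            the master process prints status output:

               if ( CheMPS2::MPIchemps2::mpi_rank() == 0 ){ // rank 0 is MPI_CHEMPS2_MASTER in MPI builds
                  std::cout << "Number of MPI processes = " << CheMPS2::MPIchemps2::mpi_size() << std::endl;
               }
         */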

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Initialize MPI
         static void mpi_init(){
            MPI_Init( NULL, NULL ); // Command-line arguments are not forwarded to MPI
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Finalize MPI
         static void mpi_finalize(){
            MPI_Finalize();
         }
         #endif
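
         /* Usage sketch (illustration only): in an MPI build a driver program brackets
            the calculation with these two calls; the hypothetical main() below omits the
            actual DMRG setup.

               int main( int argc, char ** argv ){
                  #ifdef CHEMPS2_MPI_COMPILATION
                     CheMPS2::MPIchemps2::mpi_init();
                  #endif
                  // ... build the Hamiltonian and run the DMRG calculation (omitted) ...
                  #ifdef CHEMPS2_MPI_COMPILATION
                     CheMPS2::MPIchemps2::mpi_finalize();
                  #endif
                  return 0;
               }
         */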

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of the X-tensors
         static int owner_x(){ return MPI_CHEMPS2_MASTER; }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of a certain {A,B,Sigma0,Sigma1}-tensor
         /** \param index1 The first  DMRG lattice index of the tensor
             \param index2 The second DMRG lattice index of the tensor
             \return The owner rank */
         static int owner_absigma(const int index1, const int index2){ // 1 <= proc < 1 + L*(L+1)/2
            assert( index1 <= index2 );
            return ( 1 + index1 + (index2*(index2+1))/2 ) % mpi_size();
         }
         #endif
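
         /* Worked example (illustration only): for index1 = 1, index2 = 3 and 4 MPI
            processes, the owner of the {A,B,Sigma0,Sigma1}-tensor is

               owner_absigma( 1, 3 ) == ( 1 + 1 + ( 3 * 4 ) / 2 ) % 4 == 8 % 4 == 0

            so this particular pair happens to land on the master process. */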

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of a certain {C,D,F0,F1}-tensor
         /** \param L The number of active space orbitals
             \param index1 The first  DMRG lattice index of the tensor
             \param index2 The second DMRG lattice index of the tensor
             \return The owner rank */
         static int owner_cdf(const int L, const int index1, const int index2){ // 1 + L*(L+1)/2 <= proc < 1 + L*(L+1)
            assert( index1 <= index2 );
            return ( 1 + (L*(L+1))/2 + index1 + (index2*(index2+1))/2 ) % mpi_size();
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of a certain Q-tensor
         /** \param L The number of active space orbitals
             \param index The DMRG lattice index of the tensor
             \return The owner rank */
         static int owner_q(const int L, const int index){ // 1 + L*(L+1) <= proc < 1 + L*(L+2)
            return ( 1 + L*(L+1) + index ) % mpi_size();
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of a certain 3-index tensor for the 3-RDM
         /** \param L The number of active space orbitals
             \param index1 The first  DMRG lattice index of the tensor
             \param index2 The second DMRG lattice index of the tensor
             \param index3 The third  DMRG lattice index of the tensor
             \return The owner rank */
         static int owner_3rdm_diagram(const int L, const int index1, const int index2, const int index3){ // 1 + L*(L+1) <= proc < 1 + L*(L+1) + L*(L+1)*(L+2)/6
            assert( index1 <= index2 );
            assert( index2 <= index3 );
            return ( 1 + L*(L+1) + index1 + (index2*(index2+1))/2 + (index3*(index3+1)*(index3+2))/6 ) % mpi_size();
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of the 1c, 1d, 2d, 3e, and 3h diagrams of the effective Hamiltonian
         /** \return The owner rank */
         static int owner_1cd2d3eh(){ return MPI_CHEMPS2_MASTER; }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of a specific diagram (or group of diagrams) of the effective Hamiltonian
         /** \param L The number of active space orbitals
             \param macro The macro number of the specific diagram (or group of diagrams) as defined in the file MPIchemps2.h
             \return The owner rank */
         static int owner_specific_diagram(const int L, const int macro){
            return (macro + L*(L+2)) % mpi_size();
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of a specific excitation
         /** \param L The number of active space orbitals
             \param excitation The number of the specific excitation
             \return The owner rank */
         static int owner_specific_excitation(const int L, const int excitation){
            return (MPI_CHEMPS2_OFFSET + L*(L+2) + excitation) % mpi_size();
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Broadcast a tensor
         /** \param object The tensor to be broadcasted
             \param ROOT The MPI process which should broadcast */
         static void broadcast_tensor(Tensor * object, int ROOT){
            int arraysize = object->gKappa2index(object->gNKappa());
            MPI_Bcast(object->gStorage(), arraysize, MPI_DOUBLE, ROOT, MPI_COMM_WORLD);
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Broadcast an array of doubles
         /** \param array The array to be broadcasted
             \param length The length of the array
             \param ROOT The MPI process which should broadcast */
         static void broadcast_array_double(double * array, int length, int ROOT){
            MPI_Bcast(array, length, MPI_DOUBLE, ROOT, MPI_COMM_WORLD);
         }
         #endif
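
         /* Usage sketch (illustration only, MPI build): the master fills an array and
            then shares it with all other processes; `num_values` is a hypothetical length.

               std::vector<double> data( num_values, 0.0 );
               if ( CheMPS2::MPIchemps2::mpi_rank() == MPI_CHEMPS2_MASTER ){
                  // ... fill data on the master process (omitted) ...
               }
               CheMPS2::MPIchemps2::broadcast_array_double( data.data(), num_values, MPI_CHEMPS2_MASTER );
         */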

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Broadcast an array of integers
         /** \param array The array to be broadcasted
             \param length The length of the array
             \param ROOT The MPI process which should broadcast */
         static void broadcast_array_int(int * array, int length, int ROOT){
            MPI_Bcast(array, length, MPI_INT, ROOT, MPI_COMM_WORLD);
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Check whether all processes agree on a boolean
         /** \param mybool The process's boolean
             \return Whether the booleans of all processes are equal */
         static bool all_booleans_equal(const bool mybool){
            int my_value = ( mybool ) ? 1 : 0 ;
            int tot_value;
            MPI_Allreduce(&my_value, &tot_value, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
            return ( my_value * MPIchemps2::mpi_size() == tot_value ); // Only true if mybool is the same for all processes
         }
         #endif
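
         /* Usage sketch (illustration only, MPI build): check that every process took the
            same convergence decision; `energy_change` and `threshold` are hypothetical
            local variables.

               const bool converged = ( fabs( energy_change ) < threshold );
               assert( CheMPS2::MPIchemps2::all_booleans_equal( converged ) );
         */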

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Send a tensor from one process to another
         /** \param object The tensor to be sent
             \param SENDER The MPI process which should send the tensor
             \param RECEIVER The MPI process which should receive the tensor
             \param tag A tag which should be the same for the sender and receiver to make sure that the communication is desired */
         static void sendreceive_tensor(Tensor * object, int SENDER, int RECEIVER, int tag){
            if ( SENDER != RECEIVER ){
               const int MPIRANK = mpi_rank();
               if ( SENDER == MPIRANK ){
                  int arraysize = object->gKappa2index(object->gNKappa());
                  MPI_Send(object->gStorage(), arraysize, MPI_DOUBLE, RECEIVER, tag, MPI_COMM_WORLD);
               }
               if ( RECEIVER == MPIRANK ){
                  int arraysize = object->gKappa2index(object->gNKappa());
                  MPI_Recv(object->gStorage(), arraysize, MPI_DOUBLE, SENDER, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
               }
            }
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Add arrays of all processes and give result to ROOT
         /** \param vec_in The array which should be added
             \param vec_out The array where the result should be stored
             \param size The size of the array
             \param ROOT The MPI process which should have the result vector */
         static void reduce_array_double(double * vec_in, double * vec_out, int size, int ROOT){
            MPI_Reduce(vec_in, vec_out, size, MPI_DOUBLE, MPI_SUM, ROOT, MPI_COMM_WORLD);
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Add arrays of all processes and give everyone the result
         /** \param vec_in The array which should be added
             \param vec_out The array where the result should be stored
             \param size The size of the array */
         static void allreduce_array_double(double * vec_in, double * vec_out, int size){
            MPI_Allreduce(vec_in, vec_out, size, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
         }
         #endif
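
         /* Usage sketch (illustration only, MPI build): sum partial results over all
            processes so that every rank holds the total; `local_energy` is a hypothetical
            per-process contribution.

               double local_energy = 0.0;   // partial result of this rank (computation omitted)
               double total_energy = 0.0;
               CheMPS2::MPIchemps2::allreduce_array_double( &local_energy, &total_energy, 1 );
         */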

   };
}

#endif