1 //////////////////////////////////////////////////////////////////////////////////////
2 // This file is distributed under the University of Illinois/NCSA Open Source License.
3 // See LICENSE file in top directory for details.
4 //
5 // Copyright (c) 2020 QMCPACK developers.
6 //
7 // File developed by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
8 //                    Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
9 //                    Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
10 //                    Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
11 //                    Mark Dewing, markdewing@gmail.com, University of Illinois at Urbana-Champaign
12 //                    Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
13 //                    Peter Doak, doakpw@ornl.gov, Oak Ridge National Laboratory
14 //
15 // File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
16 //////////////////////////////////////////////////////////////////////////////////////
17 
18 
19 #ifndef OHMMS_COMMUNICATE_H
20 #define OHMMS_COMMUNICATE_H
21 
22 #ifdef HAVE_CONFIG_H
23 #include "config.h"
24 #endif
25 
26 #ifdef HAVE_MPI
27 #include "mpi3/environment.hpp"
28 namespace mpi3 = boost::mpi3;
29 #endif
30 
#ifdef HAVE_MPI
/** Communicator type aliases for the MPI build
 *
 * Maps the generic names onto the real MPI handle types so that
 * Communicate can be written against one set of names.
 */
struct CommunicatorTraits
{
  using mpi_comm_type = MPI_Comm;
  using status        = MPI_Status;
  using request       = MPI_Request;
};

#else
/** Communicator type aliases for the serial (no-MPI) build
 *
 * Plain ints stand in for the MPI handle types, keeping the Communicate
 * interface compilable without an MPI installation. The two constants
 * mimic the MPI sentinel handles of the same names.
 */
struct CommunicatorTraits
{
  using mpi_comm_type = int;
  using status        = int;
  using request       = int;
  static const int MPI_COMM_NULL    = 0;
  static const int MPI_REQUEST_NULL = 1;
};
#endif
49 
50 #include <string>
51 #include <vector>
52 #include <utility>
53 #include <unistd.h>
54 #include <cstring>
55 
56 #include "Message/AppAbort.h"
57 
/**@class Communicate
 * @ingroup Message
 * @brief Wraps information on parallelism.
 *
 *  Very limited in functionality. Currently, only serial mode or MPI mode
 *  is available (the two are mutually exclusive).
 * @todo Possibly make this a general manager class for mpi+openmp or mpi+mpi
 */
66 class Communicate : public CommunicatorTraits
67 {
68 public:
69   ///constructor
70   Communicate();
71 
72   ///constructor from mpi3 environment
73 #ifdef HAVE_MPI
74   Communicate(const mpi3::environment& env);
75 
76   ///constructor with communicator
77   Communicate(const mpi3::communicator& in_comm);
78 #endif
79 
80   /** constructor that splits in_comm
81    */
82   Communicate(const Communicate& in_comm, int nparts);
83 
84   /**destructor
85    * Call proper finalization of Communication library
86    */
87   virtual ~Communicate();
88 
89   ///disable constructor
90   Communicate(const Communicate&) = delete;
91 
92   // Only for unit tests
93   void initialize(int argc, char** argv);
94 
95 #ifdef HAVE_MPI
96   void initialize(const mpi3::environment& env);
97 #endif
98   /// initialize this as a node/shared-memory communicator
99   void initializeAsNodeComm(const Communicate& parent);
100   void finalize();
101   void barrier() const;
102   void abort() const;
103   void barrier_and_abort(const std::string& msg) const;
104   void set_world();
105 
106 #if defined(HAVE_MPI)
107   ///operator for implicit conversion to MPI_Comm
MPI_Comm()108   inline operator MPI_Comm() const { return myMPI; }
109 #endif
110 
111   ///return the Communicator ID (typically MPI_WORLD_COMM)
getMPI()112   inline mpi_comm_type getMPI() const { return myMPI; }
113 
114   ///return the rank
rank()115   inline int rank() const { return d_mycontext; }
116   ///return the number of tasks
size()117   inline int size() const { return d_ncontexts; }
118 
119   ///return the group id
getGroupID()120   inline int getGroupID() const { return d_groupid; }
121   ///return the number of intra_comms which belong to the same group
getNumGroups()122   inline int getNumGroups() const { return d_ngroups; }
123   //inline bool master() const { return (d_mycontext == 0);}
124   //intra_comm_type split(int n);
125   void cleanupMessage(void*);
setNodeID(int i)126   inline void setNodeID(int i) { d_mycontext = i; }
setNumNodes(int n)127   inline void setNumNodes(int n) { d_ncontexts = n; }
128 
setName(const std::string & aname)129   inline void setName(const std::string& aname) { myName = aname; }
getName()130   inline const std::string& getName() const { return myName; }
131 
132   ///return true if the current MPI rank is the group lead
isGroupLeader()133   inline bool isGroupLeader() { return d_mycontext == 0; }
134 
135   // MMORALES: leaving this here temprarily, but it doesn;t belong here.
136   // MMORALES: FIX FIX FIX
137 #ifdef HAVE_MPI
138 
139 // For Mac OS X
140 #ifndef HOST_NAME_MAX
141 #ifdef _POSIX_HOST_NAME_MAX
142 #define HOST_NAME_MAX _POSIX_HOST_NAME_MAX
143 #endif
144 #endif
145 
head_nodes(MPI_Comm & MPI_COMM_HEAD_OF_NODES)146   inline bool head_nodes(MPI_Comm& MPI_COMM_HEAD_OF_NODES)
147   {
148     char hostname[HOST_NAME_MAX];
149     gethostname(hostname, HOST_NAME_MAX);
150     int myrank = rank(), nprocs = size();
151     char* dummy = new char[nprocs * HOST_NAME_MAX];
152     MPI_Allgather(hostname, HOST_NAME_MAX, MPI_CHAR, dummy, HOST_NAME_MAX, MPI_CHAR, myMPI);
153     bool head_of_node = true;
154     for (int i = 0; i < myrank; i++)
155       if (strcmp(hostname, dummy + i * HOST_NAME_MAX) == 0)
156       {
157         head_of_node = false;
158         break;
159       }
160     int key = head_of_node ? 0 : 10;
161     MPI_Comm_split(myMPI, key, myrank, &MPI_COMM_HEAD_OF_NODES);
162     delete[] dummy;
163     return head_of_node;
164   }
165 #endif
166 
167 #ifdef HAVE_MPI
168   /** A hack to get around Communicate not supporting flexible processor subgroups
169    *
170    *  MMORALES:
171    *  right now there is no easy way to use Communicate
172    *  for generic processor subgroups, so calling split on myMPI
173    *  and managing the communicator directly
174    *  \todo THIS MUST BE FIXED!!!
175    */
split_comm(int key,MPI_Comm & comm)176   inline void split_comm(int key, MPI_Comm& comm)
177   {
178     int myrank = rank();
179     MPI_Comm_split(myMPI, key, myrank, &comm);
180   }
181 #endif
182 
183   template<typename T>
184   void allreduce(T&);
185   template<typename T>
186   void reduce(T&);
187   template<typename T>
188   void reduce(T* restrict, T* restrict, int n);
189   template<typename T>
190   void reduce_in_place(T* restrict, int n);
191   template<typename T>
192   void bcast(T&);
193   template<typename T>
194   void bcast(T* restrict, int n);
195   template<typename T>
196   void send(int dest, int tag, T&);
197   template<typename T>
198   void gather(T& sb, T& rb, int dest = 0);
199   template<typename T, typename IT>
200   void gatherv(T& sb, T& rb, IT& counts, IT& displ, int dest = 0);
201   template<typename T>
202   void allgather(T& sb, T& rb, int count);
203   template<typename T, typename IT>
204   void allgatherv(T& sb, T& rb, IT& counts, IT& displ);
205   template<typename T>
206   void scatter(T& sb, T& rb, int dest = 0);
207   template<typename T, typename IT>
208   void scatterv(T& sb, T& rb, IT& counts, IT& displ, int source = 0);
209   template<typename T>
210   request irecv(int source, int tag, T&);
211   template<typename T>
212   request isend(int dest, int tag, T&);
213   template<typename T>
214   request irecv(int source, int tag, T*, int n);
215   template<typename T>
216   request isend(int dest, int tag, T*, int n);
217   template<typename T, typename IT>
218   void gatherv(T* sb, T* rb, int n, IT& counts, IT& displ, int dest = 0);
219   template<typename T, typename TMPI, typename IT>
220   void gatherv_in_place(T* buf, TMPI& datatype, IT& counts, IT& displ, int dest = 0);
221   template<typename T>
222   void allgather(T* sb, T* rb, int count);
223   template<typename T>
224   void gsum(T&);
225 
226 protected:
227   /** Raw communicator
228    *
229    *  Currently it is only owned by Communicate which manages its creation and destruction
230    *  After switching to mpi3::communicator, myMPI is only a reference to the raw communicator owned by mpi3::communicator
231    */
232   mpi_comm_type myMPI;
233   /// Communicator name
234   std::string myName;
235   /// Rank
236   int d_mycontext;
237   /// Size
238   int d_ncontexts;
239   /// Group ID of the current communicator in the parent communicator
240   int d_groupid;
241   /// Total number of groups in the parent communicator
242   int d_ngroups;
243 
244 public:
245   /// Group Lead Communicator
246   Communicate* GroupLeaderComm;
247 
248 #ifdef HAVE_MPI
249   /// mpi3 communicator wrapper
250   mpi3::communicator comm;
251 #endif
252 };
253 
254 
namespace OHMMS
{
/** Global Communicate object for the process
 *
 * Declaration only; the pointer is defined and managed elsewhere.
 * NOTE(review): presumably set up during application/MPI initialization
 * before first use — confirm against the defining translation unit.
 */
extern Communicate* Controller;
} // namespace OHMMS
261 
262 
263 #endif // OHMMS_COMMUNICATE_H
264