1 //-----------------------------------------------------------------------bl-
2 //--------------------------------------------------------------------------
3 //
4 // QUESO - a library to support the Quantification of Uncertainty
5 // for Estimation, Simulation and Optimization
6 //
7 // Copyright (C) 2008-2017 The PECOS Development Team
8 //
9 // This library is free software; you can redistribute it and/or
10 // modify it under the terms of the Version 2.1 GNU Lesser General
11 // Public License as published by the Free Software Foundation.
12 //
13 // This library is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 // Lesser General Public License for more details.
17 //
18 // You should have received a copy of the GNU Lesser General Public
19 // License along with this library; if not, write to the Free Software
20 // Foundation, Inc. 51 Franklin Street, Fifth Floor,
21 // Boston, MA  02110-1301  USA
22 //
23 //-----------------------------------------------------------------------el-
24 
25 #ifndef UQ_MPI_COMM_H
26 #define UQ_MPI_COMM_H
27 
28 #include <queso/Defines.h>
29 
30 #ifdef QUESO_HAS_MPI
31 #include <mpi.h>
32 #endif
33 
34 #ifdef QUESO_HAS_TRILINOS
35 class Epetra_Comm;
36 #endif
37 
38 namespace QUESO {
39 
#ifdef QUESO_HAS_MPI
// With MPI available, the RawType_* aliases map directly onto the real
// MPI opaque types and the RawValue_* macros onto the real MPI constants,
// so the rest of QUESO never has to name MPI_* symbols directly.
typedef MPI_Comm     RawType_MPI_Comm ;
typedef MPI_Group    RawType_MPI_Group ;
typedef MPI_Datatype RawType_MPI_Datatype ;
typedef MPI_Datatype data_type ;
typedef MPI_Op       RawType_MPI_Op ;
typedef MPI_Status   RawType_MPI_Status ;
#define RawValue_MPI_COMM_SELF  MPI_COMM_SELF
#define RawValue_MPI_ANY_SOURCE MPI_ANY_SOURCE
#define RawValue_MPI_CHAR       MPI_CHAR
#define RawValue_MPI_INT        MPI_INT
#define RawValue_MPI_DOUBLE     MPI_DOUBLE
#define RawValue_MPI_UNSIGNED   MPI_UNSIGNED
#define RawValue_MPI_MIN        MPI_MIN
#define RawValue_MPI_MAX        MPI_MAX
#define RawValue_MPI_SUM        MPI_SUM
#else
// Serial (no-MPI) build: the aliases collapse to plain ints and the
// RawValue_* macros become small integer tags.  The datatype tags
// (CHAR/INT/DOUBLE/UNSIGNED) and op tags (MIN/MAX/SUM) only need to be
// distinct within their own group; their numeric values are arbitrary.
typedef int RawType_MPI_Comm;
typedef int RawType_MPI_Group;
typedef int RawType_MPI_Datatype;
struct data_type { };
typedef int RawType_MPI_Op;
typedef int RawType_MPI_Status;
#define RawValue_MPI_COMM_SELF   0
#define RawValue_MPI_ANY_SOURCE -1
#define RawValue_MPI_CHAR        0
#define RawValue_MPI_INT         1
#define RawValue_MPI_DOUBLE      2
#define RawValue_MPI_UNSIGNED    3
#define RawValue_MPI_MIN         0
#define RawValue_MPI_MAX         1
#define RawValue_MPI_SUM         2
#endif
73 
74 /**
75  * Encapsulates the MPI_Datatype.  Taken from libmesh.
76  */
77 class DataType
78 {
79 public:
DataType()80   DataType () : _datatype() {}
81 
DataType(const DataType & other)82   DataType (const DataType &other) :
83     _datatype(other._datatype)
84   {}
85 
DataType(const RawType_MPI_Datatype & type)86   DataType (const RawType_MPI_Datatype &type) :
87     _datatype(type)
88   {}
89 
90 #ifdef QUESO_HAS_MPI
DataType(const DataType & other,unsigned int count)91   DataType (const DataType &other, unsigned int count)
92   {
93     // FIXME - if we nest an inner type here will we run into bug
94     // https://github.com/libMesh/libmesh/issues/631 again?
95     MPI_Type_contiguous(count, other._datatype, &_datatype);
96     this->commit();
97   }
98 #else
DataType(const DataType &,unsigned int)99   DataType (const DataType &, unsigned int)
100   {
101   }
102 #endif
103 
104   DataType & operator = (const DataType &other)
105   { _datatype = other._datatype; return *this; }
106 
107   DataType & operator = (const RawType_MPI_Datatype &type)
108   { _datatype = type; return *this; }
109 
110   operator const RawType_MPI_Datatype & () const
111   { return _datatype; }
112 
113   operator RawType_MPI_Datatype & ()
114   { return _datatype; }
115 
commit()116   void commit ()
117   {
118 #ifdef QUESO_HAS_MPI
119     MPI_Type_commit (&_datatype);
120 #endif
121   }
122 
free()123   void free ()
124   {
125 #ifdef QUESO_HAS_MPI
126     MPI_Type_free (&_datatype);
127 #endif
128   }
129 
130 protected:
131   RawType_MPI_Datatype _datatype;
132 };
133 
134 /**
135  * Templated class to provide the appropriate MPI datatype
136  * for use with built-in C types or simple C++ constructions.
137  *
138  * More complicated data types may need to provide a pointer-to-T so
139  * that we can use MPI_Address without constructing a new T.
140  */
141 template <typename T>
142 class StandardType : public DataType
143 {
144 #ifdef QUESO_HAS_CXX11  // This macro isn't defined (yet)
145     // Get a slightly better compiler diagnostic if we have C++11
146   static_assert(dependent_false<T>::value,
147                 "Only specializations of StandardType may be used, did you forget to include a header file (e.g. parallel_algebra.h)?");
148 #endif
149 
150  /*
151   * The unspecialized class is useless, so we make its constructor
152   * private to catch mistakes at compile-time rather than link-time.
153   * Specializations should have a public constructor of the same
154   * form.
155   */
156 private:
157   StandardType(const T* example = NULL);
158 };
159 
#ifdef QUESO_HAS_MPI

// Generates the StandardType<cxxtype> specialization whose DataType base
// wraps the corresponding raw MPI datatype constant (mpitype).
#define QUESO_STANDARD_TYPE(cxxtype,mpitype)                                  \
  template<>                                                            \
  class StandardType<cxxtype> : public DataType                         \
  {                                                                     \
  public:                                                               \
    explicit                                                            \
      StandardType(const cxxtype* = NULL) : DataType(mpitype) {}        \
  }

#else

// Serial build: the mpitype argument is ignored (never expanded) and the
// specialization holds a default-constructed DataType.
#define QUESO_STANDARD_TYPE(cxxtype,mpitype)                          \
  template<>                                                    \
  class StandardType<cxxtype> : public DataType                 \
  {                                                             \
  public:                                                       \
    explicit                                                    \
      StandardType(const cxxtype* = NULL) : DataType() {}       \
  }

#endif

// The built-in types QUESO supports in collective/point-to-point calls.
// In the no-MPI build the MPI_* tokens below are never expanded, so they
// need not be defined there.
QUESO_STANDARD_TYPE(char,MPI_CHAR);
QUESO_STANDARD_TYPE(int,MPI_INT);
QUESO_STANDARD_TYPE(unsigned int,MPI_UNSIGNED);
QUESO_STANDARD_TYPE(double,MPI_DOUBLE);
188 
189 class BaseEnvironment;
190 
191 /*! \file MpiComm.h
192     \brief MPI Communicator Class.
193 */
194 
195 /*! \class MpiComm
196     \brief The QUESO MPI Communicator Class.
197 
198     This class uses MPI (the Message Passing Interface) for distributed-memory
199     communication between one or more parallel processes. It is meant to insulate
200     the user from the specifics of communication that are not required for normal
201     manipulation of linear algebra objects.
202 */
class MpiComm
{
public:
   //! @name Constructor/Destructor methods
  //@{

  //! QUESO MpiComm MPI parallel constructor.
  /*!
   * This constructs an MpiComm that uses the given "raw" MPI communicator
   * underneath.  MPI_Init *must* have been called before instantiating an
   * object of this type.
   *
   * The MPI_Comm must be valid for the lifetime of this MpiComm.
   */
  MpiComm(const BaseEnvironment& env, RawType_MPI_Comm inputRawComm);

  //! QUESO MpiComm MPI serial constructor.
  /*!
   * This constructs an MpiComm that defaults to MPI_COMM_SELF
   * underneath.  MPI_Init need not be called before using this constructor.
   *
   * The MPI_Comm must be valid for the lifetime of this MpiComm.
   */
  MpiComm(const BaseEnvironment& env);

  //! Copy Constructor.
  /** Makes an exact copy of an existing MpiComm instance.*/
  MpiComm(const MpiComm& src);

  //! Destructor
 ~MpiComm();
  //@}

  //! @name Set methods
  //@{
  //! Assignment operator.
  MpiComm& operator= (const MpiComm& rhs);
  //@}


  //! @name Attribute Accessor Methods
  //@{
#ifdef QUESO_HAS_MPI
  //! Extract MPI Communicator from a MpiComm object.
  RawType_MPI_Comm Comm     () const;
#endif  // QUESO_HAS_MPI

  //! Return my process ID.
  int                MyPID    () const;

  //! Returns total number of processes.
  int                NumProc  () const;
  //@}

  //! @name Methods Overridden from Comm
  //@{
  //! Combines values from all processes and distributes the result back to all processes
  /*! \param sendbuf starting address of send buffer
   * \param count number of elements in send buffer
   * \param datatype data type of elements of send buffer
   * \param op operation
   * \param recvbuf (output) starting address of receive buffer
   *
   * This method is deprecated.  Use the templated Allreduce method instead.
   */
  void               Allreduce(void* sendbuf, void* recvbuf, int count, RawType_MPI_Datatype datatype,
			       RawType_MPI_Op op, const char* whereMsg, const char* whatMsg) const;

  //! Combines values from all processes and distributes the result back to all processes
  /*!
   * \param sendbuf starting address of send buffer containing elements of type T
   * \param count number of elements in send buffer
   * \param op operation
   * \param recvbuf (output) starting address of receive buffer containing elements of type T
   */
  template <typename T>
  void Allreduce(const T * sendbuf, T * recvbuf, int count, RawType_MPI_Op op,
                 const char* whereMsg, const char* whatMsg) const;

  //! Pause every process in *this communicator until all the processes reach this point.
  /*! Blocks the caller until all processes in the communicator have called it; that is,
   * the call returns at any process only after all members of the communicator have entered the call.*/
  void               Barrier  () const; // const char* whereMsg, const char* whatMsg) const;

  //! Broadcast values from the root process to all other processes.
  /*! Broadcasts a message from the process with rank "root" to all other processes of the communicator.
   * \param buffer (input/output) starting address of buffer
   * \param count number of entries in buffer
   * \param datatype data type of buffer
   * \param root rank of broadcast root */
  void               Bcast    (void* buffer, int count, RawType_MPI_Datatype datatype, int root,
                               const char* whereMsg, const char* whatMsg) const;

  //! Gather values from each process to collect on the root process.
  /*!\param sendbuf starting address of send buffer
   * \param sendcnt number of elements in send buffer
   * \param sendtype data type of send buffer elements
   * \param recvcount number of elements for any single receive
   * \param recvtype data type of recv buffer elements
   * \param root rank of receiving process
   * \param recvbuf (output) address of receive buffer
   *
   * This method is deprecated.  Use the templated Gather method instead.
   */
  void               Gather   (void *sendbuf, int sendcnt, RawType_MPI_Datatype sendtype,
                               void *recvbuf, int recvcount, RawType_MPI_Datatype recvtype,
                               int root,
                               const char* whereMsg, const char* whatMsg) const;

  //! Gather values from each process to collect on the root process.
  /*!
   * \param sendbuf starting address of send buffer containing elements of type T
   * \param sendcnt number of elements in send buffer
   * \param recvcount number of elements for any single receive
   * \param root rank of receiving process
   * \param recvbuf (output) address of receive buffer containing elements of type T
   */
  template <typename T>
  void Gather(const T * sendbuf, int sendcnt, T * recvbuf, int recvcount, int root,
              const char * whereMsg, const char * whatMsg) const;

 //! Gathers into specified locations from all processes in a group
 /*! \param sendbuf starting address of send buffer
  * \param sendcnt number of elements in send buffer
  * \param sendtype data type of send buffer elements
  * \param recvbuf (output) address of receive buffer
  * \param recvcnts integer array (of length group size) containing the number of elements
  * that are received from each process
  * \param displs integer array (of length group size). Entry i specifies the displacement
  * relative to recvbuf at which to place the incoming data from process i
  * \param recvtype data type of recv buffer elements
  * \param root rank of receiving process*/
  void               Gatherv  (void *sendbuf, int sendcnt, RawType_MPI_Datatype sendtype,
                               void *recvbuf, int *recvcnts, int *displs, RawType_MPI_Datatype recvtype,
                               int root,
                               const char* whereMsg, const char* whatMsg) const;

  //! Gathers into specified locations from all processes in a group
  /*!
   * \param sendbuf starting address of send buffer containing elements of type T
   * \param sendcnt number of elements in send buffer
   * \param recvcnts integer array (of length group size) containing the number
   * of elements that are received from each process
   * \param displs integer array (of length group size). Entry i specifies the
   * displacement relative to recvbuf at which to place the incoming data from
   * process i
   * \param root rank of receiving process
   */
  template <typename T>
  void Gatherv(const T * sendbuf, int sendcnt, T * recvbuf, int * recvcnts,
               int * displs, int root, const char * whereMsg,
               const char * whatMsg) const;

  //! Blocking receive of data from another process into this process.
  /*!\param buf (output) initial address of receive buffer
   * \param status (output) status object
   * \param count maximum number of elements in receive buffer
   * \param datatype datatype of each receive buffer element
   * \param source rank of source
   * \param tag message tag */
  void               Recv     (void *buf, int count, RawType_MPI_Datatype datatype, int source, int tag, RawType_MPI_Status *status,
                               const char* whereMsg, const char* whatMsg) const;

  //! Possibly blocking send of data from this process to another process.
  /*!\param buf initial address of send buffer
   * \param count number of elements in send buffer
   * \param datatype datatype of each send buffer element
   * \param dest rank of destination
   * \param tag message tag*/
  void               Send     (void *buf, int count, RawType_MPI_Datatype datatype, int dest, int tag,
                               const char* whereMsg, const char* whatMsg) const;
 //@}

//! @name Miscellaneous Methods
  //@{
  //! Synchronizes all the processes and print debug message.
  void               syncPrintDebugMsg(const char* msg, unsigned int msgVerbosity, unsigned int numUSecs) const;

#ifdef QUESO_HAS_TRILINOS
  //! Extract MPI Communicator from a Epetra_MpiComm object.
  const Epetra_Comm& epetraMpiComm() const;
#endif

 //@}
private:
  //! Default Constructor
  /*! It should not be used by user.*/
  MpiComm();

  //! Copies from an existing MpiComm instance.
  void               copy          (const MpiComm& src);

  // QUESO environment
  const BaseEnvironment& m_env;

#ifdef QUESO_HAS_TRILINOS
  // Epetra communicator
  Epetra_Comm*               m_epetraComm;
#endif

  //! Embedded wrapped opaque MPI_Comm object.
  RawType_MPI_Comm            m_rawComm;

  //! World rank
  int                           m_worldRank;

  //! Process ID of this process
  int                           m_myPid;

  // Total number of processes
  int                           m_numProc;
};
414 
415 }  // End namespace QUESO
416 
417 #endif // UQ_MPI_COMM_H
418