1 /* ----------------------------------------------------------------------
2     This is the
3 
4     ██╗     ██╗ ██████╗  ██████╗  ██████╗ ██╗  ██╗████████╗███████╗
5     ██║     ██║██╔════╝ ██╔════╝ ██╔════╝ ██║  ██║╚══██╔══╝██╔════╝
6     ██║     ██║██║  ███╗██║  ███╗██║  ███╗███████║   ██║   ███████╗
7     ██║     ██║██║   ██║██║   ██║██║   ██║██╔══██║   ██║   ╚════██║
8     ███████╗██║╚██████╔╝╚██████╔╝╚██████╔╝██║  ██║   ██║   ███████║
9     ╚══════╝╚═╝ ╚═════╝  ╚═════╝  ╚═════╝ ╚═╝  ╚═╝   ╚═╝   ╚══════╝®
10 
11     DEM simulation engine, released by
    DCS Computing GmbH, Linz, Austria
13     http://www.dcs-computing.com, office@dcs-computing.com
14 
15     LIGGGHTS® is part of CFDEM®project:
16     http://www.liggghts.com | http://www.cfdem.com
17 
18     Core developer and main author:
19     Christoph Kloss, christoph.kloss@dcs-computing.com
20 
21     LIGGGHTS® is open-source, distributed under the terms of the GNU Public
22     License, version 2 or later. It is distributed in the hope that it will
23     be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
24     of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. You should have
25     received a copy of the GNU General Public License along with LIGGGHTS®.
26     If not, see http://www.gnu.org/licenses . See also top-level README
27     and LICENSE files.
28 
29     LIGGGHTS® and CFDEM® are registered trade marks of DCS Computing GmbH,
30     the producer of the LIGGGHTS® software and the CFDEM®coupling software
31     See http://www.cfdem.com/terms-trademark-policy for details.
32 
33 -------------------------------------------------------------------------
    Contributing author and copyright for this file:
    (if no contributing author is listed, this file has been contributed
    by the core developer)
37 
38     Copyright 2012-     DCS Computing GmbH, Linz
39     Copyright 2009-2012 JKU Linz
40 ------------------------------------------------------------------------- */
41 
42 #ifndef LMP_MPI_LIGGGHTS_H
43 #define LMP_MPI_LIGGGHTS_H
44 
45 #include <mpi.h>
46 #include <stdio.h>
47 #include "lmptype.h"
48 
49 /* ---------------------------------------------------------------------- */
50 // a poor man's inline MPI wrappers for LIGGGHTS
51 /* ---------------------------------------------------------------------- */
52 
53 namespace LAMMPS_NS
54 {
55 
56 /* ----------------------------------------------------------------------
57    Helper function to be able to templatize wrappers
58 ------------------------------------------------------------------------- */
59 
60 template<typename T>
mpi_type()61 inline MPI_Datatype mpi_type()
62 {
63   printf("\n\n\n**************LIGGGHTS MPI: ILLEGAL CALL TO mpi_type()*************\n\n\n");
64   return 0;
65 }
66 
// double maps to MPI_DOUBLE
template<>
inline MPI_Datatype mpi_type<double>()
{
  return MPI_DOUBLE;
}
72 
// int maps to MPI_INT
template<>
inline MPI_Datatype mpi_type<int>()
{
  return MPI_INT;
}
78 
79 template<>
80 inline MPI_Datatype mpi_type<uint64_t>()
81 {
82   return MPI_LONG_LONG ;
83 }
84 
85 /* ---------------------------------------------------------------------- */
86 
87 template<typename T>
MPI_Sum_Vector(T * vector,int len,MPI_Comm comm)88 inline void MPI_Sum_Vector(T* vector, int len, MPI_Comm comm)
89 {
90   MPI_Allreduce(MPI_IN_PLACE, vector, len, mpi_type<T>(), MPI_SUM, comm);
91 }
92 
93 /* ---------------------------------------------------------------------- */
94 
95 template<typename T>
MPI_Sum_Scalar(T & scalar,MPI_Comm comm)96 inline void MPI_Sum_Scalar(T& scalar, MPI_Comm comm)
97 {
98   MPI_Allreduce(MPI_IN_PLACE, &scalar, 1, mpi_type<T>(), MPI_SUM, comm);
99 }
100 
101 /* ---------------------------------------------------------------------- */
102 
103 template<typename T>
MPI_Sum_Scalar(T & scalar,T & scalar_all,MPI_Comm comm)104 inline void MPI_Sum_Scalar(T& scalar, T& scalar_all, MPI_Comm comm)
105 {
106   MPI_Allreduce(&scalar, &scalar_all, 1, mpi_type<T>(), MPI_SUM, comm);
107 }
108 
109 /* ---------------------------------------------------------------------- */
110 
111 template<typename T>
MPI_Min_Scalar(T & scalar,MPI_Comm comm)112 inline void MPI_Min_Scalar(T& scalar, MPI_Comm comm)
113 {
114   MPI_Allreduce(MPI_IN_PLACE, &scalar, 1, mpi_type<T>(), MPI_MIN, comm);
115 }
116 
117 /* ---------------------------------------------------------------------- */
118 
119 template<typename T>
MPI_Min_Scalar(T scalar,T & scalar_all,MPI_Comm comm)120 inline void MPI_Min_Scalar(T scalar, T& scalar_all, MPI_Comm comm)
121 {
122   MPI_Allreduce(&scalar, &scalar_all, 1, mpi_type<T>(), MPI_MIN, comm);
123 }
124 
125 /* ---------------------------------------------------------------------- */
126 
127 template<typename T>
MPI_Max_Scalar(T & scalar,MPI_Comm comm)128 inline void MPI_Max_Scalar(T& scalar, MPI_Comm comm)
129 {
130   MPI_Allreduce(MPI_IN_PLACE, &scalar, 1, mpi_type<T>(), MPI_MAX, comm);
131 }
132 
133 /* ---------------------------------------------------------------------- */
134 
135 template<typename T>
MPI_Max_Scalar(T scalar,T & scalar_all,MPI_Comm comm)136 inline void MPI_Max_Scalar(T scalar, T& scalar_all, MPI_Comm comm)
137 {
138   MPI_Allreduce(&scalar, &scalar_all, 1, mpi_type<T>(), MPI_MAX, comm);
139 }
140 
141 /* ---------------------------------------------------------------------- */
142 
143 template<typename T>
MPI_Max_Vector(T * vector,int len,MPI_Comm comm)144 inline void MPI_Max_Vector(T *vector, int len, MPI_Comm comm)
145 {
146   MPI_Allreduce(MPI_IN_PLACE, vector, len, mpi_type<T>(), MPI_MAX, comm);
147 }
148 
149 /* ---------------------------------------------------------------------- */
150 
151 template<typename T>
MPI_Min_Vector(T * vector,int len,MPI_Comm comm)152 inline void MPI_Min_Vector(T* vector, int len, MPI_Comm comm)
153 {
154   MPI_Allreduce(MPI_IN_PLACE, vector, len, mpi_type<T>(), MPI_MIN, comm);
155 }
156 
157 /* ---------------------------------------------------------------------- */
158 
MPI_Allgather_Sum_Scalar(int scalar,int & scalar_acc,MPI_Comm comm)159 inline void MPI_Allgather_Sum_Scalar(int scalar,int &scalar_acc,MPI_Comm comm)
160 {
161     int rank,size, *allg;
162 
163     MPI_Comm_rank(comm, &rank);
164     MPI_Comm_size(comm, &size);
165 
166     allg = new int[size];
167 
168     MPI_Allgather(&scalar,1,MPI_INT,allg,1,MPI_INT,comm);
169 
170     scalar_acc = 0;
171     for (int iproc = 1; iproc < rank; iproc++)
172        scalar_acc = scalar_acc + allg[iproc-1];
173 
174     delete []allg;
175 }
176 
177 /* ----------------------------------------------------------------------
178    Gather vector data from all processors at proc 0
179    returns allocated and populated array vector0 to caller
180 ------------------------------------------------------------------------- */
181 
182 template<typename T>
MPI_Gather0_Vector(T * vector,int size,T * & vector_0,MPI_Comm comm)183 inline int MPI_Gather0_Vector(T *vector, int size ,T *&vector_0,MPI_Comm comm)
184 {
185     int me,nprocs, *recvcnts, *displs;
186     int size_0;
187 
188     MPI_Comm_size(comm, &nprocs);
189     MPI_Comm_rank(comm, &me);
190 
191     recvcnts = new int[nprocs];
192     displs = new int[nprocs];
193 
194     MPI_Allgather(&size,1,MPI_INT,recvcnts,1,MPI_INT,comm);
195 
196     size_0 = 0;
197     displs[0] = 0;
198     for (int iproc = 1; iproc < nprocs; iproc++)
199     {
200         size_0 += recvcnts[iproc-1];
201         displs[iproc] = displs[iproc-1] + recvcnts[iproc-1];
202     }
203     size_0 += recvcnts[nprocs-1];
204 
205     if(me == 0)
206         vector_0 = new T[size_0];
207     else
208         vector_0 = 0;
209 
210     MPI_Gatherv(vector,size,mpi_type<T>(),vector_0, recvcnts, displs, mpi_type<T>(),0, comm);
211 
212     delete []recvcnts;
213     delete []displs;
214 
215     return size_0;
216 }
217 
218 /* ----------------------------------------------------------------------
219    Allgather vector data from all processors
220    returns allocated and populated array vector_all and its length to caller
221 ------------------------------------------------------------------------- */
222 
223 template<typename T>
MPI_Allgather_Vector(T * vector,int size,T * & vector_all,MPI_Comm comm)224 inline int MPI_Allgather_Vector(T *vector, int size ,T *&vector_all,MPI_Comm comm)
225 {
226     int me,nprocs, *recvcnts, *displs;
227     int size_all;
228 
229     MPI_Comm_size(comm, &nprocs);
230     MPI_Comm_rank(comm, &me);
231 
232     recvcnts = new int[nprocs];
233     displs = new int[nprocs];
234 
235     MPI_Allgather(&size,1,MPI_INT,recvcnts,1,MPI_INT,comm);
236 
237     size_all = 0;
238     displs[0] = 0;
239     for (int iproc = 1; iproc < nprocs; iproc++)
240     {
241         size_all += recvcnts[iproc-1];
242         displs[iproc] = displs[iproc-1] + recvcnts[iproc-1];
243     }
244     size_all += recvcnts[nprocs-1];
245 
246     vector_all = new T[size_all];
247 
248     MPI_Allgatherv(vector,size,mpi_type<T>(),vector_all, recvcnts, displs, mpi_type<T>(), comm);
249 
250     delete []recvcnts;
251     delete []displs;
252 
253     return size_all;
254 }
255 
256 }; // end namespace LAMMPS_NS
257 
258 #endif
259