/* ----------------------------------------------------------------------
    This is the

    ██╗     ██╗ ██████╗  ██████╗  ██████╗ ██╗  ██╗████████╗███████╗
    ██║     ██║██╔════╝ ██╔════╝ ██╔════╝ ██║  ██║╚══██╔══╝██╔════╝
    ██║     ██║██║  ███╗██║  ███╗██║  ███╗███████║   ██║   ███████╗
    ██║     ██║██║   ██║██║   ██║██║   ██║██╔══██║   ██║   ╚════██║
    ███████╗██║╚██████╔╝╚██████╔╝╚██████╔╝██║  ██║   ██║   ███████║
    ╚══════╝╚═╝ ╚═════╝  ╚═════╝  ╚═════╝ ╚═╝  ╚═╝   ╚═╝   ╚══════╝®

    DEM simulation engine, released by
    DCS Computing GmbH, Linz, Austria
    http://www.dcs-computing.com, office@dcs-computing.com

    LIGGGHTS® is part of the CFDEM® project:
    http://www.liggghts.com | http://www.cfdem.com

    Core developer and main author:
    Christoph Kloss, christoph.kloss@dcs-computing.com

    LIGGGHTS® is open-source, distributed under the terms of the GNU Public
    License, version 2 or later. It is distributed in the hope that it will
    be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. You should have
    received a copy of the GNU General Public License along with LIGGGHTS®.
    If not, see http://www.gnu.org/licenses . See also top-level README
    and LICENSE files.

    LIGGGHTS® and CFDEM® are registered trade marks of DCS Computing GmbH,
    the producer of the LIGGGHTS® software and the CFDEM®coupling software.
    See http://www.cfdem.com/terms-trademark-policy for details.

-------------------------------------------------------------------------
    Contributing author and copyright for this file:
    (if no contributing author is listed, this file has been contributed
    by the core developer)

    Copyright 2012-     DCS Computing GmbH, Linz
    Copyright 2009-2012 JKU Linz
------------------------------------------------------------------------- */

#define DELTA 10000

#include "multisphere_parallel.h"
#include "atom.h"
#include "atom_vec.h"
#include "vector_liggghts.h"
#include "domain.h"
#include "memory.h"

#define BUFFACTOR 1.5
#define BUFMIN 1000
#define BUFEXTRA 1000
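// BUFMIN    : initial size (in doubles) of the send and receive buffers
// BUFFACTOR : over-allocation factor applied when a buffer is grown
// BUFEXTRA  : extra slack appended when (re)allocating the send buffer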

/* ----------------------------------------------------------------------
   constructor / destructor
------------------------------------------------------------------------- */

MultisphereParallel::MultisphereParallel(LAMMPS *lmp) :
  Multisphere(lmp),

  // initialize comm buffers & exchange memory
  maxsend_(BUFMIN),
  maxrecv_(BUFMIN),
  buf_send_((double *) memory->smalloc((maxsend_+BUFEXTRA)*sizeof(double),"frm:buf_send_")),
  buf_recv_((double *) memory->smalloc((maxrecv_+BUFEXTRA)*sizeof(double),"frm:buf_recv_"))
{
}

MultisphereParallel::~MultisphereParallel()
{
    memory->sfree(buf_send_);
    memory->sfree(buf_recv_);
}

/* ----------------------------------------------------------------------
   realloc the send buffer as needed, using BUFFACTOR and BUFEXTRA
   if flag = 1, realloc and preserve the current contents
   if flag = 0, no copy is needed, just free/malloc
------------------------------------------------------------------------- */

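// example: a request of n = 1200 doubles grows buf_send_ to
// static_cast<int>(1.5*1200) + 1000 = 2800 doubles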
void MultisphereParallel::grow_send(int n, int flag)
{
  maxsend_ = static_cast<int> (BUFFACTOR * n);
  if (flag)
    buf_send_ = (double *) memory->srealloc(buf_send_,(maxsend_+BUFEXTRA)*sizeof(double),"comm:buf_send_");
  else {
    memory->sfree(buf_send_);
    buf_send_ = (double *) memory->smalloc((maxsend_+BUFEXTRA)*sizeof(double),"comm:buf_send_");
  }
}

/* ----------------------------------------------------------------------
   free/malloc the recv buffer as needed, using BUFFACTOR
------------------------------------------------------------------------- */

void MultisphereParallel::grow_recv(int n)
{
  maxrecv_ = static_cast<int> (BUFFACTOR * n);
  memory->sfree(buf_recv_);
  buf_recv_ = (double *) memory->smalloc(maxrecv_*sizeof(double),"comm:buf_recv_");
}

/* ----------------------------------------------------------------------
   exchange bodies with neighbor procs
------------------------------------------------------------------------- */

void MultisphereParallel::exchange()
{
  int i,m,nsend,nrecv,nrecv1,nrecv2;
  double lo,hi,value;
  double x[3];
  double *sublo,*subhi,*buf;
  MPI_Request request;
  MPI_Status status;

  // subbox bounds for orthogonal boxes
  // triclinic boxes are not implemented

  sublo = domain->sublo;
  subhi = domain->subhi;

  // loop over dimensions

  for (int dim = 0; dim < 3; dim++) {

    // fill buffer with bodies leaving my box, using < and >=
    // when a body is removed, the last body is copied into its slot

    lo = sublo[dim];
    hi = subhi[dim];
    i = nsend = 0;

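    // ownership is decided by the body's bound point: the body-frame
    // offset xcm_to_xbound_ is rotated into the space frame using
    // ex_space_/ey_space_/ez_space_ and added to the center of mass xcm_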
    while (i < nbody_) {

      MathExtraLiggghts::local_coosys_to_cartesian(x,xcm_to_xbound_(i),ex_space_(i),ey_space_(i),ez_space_(i));
      vectorAdd3D(xcm_(i),x,x);

      if (x[dim] < lo || x[dim] >= hi)
      {
        if (nsend > maxsend_)
          grow_send(nsend,1);
        nsend += pack_exchange_rigid(i,&buf_send_[nsend]);

        remove_body(i);
      }
      else i++;
    }

    // send/recv bodies in both directions
    // if 1 proc in dimension, no send/recv, set recv buf to send buf
    // if 2 procs in dimension, single send/recv
    // if more than 2 procs in dimension, send/recv to both neighbors

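    // procneigh[dim][0] / procneigh[dim][1] hold the ranks of the lower /
    // upper neighbor process in this dimension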
    int procneigh[3][2];
    for(int i = 0; i < 3; i++)
        for(int j = 0; j < 2; j++)
            procneigh[i][j] = comm->procneigh[i][j];

    int *procgrid = comm->procgrid;

    if (procgrid[dim] == 1) {
      nrecv = nsend;
      buf = buf_send_;
    }
    else
    {
      MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][0],0,&nrecv1,1,MPI_INT,procneigh[dim][1],0,world,&status);
      nrecv = nrecv1;
      if (procgrid[dim] > 2) {
        MPI_Sendrecv(&nsend,1,MPI_INT,procneigh[dim][1],0,&nrecv2,1,MPI_INT,procneigh[dim][0],0,world,&status);
        nrecv += nrecv2;
      }

      if (nrecv > maxrecv_) grow_recv(nrecv);

      MPI_Irecv(buf_recv_,nrecv1,MPI_DOUBLE,procneigh[dim][1],0,world,&request);
      MPI_Send(buf_send_,nsend,MPI_DOUBLE,procneigh[dim][0],0,world);
      MPI_Wait(&request,&status);

      if (procgrid[dim] > 2) {
        MPI_Irecv(&buf_recv_[nrecv1],nrecv2,MPI_DOUBLE,procneigh[dim][0],0,world,&request);
        MPI_Send(buf_send_,nsend,MPI_DOUBLE,procneigh[dim][1],0,world);
        MPI_Wait(&request,&status);
      }

      buf = buf_recv_;
    }

    // check incoming bodies to see if they are in my box
    // if so, add them to my list

    m = 0;

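    // each chunk in the recv buffer starts with its length in doubles,
    // followed by the x/y/z coordinates of the body's bound point,
    // so buf[m+dim+1] is the coordinate tested against lo/hi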
    while (m < nrecv) {
      value = buf[m+dim+1];

      if (value >= lo && value < hi) m += unpack_exchange_rigid(&buf[m]);
      else m += static_cast<int> (buf[m]);
    }
  }
}

/* ----------------------------------------------------------------------
   restart functionality - write all required data into a restart buffer
   executed on all processes, but only proc 0 writes to the file
------------------------------------------------------------------------- */

void MultisphereParallel::writeRestart(FILE *fp)
{
    double *sendbuf = 0, *recvbuf = 0;
    double xbnd[3];
    bool dummy = false;
    double nba = static_cast<double>(n_body_all());

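    // per-body chunk layout in the buffer:
    // [chunk length | xbnd[0] | xbnd[1] | xbnd[2] | custom element data],
    // hence the 4 header doubles added per body below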
    int sizeLocal = n_body() * (customValues_.elemBufSize(OPERATION_RESTART,NULL,dummy,dummy,dummy) + 4);
    int sizeGlobal = 0, sizeOne = 0;

    // allocate send buffer and pack element data
    // all local bodies are packed

    memory->create(sendbuf,sizeLocal,"MultisphereParallel::writeRestart:sendbuf");
    sizeLocal = 0;
    for(int i = 0; i < n_body(); i++)
    {
        x_bound(xbnd,i);
        sizeOne = customValues_.pushElemToBuffer(i,&(sendbuf[sizeLocal+4]),OPERATION_RESTART,dummy,dummy,dummy);
        sendbuf[sizeLocal] = static_cast<double>(sizeOne+4);
        sendbuf[sizeLocal+1] = xbnd[0];
        sendbuf[sizeLocal+2] = xbnd[1];
        sendbuf[sizeLocal+3] = xbnd[2];

        sizeLocal += (sizeOne+4);
    }

    // gather the per-element data

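    // MPI_Gather0_Vector concatenates the per-process buffers on proc 0;
    // recvbuf is allocated inside the call (freed with delete [] below)
    // and the global number of doubles is returned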
    sizeGlobal = MPI_Gather0_Vector(sendbuf,sizeLocal,recvbuf,world);

    // write data to file
    if(comm->me == 0)
    {
        // size with 1 extra value (nba)
        int size = (1+sizeGlobal) * sizeof(double);

        // write size
        fwrite(&size,sizeof(int),1,fp);

        // write extra value
        fwrite(&nba,sizeof(double),1,fp);

        // write per-element data
        fwrite(recvbuf,sizeof(double),sizeGlobal,fp);
    }

    // clean up

    memory->destroy(sendbuf);

    if(recvbuf)
      delete [] recvbuf;
}

/* ----------------------------------------------------------------------
   restart functionality - read all required data from restart buffer
   executed on all processes
------------------------------------------------------------------------- */

void MultisphereParallel::restart(double *list)
{
    bool dummy = false;
    int m = 0, nrecv_this;

    int nbody_all_old = static_cast<int> (list[m++]);

    nbody_ = nbody_all_ = 0;

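    // list layout mirrors writeRestart(): list[0] holds the global number
    // of bodies, followed by one chunk per body:
    // [chunk length | x_bnd[0] | x_bnd[1] | x_bnd[2] | custom element data]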
    for(int i = 0; i < nbody_all_old; i++)
    {
        nrecv_this = static_cast<int>(list[m]);

        double *x_bnd = &(list[m+1]);

        if(domain->is_in_subdomain(x_bnd))
        {
            customValues_.addZeroElement();
            customValues_.deleteRestartElement(nbody_,dummy,dummy,dummy);
            customValues_.popElemFromBuffer(&(list[m+4]),OPERATION_RESTART,dummy,dummy,dummy);

            nbody_++;
        }
        m += nrecv_this;
    }

    // do initialization tasks

    MPI_Sum_Scalar(nbody_,nbody_all_,world);
    generate_map();
    reset_forces(true);
}