1 /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2    Copyright (c) 2011-2021 The plumed team
3    (see the PEOPLE file at the root of the distribution for a list of names)
4 
5    See http://www.plumed.org for more information.
6 
7    This file is part of plumed, version 2.
8 
9    plumed is free software: you can redistribute it and/or modify
10    it under the terms of the GNU Lesser General Public License as published by
11    the Free Software Foundation, either version 3 of the License, or
12    (at your option) any later version.
13 
14    plumed is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU Lesser General Public License for more details.
18 
19    You should have received a copy of the GNU Lesser General Public License
20    along with plumed.  If not, see <http://www.gnu.org/licenses/>.
21 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
22 #include "Atoms.h"
23 #include "ActionAtomistic.h"
24 #include "MDAtoms.h"
25 #include "PlumedMain.h"
26 #include "tools/Pbc.h"
27 #include <algorithm>
28 #include <iostream>
29 #include <string>
30 #include <cmath>
31 
32 using namespace std;
33 
namespace PLMD {

/// We assume that charges and masses are constant along the simulation
/// Set this to false if you want to revert to the original (expensive) behavior
static const bool shareMassAndChargeOnlyAtFirstStep=true;

// Forward declaration (PlumedMain.h is also included above; kept for clarity).
class PlumedMain;
41 
/// Constructor: every per-step flag starts in the "nothing set yet" state and
/// the transfer object (mdatoms) defaults to double precision; the MD code can
/// change that later via setRealPrecision().
Atoms::Atoms(PlumedMain&plumed):
  natoms(0),
  md_energy(0.0),
  energy(0.0),
  dataCanBeSet(false),
  collectEnergy(false),
  energyHasBeenSet(false),
  positionsHaveBeenSet(0),
  massesHaveBeenSet(false),
  chargesHaveBeenSet(false),
  boxHasBeenSet(false),
  forcesHaveBeenSet(0),
  virialHasBeenSet(false),
  massAndChargeOK(false),
  shuffledAtoms(0),
  // default transfer object: MD code reals assumed to be doubles
  mdatoms(MDAtomsBase::create(sizeof(double))),
  plumed(plumed),
  naturalUnits(false),
  MDnaturalUnits(false),
  timestep(0.0),
  forceOnEnergy(0.0),
  zeroallforces(false),
  kbT(0.0),
  asyncSent(false),
  atomsNeeded(false),
  ddStep(0)
{
}
70 
~Atoms()71 Atoms::~Atoms() {
72   if(actions.size()>0) {
73     std::cerr<<"WARNING: there is some inconsistency in action added to atoms, as some of them were not properly destroyed. This might indicate an internal bug!!\n";
74   }
75 }
76 
startStep()77 void Atoms::startStep() {
78   collectEnergy=false; energyHasBeenSet=false; positionsHaveBeenSet=0;
79   massesHaveBeenSet=false; chargesHaveBeenSet=false; boxHasBeenSet=false;
80   forcesHaveBeenSet=0; virialHasBeenSet=false; dataCanBeSet=true;
81 }
82 
setBox(void * p)83 void Atoms::setBox(void*p) {
84   mdatoms->setBox(p);
85   Tensor b; mdatoms->getBox(b); boxHasBeenSet=true;
86 }
87 
setPositions(void * p)88 void Atoms::setPositions(void*p) {
89   plumed_massert( dataCanBeSet,"setPositions must be called after setStep in MD code interface");
90   plumed_massert( p || gatindex.size()==0, "NULL position pointer with non-zero local atoms");
91   mdatoms->setp(p); positionsHaveBeenSet=3;
92 }
93 
setMasses(void * p)94 void Atoms::setMasses(void*p) {
95   plumed_massert( dataCanBeSet,"setMasses must be called after setStep in MD code interface");
96   plumed_massert( p || gatindex.size()==0, "NULL mass pointer with non-zero local atoms");
97   mdatoms->setm(p); massesHaveBeenSet=true;
98 
99 }
100 
setCharges(void * p)101 void Atoms::setCharges(void*p) {
102   plumed_massert( dataCanBeSet, "setCharges must be called after setStep in MD code interface");
103   plumed_massert( p || gatindex.size()==0, "NULL charges pointer with non-zero local atoms");
104   mdatoms->setc(p); chargesHaveBeenSet=true;
105 }
106 
setVirial(void * p)107 void Atoms::setVirial(void*p) {
108   plumed_massert( dataCanBeSet,"setVirial must be called after setStep in MD code interface");
109   mdatoms->setVirial(p); virialHasBeenSet=true;
110 }
111 
setEnergy(void * p)112 void Atoms::setEnergy(void*p) {
113   plumed_massert( dataCanBeSet,"setEnergy must be called after setStep in MD code interface");
114   MD2double(p,md_energy);
115   md_energy*=MDUnits.getEnergy()/units.getEnergy();
116   energyHasBeenSet=true;
117 }
118 
setForces(void * p)119 void Atoms::setForces(void*p) {
120   plumed_massert( dataCanBeSet,"setForces must be called after setStep in MD code interface");
121   plumed_massert( p || gatindex.size()==0, "NULL force pointer with non-zero local atoms");
122   forcesHaveBeenSet=3;
123   mdatoms->setf(p);
124 }
125 
setPositions(void * p,int i)126 void Atoms::setPositions(void*p,int i) {
127   plumed_massert( dataCanBeSet,"setPositions must be called after setStep in MD code interface");
128   plumed_massert( p || gatindex.size()==0, "NULL positions pointer with non-zero local atoms");
129   mdatoms->setp(p,i); positionsHaveBeenSet++;
130 }
131 
setForces(void * p,int i)132 void Atoms::setForces(void*p,int i) {
133   plumed_massert( dataCanBeSet,"setForces must be called after setStep in MD code interface");
134   plumed_massert( p || gatindex.size()==0, "NULL force pointer with non-zero local atoms");
135   mdatoms->setf(p,i); forcesHaveBeenSet++;
136 }
137 
/// Decide which atoms must be shared across ranks this step and trigger the
/// actual exchange via share(unique).
void Atoms::share() {
// At first step I scatter all the atoms so as to store their mass and charge
// Notice that this works with the assumption that charges and masses are
// not changing during the simulation!
  if(!massAndChargeOK && shareMassAndChargeOnlyAtFirstStep) {
    shareAll();
    return;
  }

  // Slow path: atoms are shuffled/decomposed, so collect into the member set
  // `unique` the local atoms requested by each active action. (Re-inserting
  // the same indices on later steps is harmless: std::set deduplicates.)
  if(!(int(gatindex.size())==natoms && shuffledAtoms==0)) {
    for(unsigned i=0; i<actions.size(); i++) {
      if(actions[i]->isActive()) {
        if(!actions[i]->getUnique().empty()) {
          atomsNeeded=true;
          // unique are the local atoms
          unique.insert(actions[i]->getUniqueLocal().begin(),actions[i]->getUniqueLocal().end());
        }
      }
    }
  } else {
    // Fast path: identity mapping — share(unique) will fetch all atoms in one
    // go, so only the atomsNeeded flag matters here.
    for(unsigned i=0; i<actions.size(); i++) {
      if(actions[i]->isActive()) {
        if(!actions[i]->getUnique().empty()) {
          atomsNeeded=true;
        }
      }
    }

  }

  share(unique);
}
170 
shareAll()171 void Atoms::shareAll() {
172   unique.clear();
173   // keep in unique only those atoms that are local
174   if(dd && shuffledAtoms>0) {
175     for(int i=0; i<natoms; i++) if(g2l[i]>=0) unique.insert(AtomNumber::index(i));
176   } else {
177     for(int i=0; i<natoms; i++) unique.insert(AtomNumber::index(i));
178   }
179   atomsNeeded=true;
180   share(unique);
181 }
182 
/// Core sharing routine: zero the force accumulators, fetch the requested
/// positions (and, on the first pass, masses/charges) from the MD buffers and,
/// under domain decomposition, exchange them across ranks either with
/// non-blocking sends (completed later in wait()) or a blocking Allgatherv.
void Atoms::share(const std::set<AtomNumber>& unique) {
  plumed_assert( positionsHaveBeenSet==3 && massesHaveBeenSet );

  // Forces accumulated by PLUMED start from zero each step; only the atoms
  // we will actually touch need zeroing on the slow path.
  virial.zero();
  if(zeroallforces || int(gatindex.size())==natoms) {
    for(int i=0; i<natoms; i++) forces[i].zero();
  } else {
    for(const auto & p : unique) forces[p.index()].zero();
  }
  for(unsigned i=getNatoms(); i<positions.size(); i++) forces[i].zero(); // virtual atoms
  forceOnEnergy=0.0;
  mdatoms->getBox(box);

  if(!atomsNeeded) return;
  atomsNeeded=false;

  if(int(gatindex.size())==natoms && shuffledAtoms==0) {
// faster version, which retrieves all atoms
    mdatoms->getPositions(0,natoms,positions);
  } else {
    // uniq_index caches the local index of each requested atom, in the same
    // iteration order as `unique`; reused later by updateForces().
    uniq_index.clear();
    uniq_index.reserve(unique.size());
    if(shuffledAtoms>0) {
      for(const auto & p : unique) uniq_index.push_back(g2l[p.index()]);
    }
    mdatoms->getPositions(unique,uniq_index,positions);
  }


// how many double per atom should be scattered:
  int ndata=3;
  if(!massAndChargeOK) {
    // First pass: also ship mass+charge (5 doubles/atom). NaN-fill first so
    // that values never received stay visibly invalid.
    ndata=5;
    masses.assign(masses.size(),std::numeric_limits<double>::quiet_NaN());
    charges.assign(charges.size(),std::numeric_limits<double>::quiet_NaN());
    mdatoms->getCharges(gatindex,charges);
    mdatoms->getMasses(gatindex,masses);
  }

  if(dd && shuffledAtoms>0) {
    if(dd.async) {
      // make sure the previous step's Isends have completed before reusing
      // the send buffers
      for(unsigned i=0; i<dd.mpi_request_positions.size(); i++) dd.mpi_request_positions[i].wait();
      for(unsigned i=0; i<dd.mpi_request_index.size(); i++)     dd.mpi_request_index[i].wait();
    }
    // pack global index + position (+ mass/charge) of each local atom
    int count=0;
    for(const auto & p : unique) {
      dd.indexToBeSent[count]=p.index();
      dd.positionsToBeSent[ndata*count+0]=positions[p.index()][0];
      dd.positionsToBeSent[ndata*count+1]=positions[p.index()][1];
      dd.positionsToBeSent[ndata*count+2]=positions[p.index()][2];
      if(!massAndChargeOK) {
        dd.positionsToBeSent[ndata*count+3]=masses[p.index()];
        dd.positionsToBeSent[ndata*count+4]=charges[p.index()];
      }
      count++;
    }
    if(dd.async) {
      // non-blocking broadcast to every rank; matching Recv happens in wait()
      // (tags 666=index, 667=payload)
      asyncSent=true;
      dd.mpi_request_positions.resize(dd.Get_size());
      dd.mpi_request_index.resize(dd.Get_size());
      for(int i=0; i<dd.Get_size(); i++) {
        dd.mpi_request_index[i]=dd.Isend(&dd.indexToBeSent[0],count,i,666);
        dd.mpi_request_positions[i]=dd.Isend(&dd.positionsToBeSent[0],ndata*count,i,667);
      }
    } else {
      // blocking path: variable-count gather of indices and payload
      const int n=(dd.Get_size());
      vector<int> counts(n);
      vector<int> displ(n);
      vector<int> counts5(n);
      vector<int> displ5(n);
      dd.Allgather(count,counts);
      displ[0]=0;
      for(int i=1; i<n; ++i) displ[i]=displ[i-1]+counts[i-1];
      for(int i=0; i<n; ++i) counts5[i]=counts[i]*ndata;
      for(int i=0; i<n; ++i) displ5[i]=displ[i]*ndata;
      dd.Allgatherv(&dd.indexToBeSent[0],count,&dd.indexToBeReceived[0],&counts[0],&displ[0]);
      dd.Allgatherv(&dd.positionsToBeSent[0],ndata*count,&dd.positionsToBeReceived[0],&counts5[0],&displ5[0]);
      // unpack into the global arrays
      int tot=displ[n-1]+counts[n-1];
      for(int i=0; i<tot; i++) {
        positions[dd.indexToBeReceived[i]][0]=dd.positionsToBeReceived[ndata*i+0];
        positions[dd.indexToBeReceived[i]][1]=dd.positionsToBeReceived[ndata*i+1];
        positions[dd.indexToBeReceived[i]][2]=dd.positionsToBeReceived[ndata*i+2];
        if(!massAndChargeOK) {
          masses[dd.indexToBeReceived[i]]      =dd.positionsToBeReceived[ndata*i+3];
          charges[dd.indexToBeReceived[i]]     =dd.positionsToBeReceived[ndata*i+4];
        }
      }
    }
  }
}
273 
/// Complete the data exchange started in share(): broadcast the box, collect
/// the energy, and (async path) receive the position packets sent by every
/// rank. After this the per-step data window is closed.
void Atoms::wait() {
  dataCanBeSet=false; // Everything should be set by this stage
// How many double per atom should be scattered
  int ndata=3;
  if(!massAndChargeOK)ndata=5;

  if(dd) {
    dd.Bcast(box,0);
  }
  pbc.setBox(box);

  if(collectEnergy) energy=md_energy;

  if(dd && shuffledAtoms>0) {
// receive toBeReceived
    if(asyncSent) {
      Communicator::Status status;
      int count=0;
      // one packet per rank; tag 666 carries indices, 667 the payload.
      // The received index count (c) advances the unpack offset.
      for(int i=0; i<dd.Get_size(); i++) {
        dd.Recv(&dd.indexToBeReceived[count],dd.indexToBeReceived.size()-count,i,666,status);
        int c=status.Get_count<int>();
        dd.Recv(&dd.positionsToBeReceived[ndata*count],dd.positionsToBeReceived.size()-ndata*count,i,667);
        count+=c;
      }
      // unpack received positions (and mass/charge on the first pass)
      for(int i=0; i<count; i++) {
        positions[dd.indexToBeReceived[i]][0]=dd.positionsToBeReceived[ndata*i+0];
        positions[dd.indexToBeReceived[i]][1]=dd.positionsToBeReceived[ndata*i+1];
        positions[dd.indexToBeReceived[i]][2]=dd.positionsToBeReceived[ndata*i+2];
        if(!massAndChargeOK) {
          masses[dd.indexToBeReceived[i]]      =dd.positionsToBeReceived[ndata*i+3];
          charges[dd.indexToBeReceived[i]]     =dd.positionsToBeReceived[ndata*i+4];
        }
      }
      asyncSent=false;
    }
    if(collectEnergy) dd.Sum(energy);
  }
// I take note that masses and charges have been set once for all
// at the beginning of the simulation.
  if(shareMassAndChargeOnlyAtFirstStep) massAndChargeOK=true;
}
315 
/// Push the forces accumulated by PLUMED back into the MD code's arrays
/// (and the virial, on rank 0 only, so it is not double counted).
void Atoms::updateForces() {
  plumed_assert( forcesHaveBeenSet==3 );
  if(forceOnEnergy*forceOnEnergy>epsilon) {
    // A force applied on the total energy acts by rescaling all MD forces.
    // NOTE(review): alpha=1-forceOnEnergy looks like a first-order factor —
    // confirm against the energy-bias derivation before touching this.
    double alpha=1.0-forceOnEnergy;
    mdatoms->rescaleForces(gatindex,alpha);
    mdatoms->updateForces(gatindex,forces);
  } else {
    // fast path writes every local atom; slow path only the shared subset
    // (uniq_index was built in share())
    if(int(gatindex.size())==natoms && shuffledAtoms==0) mdatoms->updateForces(gatindex,forces);
    else mdatoms->updateForces(unique,uniq_index,forces);
  }
  if( !plumed.novirial && dd.Get_rank()==0 ) {
    plumed_assert( virialHasBeenSet );
    mdatoms->updateVirial(virial);
  }
}
331 
setNatoms(int n)332 void Atoms::setNatoms(int n) {
333   natoms=n;
334   positions.resize(n);
335   forces.resize(n);
336   masses.resize(n);
337   charges.resize(n);
338   gatindex.resize(n);
339   for(unsigned i=0; i<gatindex.size(); i++) gatindex[i]=i;
340 }
341 
342 
add(ActionAtomistic * a)343 void Atoms::add(ActionAtomistic*a) {
344   actions.push_back(a);
345 }
346 
remove(ActionAtomistic * a)347 void Atoms::remove(ActionAtomistic*a) {
348   auto f=find(actions.begin(),actions.end(),a);
349   plumed_massert(f!=actions.end(),"cannot remove an action registered to atoms");
350   actions.erase(f);
351 }
352 
353 
enable(Communicator & c)354 void Atoms::DomainDecomposition::enable(Communicator& c) {
355   on=true;
356   Set_comm(c.Get_comm());
357   async=Get_size()<10;
358   if(std::getenv("PLUMED_ASYNC_SHARE")) {
359     std::string s(std::getenv("PLUMED_ASYNC_SHARE"));
360     if(s=="yes") async=true;
361     else if(s=="no") async=false;
362     else plumed_merror("PLUMED_ASYNC_SHARE variable is set to " + s + "; should be yes or no");
363   }
364 }
365 
setAtomsNlocal(int n)366 void Atoms::setAtomsNlocal(int n) {
367   gatindex.resize(n);
368   g2l.resize(natoms,-1);
369   if(dd) {
370 // Since these vectors are sent with MPI by using e.g.
371 // &dd.positionsToBeSent[0]
372 // we make sure they are non-zero-sized so as to
373 // avoid errors when doing boundary check
374     if(n==0) n++;
375     dd.positionsToBeSent.resize(n*5,0.0);
376     dd.positionsToBeReceived.resize(natoms*5,0.0);
377     dd.indexToBeSent.resize(n,0);
378     dd.indexToBeReceived.resize(natoms,0);
379   }
380 }
381 
/// Install the global indices of this rank's local atoms (1-based when the
/// caller is Fortran), rebuild the global->local map and detect — across all
/// ranks — whether the identity mapping still holds.
void Atoms::setAtomsGatindex(int*g,bool fortran) {
  plumed_massert( g || gatindex.size()==0, "NULL gatindex pointer with non-zero local atoms");
  ddStep=plumed.getStep();
  if(fortran) {
    // convert 1-based Fortran indices to 0-based
    for(unsigned i=0; i<gatindex.size(); i++) gatindex[i]=g[i]-1;
  } else {
    for(unsigned i=0; i<gatindex.size(); i++) gatindex[i]=g[i];
  }
  for(unsigned i=0; i<g2l.size(); i++) g2l[i]=-1;
  if( gatindex.size()==natoms ) {
    shuffledAtoms=0;
    for(unsigned i=0; i<gatindex.size(); i++) {
      if( gatindex[i]!=i ) { shuffledAtoms=1; break; }
    }
  } else {
    shuffledAtoms=1;
  }
  if(dd) {
    // collective: if ANY rank is shuffled, all ranks take the slow path
    dd.Sum(shuffledAtoms);
  }
  for(unsigned i=0; i<gatindex.size(); i++) g2l[gatindex[i]]=i;

  for(unsigned i=0; i<actions.size(); i++) {
    // keep in unique only those atoms that are local
    actions[i]->updateUniqueLocal();
  }
  unique.clear();
}
410 
setAtomsContiguous(int start)411 void Atoms::setAtomsContiguous(int start) {
412   ddStep=plumed.getStep();
413   for(unsigned i=0; i<gatindex.size(); i++) gatindex[i]=start+i;
414   for(unsigned i=0; i<g2l.size(); i++) g2l[i]=-1;
415   for(unsigned i=0; i<gatindex.size(); i++) g2l[gatindex[i]]=i;
416   if(gatindex.size()<natoms) shuffledAtoms=1;
417   for(unsigned i=0; i<actions.size(); i++) {
418     // keep in unique only those atoms that are local
419     actions[i]->updateUniqueLocal();
420   }
421   unique.clear();
422 }
423 
setRealPrecision(int p)424 void Atoms::setRealPrecision(int p) {
425   mdatoms=MDAtomsBase::create(p);
426 }
427 
getRealPrecision() const428 int Atoms::getRealPrecision()const {
429   return mdatoms->getRealPrecision();
430 }
431 
MD2double(const void * m,double & d) const432 void Atoms::MD2double(const void*m,double&d)const {
433   plumed_assert(mdatoms); mdatoms->MD2double(m,d);
434 }
double2MD(const double & d,void * m) const435 void Atoms::double2MD(const double&d,void*m)const {
436   plumed_assert(mdatoms); mdatoms->double2MD(d,m);
437 }
438 
updateUnits()439 void Atoms::updateUnits() {
440   mdatoms->setUnits(units,MDUnits);
441 }
442 
setTimeStep(void * p)443 void Atoms::setTimeStep(void*p) {
444   MD2double(p,timestep);
445 // The following is to avoid extra digits in case the MD code uses floats
446 // e.g.: float f=0.002 when converted to double becomes 0.002000000094995
447 // To avoid this, we keep only up to 6 significant digits after first one
448   double magnitude=std::pow(10,std::floor(std::log10(timestep)));
449   timestep=std::floor(timestep/magnitude*1e6)/1e6*magnitude;
450 }
451 
getTimeStep() const452 double Atoms::getTimeStep()const {
453   return timestep/units.getTime()*MDUnits.getTime();
454 }
455 
setKbT(void * p)456 void Atoms::setKbT(void*p) {
457   MD2double(p,kbT);
458 }
459 
getKbT() const460 double Atoms::getKbT()const {
461   return kbT/units.getEnergy()*MDUnits.getEnergy();
462 }
463 
464 
/// Build the "full list" of atom indices the MD code should provide; *n is
/// set to its length. Before masses/charges are known this is simply all
/// atoms; afterwards it is the union of the atoms requested by the active
/// actions.
void Atoms::createFullList(int*n) {
  if(!massAndChargeOK && shareMassAndChargeOnlyAtFirstStep) {
    // first pass: request every atom so masses/charges can be collected
    *n=natoms;
    fullList.resize(natoms);
    for(unsigned i=0; i<natoms; i++) fullList[i]=i;
  } else {
// We update here the unique list defined at Atoms::unique.
// This is not very clear, and probably should be coded differently.
// Hopefully this fix the longstanding issue with NAMD.
    unique.clear();
    for(unsigned i=0; i<actions.size(); i++) {
      if(actions[i]->isActive()) {
        if(!actions[i]->getUnique().empty()) {
          atomsNeeded=true;
          // unique are the local atoms
          unique.insert(actions[i]->getUnique().begin(),actions[i]->getUnique().end());
        }
      }
    }
    fullList.resize(0);
    fullList.reserve(unique.size());
    for(const auto & p : unique) fullList.push_back(p.index());
    *n=fullList.size();
  }
}
490 
getFullList(int ** x)491 void Atoms::getFullList(int**x) {
492   if(!fullList.empty()) *x=&fullList[0];
493   else *x=NULL;
494 }
495 
clearFullList()496 void Atoms::clearFullList() {
497   fullList.resize(0);
498 }
499 
init()500 void Atoms::init() {
501 // Default: set domain decomposition to NO-decomposition, waiting for
502 // further instruction
503   if(dd) {
504     setAtomsNlocal(natoms);
505     setAtomsContiguous(0);
506   }
507 }
508 
setDomainDecomposition(Communicator & comm)509 void Atoms::setDomainDecomposition(Communicator& comm) {
510   dd.enable(comm);
511 }
512 
resizeVectors(unsigned n)513 void Atoms::resizeVectors(unsigned n) {
514   positions.resize(n);
515   forces.resize(n);
516   masses.resize(n);
517   charges.resize(n);
518 }
519 
addVirtualAtom(ActionWithVirtualAtom * a)520 AtomNumber Atoms::addVirtualAtom(ActionWithVirtualAtom*a) {
521   unsigned n=positions.size();
522   resizeVectors(n+1);
523   virtualAtomsActions.push_back(a);
524   return AtomNumber::index(n);
525 }
526 
removeVirtualAtom(ActionWithVirtualAtom * a)527 void Atoms::removeVirtualAtom(ActionWithVirtualAtom*a) {
528   unsigned n=positions.size();
529   plumed_massert(a==virtualAtomsActions[virtualAtomsActions.size()-1],"virtual atoms should be destroyed in reverse creation order");
530   resizeVectors(n-1);
531   virtualAtomsActions.pop_back();
532 }
533 
insertGroup(const std::string & name,const std::vector<AtomNumber> & a)534 void Atoms::insertGroup(const std::string&name,const std::vector<AtomNumber>&a) {
535   plumed_massert(groups.count(name)==0,"group named "+name+" already exists");
536   groups[name]=a;
537 }
538 
removeGroup(const std::string & name)539 void Atoms::removeGroup(const std::string&name) {
540   plumed_massert(groups.count(name)==1,"cannot remove group named "+name);
541   groups.erase(name);
542 }
543 
writeBinary(std::ostream & o) const544 void Atoms::writeBinary(std::ostream&o)const {
545   o.write(reinterpret_cast<const char*>(&positions[0][0]),natoms*3*sizeof(double));
546   o.write(reinterpret_cast<const char*>(&box(0,0)),9*sizeof(double));
547   o.write(reinterpret_cast<const char*>(&energy),sizeof(double));
548 }
549 
readBinary(std::istream & i)550 void Atoms::readBinary(std::istream&i) {
551   i.read(reinterpret_cast<char*>(&positions[0][0]),natoms*3*sizeof(double));
552   i.read(reinterpret_cast<char*>(&box(0,0)),9*sizeof(double));
553   i.read(reinterpret_cast<char*>(&energy),sizeof(double));
554   pbc.setBox(box);
555 }
556 
getKBoltzmann() const557 double Atoms::getKBoltzmann()const {
558   if(naturalUnits || MDnaturalUnits) return 1.0;
559   else return kBoltzmann/units.getEnergy();
560 }
561 
getMDKBoltzmann() const562 double Atoms::getMDKBoltzmann()const {
563   if(naturalUnits || MDnaturalUnits) return 1.0;
564   else return kBoltzmann/MDUnits.getEnergy();
565 }
566 
getLocalMasses(std::vector<double> & localMasses)567 void Atoms::getLocalMasses(std::vector<double>& localMasses) {
568   localMasses.resize(gatindex.size());
569   for(unsigned i=0; i<gatindex.size(); i++) localMasses[i] = masses[gatindex[i]];
570 }
571 
getLocalPositions(std::vector<Vector> & localPositions)572 void Atoms::getLocalPositions(std::vector<Vector>& localPositions) {
573   localPositions.resize(gatindex.size());
574   mdatoms->getLocalPositions(localPositions);
575 }
576 
getLocalForces(std::vector<Vector> & localForces)577 void Atoms::getLocalForces(std::vector<Vector>& localForces) {
578   localForces.resize(gatindex.size());
579   for(unsigned i=0; i<gatindex.size(); i++) localForces[i] = forces[gatindex[i]];
580 }
581 
getLocalMDForces(std::vector<Vector> & localForces)582 void Atoms::getLocalMDForces(std::vector<Vector>& localForces) {
583   localForces.resize(gatindex.size());
584   for(unsigned i=0; i<gatindex.size(); i++) {
585     localForces[i] = mdatoms->getMDforces(i);
586   }
587 }
588 
setExtraCV(const std::string & name,void * p)589 void Atoms::setExtraCV(const std::string &name,void*p) {
590   mdatoms->setExtraCV(name,p);
591 }
592 
setExtraCVForce(const std::string & name,void * p)593 void Atoms::setExtraCVForce(const std::string &name,void*p) {
594   mdatoms->setExtraCVForce(name,p);
595 }
596 
getExtraCV(const std::string & name)597 double Atoms::getExtraCV(const std::string &name) {
598   return mdatoms->getExtraCV(name);
599 }
600 
updateExtraCVForce(const std::string & name,double f)601 void Atoms::updateExtraCVForce(const std::string &name,double f) {
602   mdatoms->updateExtraCVForce(name,f);
603 }
604 
605 }
606