/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "mdatoms.h"

#include <cmath>

#include <memory>

#include "gromacs/ewald/pme.h"
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/math/functions.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/topology/mtop_lookup.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/smalloc.h"

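/* Used as the inverse mass of completely frozen particles, so that
 * constraint algorithms such as LINCS and SHAKE never divide by zero;
 * see the freeze-group handling in atoms2md() below. */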
#define ALMOST_ZERO 1e-30

namespace gmx
{

MDAtoms::MDAtoms() : mdatoms_(nullptr) {}

MDAtoms::~MDAtoms()
{
    if (mdatoms_ == nullptr)
    {
        return;
    }
    sfree(mdatoms_->massA);
    sfree(mdatoms_->massB);
    sfree(mdatoms_->massT);
    gmx::AlignedAllocationPolicy::free(mdatoms_->invmass);
    sfree(mdatoms_->invMassPerDim);
    sfree(mdatoms_->typeA);
    sfree(mdatoms_->typeB);
    /* mdatoms->chargeA and mdatoms->chargeB point at chargeA_.data()
     * and chargeB_.data() respectively. They get freed automatically. */
    sfree(mdatoms_->sqrt_c6A);
    sfree(mdatoms_->sigmaA);
    sfree(mdatoms_->sigma3A);
    sfree(mdatoms_->sqrt_c6B);
    sfree(mdatoms_->sigmaB);
    sfree(mdatoms_->sigma3B);
    sfree(mdatoms_->ptype);
    sfree(mdatoms_->cTC);
    sfree(mdatoms_->cENER);
    sfree(mdatoms_->cACC);
    sfree(mdatoms_->cFREEZE);
    sfree(mdatoms_->cVCM);
    sfree(mdatoms_->cORF);
    sfree(mdatoms_->bPerturbed);
    sfree(mdatoms_->cU1);
    sfree(mdatoms_->cU2);
}

void MDAtoms::resizeChargeA(const int newSize)
{
    chargeA_.resizeWithPadding(newSize);
    mdatoms_->chargeA = chargeA_.data();
}

void MDAtoms::resizeChargeB(const int newSize)
{
    chargeB_.resizeWithPadding(newSize);
    mdatoms_->chargeB = chargeB_.data();
}

void MDAtoms::reserveChargeA(const int newCapacity)
{
    chargeA_.reserveWithPadding(newCapacity);
    mdatoms_->chargeA = chargeA_.data();
}

void MDAtoms::reserveChargeB(const int newCapacity)
{
    chargeB_.reserveWithPadding(newCapacity);
    mdatoms_->chargeB = chargeB_.data();
}

std::unique_ptr<MDAtoms> makeMDAtoms(FILE* fp, const gmx_mtop_t& mtop, const t_inputrec& ir, const bool rankHasPmeGpuTask)
{
    auto mdAtoms = std::make_unique<MDAtoms>();
    // GPU transfers may want to use a suitable pinning mode.
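    // Pinned (page-locked) host memory lets the GPU back end perform
    // asynchronous host-device transfers of the charge arrays.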
    if (rankHasPmeGpuTask)
    {
        changePinningPolicy(&mdAtoms->chargeA_, pme_get_pinning_policy());
        changePinningPolicy(&mdAtoms->chargeB_, pme_get_pinning_policy());
    }
    t_mdatoms* md;
    snew(md, 1);
    mdAtoms->mdatoms_.reset(md);

    md->nenergrp = mtop.groups.groups[SimulationAtomGroupType::EnergyOutput].size();
    md->bVCMgrps = FALSE;
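    /* Check whether any atom belongs to a non-default center-of-mass motion
     * removal group; if so, per-atom VCM group indices are allocated later. */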
    for (int i = 0; i < mtop.natoms; i++)
    {
        if (getGroupType(mtop.groups, SimulationAtomGroupType::MassCenterVelocityRemoval, i) > 0)
        {
            md->bVCMgrps = TRUE;
        }
    }

    /* Determine the total system mass and perturbed atom counts */
    double totalMassA = 0.0;
    double totalMassB = 0.0;

    md->haveVsites                  = FALSE;
    gmx_mtop_atomloop_block_t aloop = gmx_mtop_atomloop_block_init(&mtop);
    const t_atom*             atom;
    int                       nmol;
    while (gmx_mtop_atomloop_block_next(aloop, &atom, &nmol))
    {
        totalMassA += nmol * atom->m;
        totalMassB += nmol * atom->mB;

        if (atom->ptype == eptVSite)
        {
            md->haveVsites = TRUE;
        }

        if (ir.efep != efepNO && PERTURBED(*atom))
        {
            md->nPerturbed++;
            if (atom->mB != atom->m)
            {
                md->nMassPerturbed += nmol;
            }
            if (atom->qB != atom->q)
            {
                md->nChargePerturbed += nmol;
            }
            if (atom->typeB != atom->type)
            {
                md->nTypePerturbed += nmol;
            }
        }
    }

    md->tmassA = totalMassA;
    md->tmassB = totalMassB;

    if (ir.efep != efepNO && fp)
    {
        fprintf(fp, "There are %d atoms and %d charges for free energy perturbation\n",
                md->nPerturbed, md->nChargePerturbed);
    }

    md->havePartiallyFrozenAtoms = FALSE;
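    /* A group is partially frozen when its freeze flags differ between
     * dimensions, i.e. some but not all of X, Y and Z are frozen. */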
    for (int g = 0; g < ir.opts.ngfrz; g++)
    {
        for (int d = YY; d < DIM; d++)
        {
            if (ir.opts.nFreeze[g][d] != ir.opts.nFreeze[g][XX])
            {
                md->havePartiallyFrozenAtoms = TRUE;
            }
        }
    }

    md->bOrires = (gmx_mtop_ftype_count(&mtop, F_ORIRES) != 0);

    return mdAtoms;
}
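
/* A minimal usage sketch (hypothetical caller code, not part of this file):
 * build the container once, then fill it for the local atom set. Passing
 * nindex = -1 with an empty index means "all atoms, no domain decomposition";
 * see atoms2md() below.
 *
 *   auto mdAtoms = gmx::makeMDAtoms(fplog, mtop, inputrec, rankHasPmeGpuTask);
 *   atoms2md(&mtop, &inputrec, -1, {}, mtop.natoms, mdAtoms.get());
 */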

} // namespace gmx

void atoms2md(const gmx_mtop_t*  mtop,
              const t_inputrec*  ir,
              int                nindex,
              gmx::ArrayRef<int> index,
              int                homenr,
              gmx::MDAtoms*      mdAtoms)
{
    gmx_bool         bLJPME;
    const t_grpopts* opts;
    int nthreads gmx_unused;

    bLJPME = EVDW_PME(ir->vdwtype);

    opts = &ir->opts;

    const SimulationGroups& groups = mtop->groups;

    auto md = mdAtoms->mdatoms();
    /* nindex>=0 indicates DD where we use an index */
    if (nindex >= 0)
    {
        md->nr = nindex;
    }
    else
    {
        md->nr = mtop->natoms;
    }

    if (md->nr > md->nalloc)
    {
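        /* over_alloc_dd() returns a capacity somewhat larger than the
         * requested size, so that the repeated small growth of the local
         * atom count under domain decomposition does not trigger a
         * reallocation on every repartitioning. */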
        md->nalloc = over_alloc_dd(md->nr);

        if (md->nMassPerturbed)
        {
            srenew(md->massA, md->nalloc);
            srenew(md->massB, md->nalloc);
        }
        srenew(md->massT, md->nalloc);
        /* The SIMD version of the integrator needs this aligned and padded.
         * The padding needs to be with zeros, which we set later below.
         */
        gmx::AlignedAllocationPolicy::free(md->invmass);
        md->invmass = new (gmx::AlignedAllocationPolicy::malloc(
                (md->nalloc + GMX_REAL_MAX_SIMD_WIDTH) * sizeof(*md->invmass))) real;
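        /* The extra GMX_REAL_MAX_SIMD_WIDTH elements guarantee that a full
         * SIMD register can load past the last atom; this pad is zeroed
         * after the arrays are filled, near the end of this function. */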
        srenew(md->invMassPerDim, md->nalloc);
        // TODO eventually we will have vectors and just resize
        // everything, but for now the semantics of md->nalloc being
        // the capacity are preserved by keeping vectors within
        // mdAtoms having the same properties as the other arrays.
        mdAtoms->reserveChargeA(md->nalloc);
        mdAtoms->resizeChargeA(md->nr);
        if (md->nPerturbed > 0)
        {
            mdAtoms->reserveChargeB(md->nalloc);
            mdAtoms->resizeChargeB(md->nr);
        }
        srenew(md->typeA, md->nalloc);
        if (md->nPerturbed)
        {
            srenew(md->typeB, md->nalloc);
        }
        if (bLJPME)
        {
            srenew(md->sqrt_c6A, md->nalloc);
            srenew(md->sigmaA, md->nalloc);
            srenew(md->sigma3A, md->nalloc);
            if (md->nPerturbed)
            {
                srenew(md->sqrt_c6B, md->nalloc);
                srenew(md->sigmaB, md->nalloc);
                srenew(md->sigma3B, md->nalloc);
            }
        }
        srenew(md->ptype, md->nalloc);
        if (opts->ngtc > 1)
        {
            srenew(md->cTC, md->nalloc);
            /* We always copy cTC with domain decomposition */
        }
        srenew(md->cENER, md->nalloc);
        if (opts->ngacc > 1)
        {
            srenew(md->cACC, md->nalloc);
        }
        if (inputrecFrozenAtoms(ir))
        {
            srenew(md->cFREEZE, md->nalloc);
        }
        if (md->bVCMgrps)
        {
            srenew(md->cVCM, md->nalloc);
        }
        if (md->bOrires)
        {
            srenew(md->cORF, md->nalloc);
        }
        if (md->nPerturbed)
        {
            srenew(md->bPerturbed, md->nalloc);
        }

        /* Note that these user t_mdatoms array pointers are NULL
         * when there is only one group present.
         * Therefore, when adding code, the user should use something like:
         * gprnrU1 = (md->cU1==NULL ? 0 : md->cU1[localatindex])
         */
        if (!mtop->groups.groupNumbers[SimulationAtomGroupType::User1].empty())
        {
            srenew(md->cU1, md->nalloc);
        }
        if (!mtop->groups.groupNumbers[SimulationAtomGroupType::User2].empty())
        {
            srenew(md->cU2, md->nalloc);
        }
    }

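    /* molb is a search hint for locating an atom's molecule block;
     * mtopGetAtomParameters() updates it as the loop advances, and
     * firstprivate in the pragma below gives each OpenMP thread its own copy. */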
    int molb = 0;

    nthreads = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nthreads) schedule(static) firstprivate(molb)
    for (int i = 0; i < md->nr; i++)
    {
        try
        {
            int  g, ag;
            real mA, mB, fac;
            real c6, c12;

            if (index.empty())
            {
                ag = i;
            }
            else
            {
                ag = index[i];
            }
            const t_atom& atom = mtopGetAtomParameters(mtop, ag, &molb);

            if (md->cFREEZE)
            {
                md->cFREEZE[i] = getGroupType(groups, SimulationAtomGroupType::Freeze, ag);
            }
            if (EI_ENERGY_MINIMIZATION(ir->eI))
            {
                /* Displacement is proportional to F, masses used for constraints */
                mA = 1.0;
                mB = 1.0;
            }
            else if (ir->eI == eiBD)
            {
                /* With BD the physical masses are irrelevant.
                 * To keep the code simple we use most of the normal MD code path
                 * for BD. Thus for constraining the masses should be proportional
                 * to the friction coefficient. We set the absolute value such that
                 * m/2 <(dx/dt)^2> = m/2 * 2kT/(fric*dt) = kT/2  =>  m = fric*dt/2
                 * Then if we set the (meaningless) velocity to v=dx/dt, we get the
                 * correct kinetic energy and temperature using the usual code path.
                 * Thus with BD v*dt will give the displacement and the reported
                 * temperature can signal bad integration (too large time step).
                 */
                if (ir->bd_fric > 0)
                {
                    mA = 0.5 * ir->bd_fric * ir->delta_t;
                    mB = 0.5 * ir->bd_fric * ir->delta_t;
                }
                else
                {
                    /* The friction coefficient is mass/tau_t */
                    fac = ir->delta_t
                          / opts->tau_t[md->cTC ? groups.groupNumbers[SimulationAtomGroupType::TemperatureCoupling][ag] : 0];
                    mA = 0.5 * atom.m * fac;
                    mB = 0.5 * atom.mB * fac;
                }
            }
            else
            {
                mA = atom.m;
                mB = atom.mB;
            }
            if (md->nMassPerturbed)
            {
                md->massA[i] = mA;
                md->massB[i] = mB;
            }
            md->massT[i] = mA;

            if (mA == 0.0)
            {
                md->invmass[i]           = 0;
                md->invMassPerDim[i][XX] = 0;
                md->invMassPerDim[i][YY] = 0;
                md->invMassPerDim[i][ZZ] = 0;
            }
            else if (md->cFREEZE)
            {
                g = md->cFREEZE[i];
                GMX_ASSERT(opts->nFreeze != nullptr,
                           "Must have freeze groups to initialize masses");
                if (opts->nFreeze[g][XX] && opts->nFreeze[g][YY] && opts->nFreeze[g][ZZ])
                {
                    /* Set the inverse mass of completely frozen particles to
                     * ALMOST_ZERO instead of 0 to avoid division by zero in
                     * LINCS or SHAKE.
                     */
                    md->invmass[i] = ALMOST_ZERO;
                }
                else
                {
                    /* Note: Partially frozen particles use the normal invmass.
                     * If such particles are constrained, the frozen dimensions
                     * should not be updated with the constrained coordinates.
                     */
                    md->invmass[i] = 1.0 / mA;
                }
                for (int d = 0; d < DIM; d++)
                {
                    md->invMassPerDim[i][d] = (opts->nFreeze[g][d] ? 0 : 1.0 / mA);
                }
            }
            else
            {
                md->invmass[i] = 1.0 / mA;
                for (int d = 0; d < DIM; d++)
                {
                    md->invMassPerDim[i][d] = 1.0 / mA;
                }
            }

            md->chargeA[i] = atom.q;
            md->typeA[i]   = atom.type;
            if (bLJPME)
            {
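                /* iparams holds the non-bonded parameters as an atnr x atnr
                 * matrix in row-major order, so type*(atnr + 1) picks the
                 * diagonal (type, type) entry. For Lennard-Jones,
                 * C12/C6 = sigma^6, hence the sixth root below; note that
                 * sigma3A stores the inverse cube, 1/sigma^3. */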
                c6  = mtop->ffparams.iparams[atom.type * (mtop->ffparams.atnr + 1)].lj.c6;
                c12 = mtop->ffparams.iparams[atom.type * (mtop->ffparams.atnr + 1)].lj.c12;
                md->sqrt_c6A[i] = std::sqrt(c6);
                if (c6 == 0.0 || c12 == 0.0)
                {
                    md->sigmaA[i] = 1.0;
                }
                else
                {
                    md->sigmaA[i] = gmx::sixthroot(c12 / c6);
                }
                md->sigma3A[i] = 1 / (md->sigmaA[i] * md->sigmaA[i] * md->sigmaA[i]);
            }
            if (md->nPerturbed)
            {
                md->bPerturbed[i] = PERTURBED(atom);
                md->chargeB[i]    = atom.qB;
                md->typeB[i]      = atom.typeB;
                if (bLJPME)
                {
                    c6  = mtop->ffparams.iparams[atom.typeB * (mtop->ffparams.atnr + 1)].lj.c6;
                    c12 = mtop->ffparams.iparams[atom.typeB * (mtop->ffparams.atnr + 1)].lj.c12;
                    md->sqrt_c6B[i] = std::sqrt(c6);
                    if (c6 == 0.0 || c12 == 0.0)
                    {
                        md->sigmaB[i] = 1.0;
                    }
                    else
                    {
                        md->sigmaB[i] = gmx::sixthroot(c12 / c6);
                    }
                    md->sigma3B[i] = 1 / (md->sigmaB[i] * md->sigmaB[i] * md->sigmaB[i]);
                }
            }
            md->ptype[i] = atom.ptype;
            if (md->cTC)
            {
                md->cTC[i] = groups.groupNumbers[SimulationAtomGroupType::TemperatureCoupling][ag];
            }
            md->cENER[i] = getGroupType(groups, SimulationAtomGroupType::EnergyOutput, ag);
            if (md->cACC)
            {
                md->cACC[i] = groups.groupNumbers[SimulationAtomGroupType::Acceleration][ag];
            }
            if (md->cVCM)
            {
                md->cVCM[i] = groups.groupNumbers[SimulationAtomGroupType::MassCenterVelocityRemoval][ag];
            }
            if (md->cORF)
            {
                md->cORF[i] = getGroupType(groups, SimulationAtomGroupType::OrientationRestraintsFit, ag);
            }

            if (md->cU1)
            {
                md->cU1[i] = groups.groupNumbers[SimulationAtomGroupType::User1][ag];
            }
            if (md->cU2)
            {
                md->cU2[i] = groups.groupNumbers[SimulationAtomGroupType::User2][ag];
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
    }

    if (md->nr > 0)
    {
        /* Pad invmass with 0 so a SIMD MD update does not change v and x */
        for (int i = md->nr; i < md->nr + GMX_REAL_MAX_SIMD_WIDTH; i++)
        {
            md->invmass[i] = 0;
        }
    }

    md->homenr = homenr;
    /* We set mass, invmass, invMassPerDim and tmass for lambda=0.
     * For free-energy runs, these should be updated using update_mdatoms().
     */
    md->tmass  = md->tmassA;
    md->lambda = 0;
}

void update_mdatoms(t_mdatoms* md, real lambda)
{
    if (md->nMassPerturbed && lambda != md->lambda)
    {
        real L1 = 1 - lambda;

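        /* Perturbed masses interpolate linearly between the A state
         * (lambda = 0) and the B state (lambda = 1). */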
        /* Update masses of perturbed atoms for the change in lambda */
        int gmx_unused nthreads = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int i = 0; i < md->nr; i++)
        {
            if (md->bPerturbed[i])
            {
                md->massT[i] = L1 * md->massA[i] + lambda * md->massB[i];
                /* Atoms with invmass 0 or ALMOST_ZERO are massless or frozen
                 * and their invmass does not depend on lambda.
                 */
                if (md->invmass[i] > 1.1 * ALMOST_ZERO)
                {
                    md->invmass[i] = 1.0 / md->massT[i];
                    for (int d = 0; d < DIM; d++)
                    {
                        if (md->invMassPerDim[i][d] > 1.1 * ALMOST_ZERO)
                        {
                            md->invMassPerDim[i][d] = md->invmass[i];
                        }
                    }
                }
            }
        }

        /* Update the system mass for the change in lambda */
        md->tmass = L1 * md->tmassA + lambda * md->tmassB;
    }

    md->lambda = lambda;
}