1 /*
2 * This file is part of the GROMACS molecular simulation package.
3 *
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
7 * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
8 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
9 * and including many others, as listed in the AUTHORS file in the
10 * top-level source directory and at http://www.gromacs.org.
11 *
12 * GROMACS is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
16 *
17 * GROMACS is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
21 *
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with GROMACS; if not, see
24 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 *
27 * If you want to redistribute modifications to GROMACS, please
28 * consider that scientific software is very special. Version
29 * control is crucial - bugs must be traceable. We will be happy to
30 * consider code for inclusion in the official distribution, but
31 * derived work must not be called official GROMACS. Details are found
32 * in the README & COPYING files - if they are missing, get the
33 * official version at http://www.gromacs.org.
34 *
35 * To help us fund GROMACS development, we humbly ask that you cite
36 * the research papers on the package. Check out http://www.gromacs.org.
37 */
38 /*! \internal \file
39 *
40 * \brief This file defines integrators for energy minimization
41 *
42 * \author Berk Hess <hess@kth.se>
43 * \author Erik Lindahl <erik@kth.se>
44 * \ingroup module_mdrun
45 */
#include "gmxpre.h"

#include "config.h"

#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>

#include <algorithm>
#include <limits>
#include <utility>
#include <vector>

#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/collect.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/mdsetup.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/ewald/pme_pp.h"
#include "gromacs/fileio/confio.h"
#include "gromacs/fileio/mtxio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/imd/imd.h"
#include "gromacs/linearalgebra/sparsematrix.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/coupling.h"
#include "gromacs/mdlib/dispersioncorrection.h"
#include "gromacs/mdlib/ebin.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/energyoutput.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/md_support.h"
#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/stat.h"
#include "gromacs/mdlib/tgroup.h"
#include "gromacs/mdlib/trajectory_writing.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/vsite.h"
#include "gromacs/mdrunutility/handlerestart.h"
#include "gromacs/mdrunutility/printtime.h"
#include "gromacs/mdtypes/checkpointdata.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forcebuffers.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/mdrunoptions.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"

#include "legacysimulator.h"
#include "shellfc.h"
117
118 using gmx::ArrayRef;
119 using gmx::MdrunScheduleWorkload;
120 using gmx::RVec;
121 using gmx::VirtualSitesHandler;
122
//! Utility structure holding one candidate state during energy minimization
typedef struct em_state
{
    //! Copy of the (local or global) simulation state
    t_state s;
    //! Force array matching the coordinates in \c s
    gmx::ForceBuffers f;
    //! Potential energy of this state
    real epot;
    //! Norm of the force over the non-frozen degrees of freedom
    real fnorm;
    //! Maximum force component magnitude
    real fmax;
    //! Index of the atom carrying the maximum force (global index when available)
    int a_fmax;
} em_state_t;
139
140 //! Print the EM starting conditions
print_em_start(FILE * fplog,const t_commrec * cr,gmx_walltime_accounting_t walltime_accounting,gmx_wallcycle_t wcycle,const char * name)141 static void print_em_start(FILE* fplog,
142 const t_commrec* cr,
143 gmx_walltime_accounting_t walltime_accounting,
144 gmx_wallcycle_t wcycle,
145 const char* name)
146 {
147 walltime_accounting_start_time(walltime_accounting);
148 wallcycle_start(wcycle, ewcRUN);
149 print_start(fplog, cr, walltime_accounting, name);
150 }
151
152 //! Stop counting time for EM
em_time_end(gmx_walltime_accounting_t walltime_accounting,gmx_wallcycle_t wcycle)153 static void em_time_end(gmx_walltime_accounting_t walltime_accounting, gmx_wallcycle_t wcycle)
154 {
155 wallcycle_stop(wcycle, ewcRUN);
156
157 walltime_accounting_end_time(walltime_accounting);
158 }
159
160 //! Printing a log file and console header
sp_header(FILE * out,const char * minimizer,real ftol,int nsteps)161 static void sp_header(FILE* out, const char* minimizer, real ftol, int nsteps)
162 {
163 fprintf(out, "\n");
164 fprintf(out, "%s:\n", minimizer);
165 fprintf(out, " Tolerance (Fmax) = %12.5e\n", ftol);
166 fprintf(out, " Number of steps = %12d\n", nsteps);
167 }
168
169 //! Print warning message
warn_step(FILE * fp,real ftol,real fmax,gmx_bool bLastStep,gmx_bool bConstrain)170 static void warn_step(FILE* fp, real ftol, real fmax, gmx_bool bLastStep, gmx_bool bConstrain)
171 {
172 constexpr bool realIsDouble = GMX_DOUBLE;
173 char buffer[2048];
174
175 if (!std::isfinite(fmax))
176 {
177 sprintf(buffer,
178 "\nEnergy minimization has stopped because the force "
179 "on at least one atom is not finite. This usually means "
180 "atoms are overlapping. Modify the input coordinates to "
181 "remove atom overlap or use soft-core potentials with "
182 "the free energy code to avoid infinite forces.\n%s",
183 !realIsDouble ? "You could also be lucky that switching to double precision "
184 "is sufficient to obtain finite forces.\n"
185 : "");
186 }
187 else if (bLastStep)
188 {
189 sprintf(buffer,
190 "\nEnergy minimization reached the maximum number "
191 "of steps before the forces reached the requested "
192 "precision Fmax < %g.\n",
193 ftol);
194 }
195 else
196 {
197 sprintf(buffer,
198 "\nEnergy minimization has stopped, but the forces have "
199 "not converged to the requested precision Fmax < %g (which "
200 "may not be possible for your system). It stopped "
201 "because the algorithm tried to make a new step whose size "
202 "was too small, or there was no change in the energy since "
203 "last step. Either way, we regard the minimization as "
204 "converged to within the available machine precision, "
205 "given your starting configuration and EM parameters.\n%s%s",
206 ftol,
207 !realIsDouble ? "\nDouble precision normally gives you higher accuracy, but "
208 "this is often not needed for preparing to run molecular "
209 "dynamics.\n"
210 : "",
211 bConstrain ? "You might need to increase your constraint accuracy, or turn\n"
212 "off constraints altogether (set constraints = none in mdp file)\n"
213 : "");
214 }
215
216 fputs(wrap_lines(buffer, 78, 0, FALSE), stderr);
217 fputs(wrap_lines(buffer, 78, 0, FALSE), fp);
218 }
219
220 //! Print message about convergence of the EM
print_converged(FILE * fp,const char * alg,real ftol,int64_t count,gmx_bool bDone,int64_t nsteps,const em_state_t * ems,double sqrtNumAtoms)221 static void print_converged(FILE* fp,
222 const char* alg,
223 real ftol,
224 int64_t count,
225 gmx_bool bDone,
226 int64_t nsteps,
227 const em_state_t* ems,
228 double sqrtNumAtoms)
229 {
230 char buf[STEPSTRSIZE];
231
232 if (bDone)
233 {
234 fprintf(fp, "\n%s converged to Fmax < %g in %s steps\n", alg, ftol, gmx_step_str(count, buf));
235 }
236 else if (count < nsteps)
237 {
238 fprintf(fp,
239 "\n%s converged to machine precision in %s steps,\n"
240 "but did not reach the requested Fmax < %g.\n",
241 alg, gmx_step_str(count, buf), ftol);
242 }
243 else
244 {
245 fprintf(fp, "\n%s did not converge to Fmax < %g in %s steps.\n", alg, ftol,
246 gmx_step_str(count, buf));
247 }
248
249 #if GMX_DOUBLE
250 fprintf(fp, "Potential Energy = %21.14e\n", ems->epot);
251 fprintf(fp, "Maximum force = %21.14e on atom %d\n", ems->fmax, ems->a_fmax + 1);
252 fprintf(fp, "Norm of force = %21.14e\n", ems->fnorm / sqrtNumAtoms);
253 #else
254 fprintf(fp, "Potential Energy = %14.7e\n", ems->epot);
255 fprintf(fp, "Maximum force = %14.7e on atom %d\n", ems->fmax, ems->a_fmax + 1);
256 fprintf(fp, "Norm of force = %14.7e\n", ems->fnorm / sqrtNumAtoms);
257 #endif
258 }
259
/*! \brief Compute the norm and max of the force array in parallel.
 *
 * Frozen dimensions (per freeze group) are excluded from both the norm and
 * the maximum. With domain decomposition, the local maximum atom index is
 * translated to a global index, and with any parallel run the results are
 * reduced over all ranks.
 *
 * \param[in]  cr       Communication record
 * \param[in]  opts     Group options (freeze-group dimension masks)
 * \param[in]  mdatoms  Per-atom data for this domain (homenr, cFREEZE)
 * \param[in]  f        Forces for the home atoms
 * \param[out] fnorm    If non-null, receives the force norm
 * \param[out] fmax     If non-null, receives the maximum force
 * \param[out] a_fmax   If non-null, receives the atom index of the maximum force
 */
static void get_f_norm_max(const t_commrec* cr,
                           t_grpopts* opts,
                           t_mdatoms* mdatoms,
                           gmx::ArrayRef<const gmx::RVec> f,
                           real* fnorm,
                           real* fmax,
                           int* a_fmax)
{
    double fnorm2, *sum;
    real fmax2, fam;
    int la_max, a_max, start, end, i, m, gf;

    /* This routine finds the largest force and returns it.
     * On parallel machines the global max is taken.
     */
    /* Accumulate squared magnitudes; square roots are taken only at the end. */
    fnorm2 = 0;
    fmax2 = 0;
    la_max = -1;
    start = 0;
    end = mdatoms->homenr;
    if (mdatoms->cFREEZE)
    {
        /* Some atoms belong to freeze groups: only sum non-frozen dimensions */
        for (i = start; i < end; i++)
        {
            gf = mdatoms->cFREEZE[i];
            fam = 0;
            for (m = 0; m < DIM; m++)
            {
                if (!opts->nFreeze[gf][m])
                {
                    fam += gmx::square(f[i][m]);
                }
            }
            fnorm2 += fam;
            if (fam > fmax2)
            {
                fmax2 = fam;
                la_max = i;
            }
        }
    }
    else
    {
        /* No freeze groups: use the full squared norm of each force vector */
        for (i = start; i < end; i++)
        {
            fam = norm2(f[i]);
            fnorm2 += fam;
            if (fam > fmax2)
            {
                fmax2 = fam;
                la_max = i;
            }
        }
    }

    /* Translate the local index of the max-force atom to a global index */
    if (la_max >= 0 && DOMAINDECOMP(cr))
    {
        a_max = cr->dd->globalAtomIndices[la_max];
    }
    else
    {
        a_max = la_max;
    }
    if (PAR(cr))
    {
        /* Pack per-rank (fmax2, a_max) pairs plus fnorm2 into one buffer so a
         * single gmx_sumd communicates everything; the atom index rides along
         * as a double and is recovered by rounding.
         */
        snew(sum, 2 * cr->nnodes + 1);
        sum[2 * cr->nodeid] = fmax2;
        sum[2 * cr->nodeid + 1] = a_max;
        sum[2 * cr->nnodes] = fnorm2;
        gmx_sumd(2 * cr->nnodes + 1, sum, cr);
        fnorm2 = sum[2 * cr->nnodes];
        /* Determine the global maximum */
        for (i = 0; i < cr->nnodes; i++)
        {
            if (sum[2 * i] > fmax2)
            {
                fmax2 = sum[2 * i];
                a_max = gmx::roundToInt(sum[2 * i + 1]);
            }
        }
        sfree(sum);
    }

    /* Each output is optional; only write through non-null pointers */
    if (fnorm)
    {
        *fnorm = sqrt(fnorm2);
    }
    if (fmax)
    {
        *fmax = sqrt(fmax2);
    }
    if (a_fmax)
    {
        *a_fmax = a_max;
    }
}
357
358 //! Compute the norm of the force
get_state_f_norm_max(const t_commrec * cr,t_grpopts * opts,t_mdatoms * mdatoms,em_state_t * ems)359 static void get_state_f_norm_max(const t_commrec* cr, t_grpopts* opts, t_mdatoms* mdatoms, em_state_t* ems)
360 {
361 get_f_norm_max(cr, opts, mdatoms, ems->f.view().force(), &ems->fnorm, &ems->fmax, &ems->a_fmax);
362 }
363
/*! \brief Initialize the energy minimization.
 *
 * Sets up lambdas, shell/flexible-constraint support (for NM), distributes
 * or copies the global state into the EM working state \p ems, optionally
 * constrains the starting coordinates, and prepares global-stat and shift
 * vectors.
 *
 * \param[out] shellfc  Set to a shell/flexcon handler for NM, nullptr for
 *                      energy minimizers (which minimize shells as normal DOFs).
 */
static void init_em(FILE* fplog,
                    const gmx::MDLogger& mdlog,
                    const char* title,
                    const t_commrec* cr,
                    t_inputrec* ir,
                    gmx::ImdSession* imdSession,
                    pull_t* pull_work,
                    t_state* state_global,
                    const gmx_mtop_t* top_global,
                    em_state_t* ems,
                    gmx_localtop_t* top,
                    t_nrnb* nrnb,
                    t_forcerec* fr,
                    gmx::MDAtoms* mdAtoms,
                    gmx_global_stat_t* gstat,
                    VirtualSitesHandler* vsite,
                    gmx::Constraints* constr,
                    gmx_shellfc_t** shellfc)
{
    real dvdl_constr;

    if (fplog)
    {
        fprintf(fplog, "Initiating %s\n", title);
    }

    if (MASTER(cr))
    {
        /* EM uses no temperature-coupling groups */
        state_global->ngtc = 0;
    }
    /* Only the master rank holds a valid global state; other ranks pass
     * null/empty views to initialize_lambdas. */
    int* fep_state = MASTER(cr) ? &state_global->fep_state : nullptr;
    gmx::ArrayRef<real> lambda = MASTER(cr) ? state_global->lambda : gmx::ArrayRef<real>();
    initialize_lambdas(fplog, *ir, MASTER(cr), fep_state, lambda);

    if (ir->eI == eiNM)
    {
        GMX_ASSERT(shellfc != nullptr, "With NM we always support shells");

        *shellfc =
                init_shell_flexcon(stdout, top_global, constr ? constr->numFlexibleConstraints() : 0,
                                   ir->nstcalcenergy, DOMAINDECOMP(cr), thisRankHasDuty(cr, DUTY_PME));
    }
    else
    {
        GMX_ASSERT(EI_ENERGY_MINIMIZATION(ir->eI),
                   "This else currently only handles energy minimizers, consider if your algorithm "
                   "needs shell/flexible-constraint support");

        /* With energy minimization, shells and flexible constraints are
         * automatically minimized when treated like normal DOFS.
         */
        if (shellfc != nullptr)
        {
            *shellfc = nullptr;
        }
    }

    if (DOMAINDECOMP(cr))
    {
        dd_init_local_state(cr->dd, state_global, &ems->s);

        /* Distribute the charge groups over the nodes from the master node */
        dd_partition_system(fplog, mdlog, ir->init_step, cr, TRUE, 1, state_global, *top_global, ir,
                            imdSession, pull_work, &ems->s, &ems->f, mdAtoms, top, fr, vsite,
                            constr, nrnb, nullptr, FALSE);
        dd_store_state(cr->dd, &ems->s);
    }
    else
    {
        state_change_natoms(state_global, state_global->natoms);
        /* Just copy the state */
        ems->s = *state_global;
        state_change_natoms(&ems->s, ems->s.natoms);

        mdAlgorithmsSetupAtomData(cr, ir, *top_global, top, fr, &ems->f, mdAtoms, constr, vsite,
                                  shellfc ? *shellfc : nullptr);
    }

    update_mdatoms(mdAtoms->mdatoms(), ems->s.lambda[efptMASS]);

    if (constr)
    {
        // TODO how should this cross-module support dependency be managed?
        if (ir->eConstrAlg == econtSHAKE && gmx_mtop_ftype_count(top_global, F_CONSTR) > 0)
        {
            gmx_fatal(FARGS, "Can not do energy minimization with %s, use %s\n",
                      econstr_names[econtSHAKE], econstr_names[econtLINCS]);
        }

        if (!ir->bContinuation)
        {
            /* Constrain the starting coordinates */
            bool needsLogging = true;
            bool computeEnergy = true;
            bool computeVirial = false;
            dvdl_constr = 0;
            /* Coordinates are constrained in place: x is both input and output */
            constr->apply(needsLogging, computeEnergy, -1, 0, 1.0, ems->s.x.arrayRefWithPadding(),
                          ems->s.x.arrayRefWithPadding(), ArrayRef<RVec>(), ems->s.box,
                          ems->s.lambda[efptFEP], &dvdl_constr, gmx::ArrayRefWithPadding<RVec>(),
                          computeVirial, nullptr, gmx::ConstraintVariable::Positions);
        }
    }

    /* Global-stat reduction machinery is only needed for parallel runs */
    if (PAR(cr))
    {
        *gstat = global_stat_init(ir);
    }
    else
    {
        *gstat = nullptr;
    }

    /* Shift vectors depend only on the box, which is now known */
    calc_shifts(ems->s.box, fr->shift_vec);
}
479
480 //! Finalize the minimization
finish_em(const t_commrec * cr,gmx_mdoutf_t outf,gmx_walltime_accounting_t walltime_accounting,gmx_wallcycle_t wcycle)481 static void finish_em(const t_commrec* cr,
482 gmx_mdoutf_t outf,
483 gmx_walltime_accounting_t walltime_accounting,
484 gmx_wallcycle_t wcycle)
485 {
486 if (!thisRankHasDuty(cr, DUTY_PME))
487 {
488 /* Tell the PME only node to finish */
489 gmx_pme_send_finish(cr);
490 }
491
492 done_mdoutf(outf);
493
494 em_time_end(walltime_accounting, wcycle);
495 }
496
497 //! Swap two different EM states during minimization
swap_em_state(em_state_t ** ems1,em_state_t ** ems2)498 static void swap_em_state(em_state_t** ems1, em_state_t** ems2)
499 {
500 em_state_t* tmp;
501
502 tmp = *ems1;
503 *ems1 = *ems2;
504 *ems2 = tmp;
505 }
506
/*! \brief Save the EM trajectory.
 *
 * Writes coordinates and/or forces to the trajectory files and, when
 * \p confout is given, writes the final configuration on the master rank
 * (collecting coordinates from all domains first if needed).
 *
 * \param bX       Whether to write coordinates this step
 * \param bF       Whether to write forces this step
 * \param confout  Output configuration filename, or nullptr to skip
 */
static void write_em_traj(FILE* fplog,
                          const t_commrec* cr,
                          gmx_mdoutf_t outf,
                          gmx_bool bX,
                          gmx_bool bF,
                          const char* confout,
                          const gmx_mtop_t* top_global,
                          t_inputrec* ir,
                          int64_t step,
                          em_state_t* state,
                          t_state* state_global,
                          ObservablesHistory* observablesHistory)
{
    int mdof_flags = 0;

    if (bX)
    {
        mdof_flags |= MDOF_X;
    }
    if (bF)
    {
        mdof_flags |= MDOF_F;
    }

    /* If we want IMD output, set appropriate MDOF flag */
    if (ir->bIMD)
    {
        mdof_flags |= MDOF_IMD;
    }

    /* EM has no checkpointing; an empty holder satisfies the interface */
    gmx::WriteCheckpointDataHolder checkpointDataHolder;
    mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags, top_global->natoms, step,
                                     static_cast<double>(step), &state->s, state_global,
                                     observablesHistory, state->f.view().force(), &checkpointDataHolder);

    if (confout != nullptr)
    {
        if (DOMAINDECOMP(cr))
        {
            /* If bX=true, x was collected to state_global in the call above */
            if (!bX)
            {
                /* Collect the distributed coordinates onto the master rank */
                auto globalXRef = MASTER(cr) ? state_global->x : gmx::ArrayRef<gmx::RVec>();
                dd_collect_vec(cr->dd, state->s.ddp_count, state->s.ddp_count_cg_gl, state->s.cg_gl,
                               state->s.x, globalXRef);
            }
        }
        else
        {
            /* Copy the local state pointer */
            state_global = &state->s;
        }

        if (MASTER(cr))
        {
            if (ir->pbcType != PbcType::No && !ir->bPeriodicMols && DOMAINDECOMP(cr))
            {
                /* Make molecules whole only for confout writing */
                do_pbc_mtop(ir->pbcType, state->s.box, top_global, state_global->x.rvec_array());
            }

            write_sto_conf_mtop(confout, *top_global->name, top_global,
                                state_global->x.rvec_array(), nullptr, ir->pbcType, state->s.box);
        }
    }
}
574
575 //! \brief Do one minimization step
576 //
577 // \returns true when the step succeeded, false when a constraint error occurred
do_em_step(const t_commrec * cr,t_inputrec * ir,t_mdatoms * md,em_state_t * ems1,real a,gmx::ArrayRefWithPadding<const gmx::RVec> force,em_state_t * ems2,gmx::Constraints * constr,int64_t count)578 static bool do_em_step(const t_commrec* cr,
579 t_inputrec* ir,
580 t_mdatoms* md,
581 em_state_t* ems1,
582 real a,
583 gmx::ArrayRefWithPadding<const gmx::RVec> force,
584 em_state_t* ems2,
585 gmx::Constraints* constr,
586 int64_t count)
587
588 {
589 t_state *s1, *s2;
590 int start, end;
591 real dvdl_constr;
592 int nthreads gmx_unused;
593
594 bool validStep = true;
595
596 s1 = &ems1->s;
597 s2 = &ems2->s;
598
599 if (DOMAINDECOMP(cr) && s1->ddp_count != cr->dd->ddp_count)
600 {
601 gmx_incons("state mismatch in do_em_step");
602 }
603
604 s2->flags = s1->flags;
605
606 if (s2->natoms != s1->natoms)
607 {
608 state_change_natoms(s2, s1->natoms);
609 ems2->f.resize(s2->natoms);
610 }
611 if (DOMAINDECOMP(cr) && s2->cg_gl.size() != s1->cg_gl.size())
612 {
613 s2->cg_gl.resize(s1->cg_gl.size());
614 }
615
616 copy_mat(s1->box, s2->box);
617 /* Copy free energy state */
618 s2->lambda = s1->lambda;
619 copy_mat(s1->box, s2->box);
620
621 start = 0;
622 end = md->homenr;
623
624 nthreads = gmx_omp_nthreads_get(emntUpdate);
625 #pragma omp parallel num_threads(nthreads)
626 {
627 const rvec* x1 = s1->x.rvec_array();
628 rvec* x2 = s2->x.rvec_array();
629 const rvec* f = as_rvec_array(force.unpaddedArrayRef().data());
630
631 int gf = 0;
632 #pragma omp for schedule(static) nowait
633 for (int i = start; i < end; i++)
634 {
635 try
636 {
637 if (md->cFREEZE)
638 {
639 gf = md->cFREEZE[i];
640 }
641 for (int m = 0; m < DIM; m++)
642 {
643 if (ir->opts.nFreeze[gf][m])
644 {
645 x2[i][m] = x1[i][m];
646 }
647 else
648 {
649 x2[i][m] = x1[i][m] + a * f[i][m];
650 }
651 }
652 }
653 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
654 }
655
656 if (s2->flags & (1 << estCGP))
657 {
658 /* Copy the CG p vector */
659 const rvec* p1 = s1->cg_p.rvec_array();
660 rvec* p2 = s2->cg_p.rvec_array();
661 #pragma omp for schedule(static) nowait
662 for (int i = start; i < end; i++)
663 {
664 // Trivial OpenMP block that does not throw
665 copy_rvec(p1[i], p2[i]);
666 }
667 }
668
669 if (DOMAINDECOMP(cr))
670 {
671 /* OpenMP does not supported unsigned loop variables */
672 #pragma omp for schedule(static) nowait
673 for (gmx::index i = 0; i < gmx::ssize(s2->cg_gl); i++)
674 {
675 s2->cg_gl[i] = s1->cg_gl[i];
676 }
677 }
678 }
679
680 if (DOMAINDECOMP(cr))
681 {
682 s2->ddp_count = s1->ddp_count;
683 s2->ddp_count_cg_gl = s1->ddp_count_cg_gl;
684 }
685
686 if (constr)
687 {
688 dvdl_constr = 0;
689 validStep = constr->apply(
690 TRUE, TRUE, count, 0, 1.0, s1->x.arrayRefWithPadding(), s2->x.arrayRefWithPadding(),
691 ArrayRef<RVec>(), s2->box, s2->lambda[efptBONDED], &dvdl_constr,
692 gmx::ArrayRefWithPadding<RVec>(), false, nullptr, gmx::ConstraintVariable::Positions);
693
694 if (cr->nnodes > 1)
695 {
696 /* This global reduction will affect performance at high
697 * parallelization, but we can not really avoid it.
698 * But usually EM is not run at high parallelization.
699 */
700 int reductionBuffer = static_cast<int>(!validStep);
701 gmx_sumi(1, &reductionBuffer, cr);
702 validStep = (reductionBuffer == 0);
703 }
704
705 // We should move this check to the different minimizers
706 if (!validStep && ir->eI != eiSteep)
707 {
708 gmx_fatal(FARGS,
709 "The coordinates could not be constrained. Minimizer '%s' can not handle "
710 "constraint failures, use minimizer '%s' before using '%s'.",
711 EI(ir->eI), EI(eiSteep), EI(ir->eI));
712 }
713 }
714
715 return validStep;
716 }
717
/*! \brief Prepare EM for using domain decomposition parallellization.
 *
 * Repartitions the system over the DD ranks based on the coordinates in
 * \p ems and records the resulting partitioning state in \p ems->s.
 */
static void em_dd_partition_system(FILE* fplog,
                                   const gmx::MDLogger& mdlog,
                                   int step,
                                   const t_commrec* cr,
                                   const gmx_mtop_t* top_global,
                                   t_inputrec* ir,
                                   gmx::ImdSession* imdSession,
                                   pull_t* pull_work,
                                   em_state_t* ems,
                                   gmx_localtop_t* top,
                                   gmx::MDAtoms* mdAtoms,
                                   t_forcerec* fr,
                                   VirtualSitesHandler* vsite,
                                   gmx::Constraints* constr,
                                   t_nrnb* nrnb,
                                   gmx_wallcycle_t wcycle)
{
    /* Repartition the domain decomposition */
    dd_partition_system(fplog, mdlog, step, cr, FALSE, 1, nullptr, *top_global, ir, imdSession, pull_work,
                        &ems->s, &ems->f, mdAtoms, top, fr, vsite, constr, nrnb, wcycle, FALSE);
    /* Remember the DD generation this state belongs to */
    dd_store_state(cr->dd, &ems->s);
}
741
742 namespace
743 {
744
/*! \brief Class to handle the work of setting and doing an energy evaluation.
 *
 * This class is a mere aggregate of parameters to pass to evaluate an
 * energy, so that future changes to names and types of them consume
 * less time when refactoring other code.
 *
 * Aggregate initialization is used, for which the chief risk is that
 * if a member is added at the end and not all initializer lists are
 * updated, then the member will be value initialized, which will
 * typically mean initialization to zero.
 *
 * Use a braced initializer list to construct one of these. */
class EnergyEvaluator
{
public:
    /*! \brief Evaluates an energy on the state in \c ems.
     *
     * \todo In practice, the same objects mu_tot, vir, and pres
     * are always passed to this function, so we would rather have
     * them as data members. However, their C-array types are
     * unsuited for aggregate initialization. When the types
     * improve, the call signature of this method can be reduced.
     */
    void run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst);
    //! Handles logging (deprecated).
    FILE* fplog;
    //! Handles logging.
    const gmx::MDLogger& mdlog;
    //! Handles communication.
    const t_commrec* cr;
    //! Coordinates multi-simulations.
    const gmx_multisim_t* ms;
    //! Holds the simulation topology.
    const gmx_mtop_t* top_global;
    //! Holds the domain topology.
    gmx_localtop_t* top;
    //! User input options.
    t_inputrec* inputrec;
    //! The Interactive Molecular Dynamics session.
    gmx::ImdSession* imdSession;
    //! The pull work object.
    pull_t* pull_work;
    //! Manages flop accounting.
    t_nrnb* nrnb;
    //! Manages wall cycle accounting.
    gmx_wallcycle_t wcycle;
    //! Coordinates global reduction.
    gmx_global_stat_t gstat;
    //! Handles virtual sites.
    VirtualSitesHandler* vsite;
    //! Handles constraints.
    gmx::Constraints* constr;
    //! Per-atom data for this domain.
    gmx::MDAtoms* mdAtoms;
    //! Handles how to calculate the forces.
    t_forcerec* fr;
    //! Schedule of force-calculation work each step for this task.
    MdrunScheduleWorkload* runScheduleWork;
    //! Stores the computed energies.
    gmx_enerdata_t* enerd;
};
806
/*! \brief Evaluate energy and forces for the state in \p ems.
 *
 * Constructs virtual sites, repartitions the DD if needed, runs do_force,
 * reduces energies across ranks, adds dispersion corrections, projects out
 * constraint force components, computes the pressure, and (for energy
 * minimizers) the force norm/max, storing the results back into \p ems and
 * the member \c enerd.
 */
void EnergyEvaluator::run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst)
{
    real t;
    gmx_bool bNS;
    tensor force_vir, shake_vir, ekin;
    real dvdl_constr;
    real terminate = 0;

    /* Set the time to the initial time, the time does not change during EM */
    t = inputrec->init_t;

    if (bFirst || (DOMAINDECOMP(cr) && ems->s.ddp_count < cr->dd->ddp_count))
    {
        /* This is the first state or an old state used before the last ns */
        bNS = TRUE;
    }
    else
    {
        /* Otherwise do neighbor searching whenever a pair list is in use */
        bNS = FALSE;
        if (inputrec->nstlist > 0)
        {
            bNS = TRUE;
        }
    }

    if (vsite)
    {
        /* Place virtual sites from the (possibly new) real-atom positions */
        vsite->construct(ems->s.x, 1, {}, ems->s.box);
    }

    if (DOMAINDECOMP(cr) && bNS)
    {
        /* Repartition the domain decomposition */
        em_dd_partition_system(fplog, mdlog, count, cr, top_global, inputrec, imdSession, pull_work,
                               ems, top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
    }

    /* Calc force & energy on new trial position */
    /* do_force always puts the charge groups in the box and shifts again
     * We do not unshift, so molecules are always whole in congrad.c
     */
    do_force(fplog, cr, ms, inputrec, nullptr, nullptr, imdSession, pull_work, count, nrnb, wcycle,
             top, ems->s.box, ems->s.x.arrayRefWithPadding(), &ems->s.hist, &ems->f.view(), force_vir,
             mdAtoms->mdatoms(), enerd, ems->s.lambda, fr, runScheduleWork, vsite, mu_tot, t, nullptr,
             GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES | GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY
                     | (bNS ? GMX_FORCE_NS : 0),
             DDBalanceRegionHandler(cr));

    /* Clear the unused shake virial and pressure */
    clear_mat(shake_vir);
    clear_mat(pres);

    /* Communicate stuff when parallel */
    if (PAR(cr) && inputrec->eI != eiNM)
    {
        wallcycle_start(wcycle, ewcMoveE);

        global_stat(gstat, cr, enerd, force_vir, shake_vir, inputrec, nullptr, nullptr, nullptr, 1,
                    &terminate, nullptr, FALSE, CGLO_ENERGY | CGLO_PRESSURE | CGLO_CONSTRAINT);

        wallcycle_stop(wcycle, ewcMoveE);
    }

    if (fr->dispersionCorrection)
    {
        /* Calculate long range corrections to pressure and energy */
        const DispersionCorrection::Correction correction =
                fr->dispersionCorrection->calculate(ems->s.box, ems->s.lambda[efptVDW]);

        enerd->term[F_DISPCORR] = correction.energy;
        enerd->term[F_EPOT] += correction.energy;
        enerd->term[F_PRES] += correction.pressure;
        enerd->term[F_DVDL] += correction.dvdl;
    }
    else
    {
        enerd->term[F_DISPCORR] = 0;
    }

    ems->epot = enerd->term[F_EPOT];

    if (constr)
    {
        /* Project out the constraint components of the force */
        bool needsLogging = false;
        bool computeEnergy = false;
        bool computeVirial = true;
        dvdl_constr = 0;
        auto f = ems->f.view().forceWithPadding();
        constr->apply(needsLogging, computeEnergy, count, 0, 1.0, ems->s.x.arrayRefWithPadding(), f,
                      f.unpaddedArrayRef(), ems->s.box, ems->s.lambda[efptBONDED], &dvdl_constr,
                      gmx::ArrayRefWithPadding<RVec>(), computeVirial, shake_vir,
                      gmx::ConstraintVariable::ForceDispl);
        enerd->term[F_DVDL_CONSTR] += dvdl_constr;
        /* Total virial = force virial + constraint (shake) virial */
        m_add(force_vir, shake_vir, vir);
    }
    else
    {
        copy_mat(force_vir, vir);
    }

    /* EM has no kinetic energy; pressure comes from the virial alone */
    clear_mat(ekin);
    enerd->term[F_PRES] = calc_pres(fr->pbcType, inputrec->nwall, ems->s.box, ekin, vir, pres);

    if (inputrec->efep != efepNO)
    {
        accumulateKineticLambdaComponents(enerd, ems->s.lambda, *inputrec->fepvals);
    }

    if (EI_ENERGY_MINIMIZATION(inputrec->eI))
    {
        /* Update fnorm/fmax/a_fmax in ems for the minimizer's convergence test */
        get_state_f_norm_max(cr, &(inputrec->opts), mdAtoms->mdatoms(), ems);
    }
}
921
922 } // namespace
923
/*! \brief Compute the Polak-Ribiere partial sum when the atom ordering of
 * two states differs.
 *
 * With domain decomposition, \p s_min and \p s_b may index their local atoms
 * differently. The forces of \p s_min are gathered into a global-size vector
 * (via a global sum) so that each term of the sum can pair the force of
 * \p s_b with the force of \p s_min for the same global atom.
 */
static double reorder_partsum(const t_commrec* cr,
                              t_grpopts* opts,
                              const gmx_mtop_t* top_global,
                              const em_state_t* s_min,
                              const em_state_t* s_b)
{
    if (debug)
    {
        fprintf(debug, "Doing reorder_partsum\n");
    }

    auto fm = s_min->f.view().force();
    auto fb = s_b->f.view().force();

    /* Collect fm in a global vector fmg.
     * This conflicts with the spirit of domain decomposition,
     * but to fully optimize this a much more complicated algorithm is required.
     */
    const int natoms = top_global->natoms;
    rvec* fmg;
    snew(fmg, natoms);

    /* Scatter this rank's local forces to their global slots ... */
    gmx::ArrayRef<const int> indicesMin = s_min->s.cg_gl;
    int i = 0;
    for (int a : indicesMin)
    {
        copy_rvec(fm[i], fmg[a]);
        i++;
    }
    /* ... and sum over ranks so every rank has the full global force vector */
    gmx_sum(top_global->natoms * 3, fmg[0], cr);

    /* Now we will determine the part of the sum for the cgs in state s_b */
    gmx::ArrayRef<const int> indicesB = s_b->s.cg_gl;

    double partsum = 0;
    i = 0;
    int gf = 0;
    gmx::ArrayRef<const unsigned char> grpnrFREEZE =
            top_global->groups.groupNumbers[SimulationAtomGroupType::Freeze];
    for (int a : indicesB)
    {
        if (!grpnrFREEZE.empty())
        {
            /* NOTE(review): the freeze group is looked up with the local index i,
             * while fmg is indexed by the global index a; grpnrFREEZE comes from
             * the global topology, so confirm whether grpnrFREEZE[a] was intended. */
            gf = grpnrFREEZE[i];
        }
        for (int m = 0; m < DIM; m++)
        {
            if (!opts->nFreeze[gf][m])
            {
                /* Polak-Ribiere term: (f_b - f_min) . f_b, per component */
                partsum += (fb[i][m] - fmg[a][m]) * fb[i][m];
            }
        }
        i++;
    }

    sfree(fmg);

    return partsum;
}
984
/*! \brief Compute the Polak-Ribiere beta for conjugate gradients.
 *
 * beta = sum_i (f_b[i] - f_min[i]) . f_b[i] / |f_min|^2, with frozen
 * dimensions excluded. When the two states were made under different DD
 * partitionings the atom orders differ, and the sum is delegated to
 * reorder_partsum(); the result is reduced over ranks when parallel.
 */
static real pr_beta(const t_commrec* cr,
                    t_grpopts* opts,
                    t_mdatoms* mdatoms,
                    const gmx_mtop_t* top_global,
                    const em_state_t* s_min,
                    const em_state_t* s_b)
{
    double sum;

    /* This is just the classical Polak-Ribiere calculation of beta;
     * it looks a bit complicated since we take freeze groups into account,
     * and might have to sum it in parallel runs.
     */

    if (!DOMAINDECOMP(cr)
        || (s_min->s.ddp_count == cr->dd->ddp_count && s_b->s.ddp_count == cr->dd->ddp_count))
    {
        /* Both states share the same atom ordering: sum directly over home atoms */
        auto fm = s_min->f.view().force();
        auto fb = s_b->f.view().force();
        sum = 0;
        int gf = 0;
        /* This part of code can be incorrect with DD,
         * since the atom ordering in s_b and s_min might differ.
         */
        for (int i = 0; i < mdatoms->homenr; i++)
        {
            if (mdatoms->cFREEZE)
            {
                gf = mdatoms->cFREEZE[i];
            }
            for (int m = 0; m < DIM; m++)
            {
                if (!opts->nFreeze[gf][m])
                {
                    sum += (fb[i][m] - fm[i][m]) * fb[i][m];
                }
            }
        }
    }
    else
    {
        /* We need to reorder cgs while summing */
        sum = reorder_partsum(cr, opts, top_global, s_min, s_b);
    }
    if (PAR(cr))
    {
        gmx_sumd(1, &sum, cr);
    }

    return sum / gmx::square(s_min->fnorm);
}
1037
1038 namespace gmx
1039 {
1040
/* Polak-Ribiere conjugate-gradient energy minimizer: repeatedly builds a
 * search direction from the force and the previous direction, line-searches
 * along it (accepting a single trial step when possible), and restarts with
 * steepest descent when the CG memory becomes unhelpful.
 */
void LegacySimulator::do_cg()
{
    const char* CG = "Polak-Ribiere Conjugate Gradients";

    gmx_localtop_t    top(top_global->ffparams);
    gmx_global_stat_t gstat;
    double            tmp, minstep;
    real              stepsize;
    /* a, b, c are positions along the current search line (a=0 is the current
     * minimum); beta is the CG mixing coefficient (0 => steepest descent). */
    real a, b, c, beta = 0.0;
    /* Energy at the most recent trial point B during the inner line search */
    real     epot_repl = 0;
    real     pnorm;
    gmx_bool converged, foundlower;
    rvec     mu_tot = { 0 };
    gmx_bool do_log = FALSE, do_ene = FALSE, do_x, do_f;
    tensor   vir, pres;
    /* nstcg: interval (in steps) for forced steepest-descent restarts,
     * from the .mdp option nstcgsteep. */
    int  number_steps, neval = 0, nstcg = inputrec->nstcgsteep;
    int  m, step, nminstep;
    auto mdatoms = mdAtoms->mdatoms();

    GMX_LOG(mdlog.info)
            .asParagraph()
            .appendText(
                    "Note that activating conjugate gradient energy minimization via the "
                    "integrator .mdp option and the command gmx mdrun may "
                    "be available in a different form in a future version of GROMACS, "
                    "e.g. gmx minimize and an .mdp option.");

    step = 0;

    if (MASTER(cr))
    {
        // In CG, the state is extended with a search direction
        state_global->flags |= (1 << estCGP);

        // Ensure the extra per-atom state array gets allocated
        state_change_natoms(state_global, state_global->natoms);

        // Initialize the search direction to zero
        for (RVec& cg_p : state_global->cg_p)
        {
            cg_p = { 0, 0, 0 };
        }
    }

    /* Create 4 states on the stack and extract pointers that we will swap */
    em_state_t  s0{}, s1{}, s2{}, s3{};
    em_state_t* s_min = &s0;
    em_state_t* s_a   = &s1;
    em_state_t* s_b   = &s2;
    em_state_t* s_c   = &s3;

    /* Init em and store the local state in s_min */
    init_em(fplog, mdlog, CG, cr, inputrec, imdSession, pull_work, state_global, top_global, s_min,
            &top, nrnb, fr, mdAtoms, &gstat, vsite, constr, nullptr);
    const bool  simulationsShareState = false;
    gmx_mdoutf* outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider,
                                   mdModulesNotifier, inputrec, top_global, nullptr, wcycle,
                                   StartingBehavior::NewSimulation, simulationsShareState, ms);
    gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work,
                                   nullptr, false, StartingBehavior::NewSimulation,
                                   simulationsShareState, mdModulesNotifier);

    /* Print to log file */
    print_em_start(fplog, cr, walltime_accounting, wcycle, CG);

    /* Max number of steps */
    number_steps = inputrec->nsteps;

    if (MASTER(cr))
    {
        sp_header(stderr, CG, inputrec->em_tol, number_steps);
    }
    if (fplog)
    {
        sp_header(fplog, CG, inputrec->em_tol, number_steps);
    }

    EnergyEvaluator energyEvaluator{ fplog,    mdlog,      cr,        ms,   top_global,      &top,
                                     inputrec, imdSession, pull_work, nrnb, wcycle,          gstat,
                                     vsite,    constr,     mdAtoms,   fr,   runScheduleWork, enerd };
    /* Call the force routine and some auxiliary (neighboursearching etc.) */
    /* do_force always puts the charge groups in the box and shifts again
     * We do not unshift, so molecules are always whole in congrad.c
     */
    energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE);

    if (MASTER(cr))
    {
        /* Copy stuff to the energy bin for easy printing etc. */
        matrix nullBox = {};
        energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
                                         enerd, nullptr, nullptr, nullBox, PTCouplingArrays(), 0,
                                         nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);

        EnergyOutput::printHeader(fplog, step, step);
        energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step,
                                           step, fr->fcdata.get(), nullptr);
    }

    /* Estimate/guess the initial stepsize */
    stepsize = inputrec->em_stepsize / s_min->fnorm;

    if (MASTER(cr))
    {
        double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
        fprintf(stderr, "   F-max             = %12.5e on atom %d\n", s_min->fmax, s_min->a_fmax + 1);
        fprintf(stderr, "   F-Norm            = %12.5e\n", s_min->fnorm / sqrtNumAtoms);
        fprintf(stderr, "\n");
        /* and copy to the log file too... */
        fprintf(fplog, "   F-max             = %12.5e on atom %d\n", s_min->fmax, s_min->a_fmax + 1);
        fprintf(fplog, "   F-Norm            = %12.5e\n", s_min->fnorm / sqrtNumAtoms);
        fprintf(fplog, "\n");
    }
    /* Start the loop over CG steps.
     * Each successful step is counted, and we continue until
     * we either converge or reach the max number of steps.
     */
    converged = FALSE;
    for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
    {

        /* start taking steps in a new direction
         * First time we enter the routine, beta=0, and the direction is
         * simply the negative gradient.
         */

        /* Calculate the new direction in p, and the gradient in this direction, gpa */
        gmx::ArrayRef<gmx::RVec>       pm  = s_min->s.cg_p;
        gmx::ArrayRef<const gmx::RVec> sfm = s_min->f.view().force();
        double                         gpa = 0;
        int                            gf  = 0;
        for (int i = 0; i < mdatoms->homenr; i++)
        {
            if (mdatoms->cFREEZE)
            {
                gf = mdatoms->cFREEZE[i];
            }
            for (m = 0; m < DIM; m++)
            {
                if (!inputrec->opts.nFreeze[gf][m])
                {
                    /* Polak-Ribiere update: new direction = force + beta * old direction */
                    pm[i][m] = sfm[i][m] + beta * pm[i][m];
                    gpa -= pm[i][m] * sfm[i][m];
                    /* f is negative gradient, thus the sign */
                }
                else
                {
                    /* Frozen dimensions never move */
                    pm[i][m] = 0;
                }
            }
        }

        /* Sum the gradient along the line across CPUs */
        if (PAR(cr))
        {
            gmx_sumd(1, &gpa, cr);
        }

        /* Calculate the norm of the search vector */
        get_f_norm_max(cr, &(inputrec->opts), mdatoms, pm, &pnorm, nullptr, nullptr);

        /* Just in case stepsize reaches zero due to numerical precision... */
        if (stepsize <= 0)
        {
            stepsize = inputrec->em_stepsize / pnorm;
        }

        /*
         * Double check the value of the derivative in the search direction.
         * If it is positive it must be due to the old information in the
         * CG formula, so just remove that and start over with beta=0.
         * This corresponds to a steepest descent step.
         */
        if (gpa > 0)
        {
            beta = 0;
            step--;   /* Don't count this step since we are restarting */
            continue; /* Go back to the beginning of the big for-loop */
        }

        /* Calculate minimum allowed stepsize, before the average (norm)
         * relative change in coordinate is smaller than precision
         */
        minstep      = 0;
        auto s_min_x = makeArrayRef(s_min->s.x);
        for (int i = 0; i < mdatoms->homenr; i++)
        {
            for (m = 0; m < DIM; m++)
            {
                tmp = fabs(s_min_x[i][m]);
                if (tmp < 1.0)
                {
                    tmp = 1.0;
                }
                tmp = pm[i][m] / tmp;
                minstep += tmp * tmp;
            }
        }
        /* Add up from all CPUs */
        if (PAR(cr))
        {
            gmx_sumd(1, &minstep, cr);
        }

        minstep = GMX_REAL_EPS / sqrt(minstep / (3 * top_global->natoms));

        if (stepsize < minstep)
        {
            /* A step this small cannot change coordinates at this precision:
             * we have converged to machine accuracy. */
            converged = TRUE;
            break;
        }

        /* Write coordinates if necessary */
        do_x = do_per_step(step, inputrec->nstxout);
        do_f = do_per_step(step, inputrec->nstfout);

        write_em_traj(fplog, cr, outf, do_x, do_f, nullptr, top_global, inputrec, step, s_min,
                      state_global, observablesHistory);

        /* Take a step downhill.
         * In theory, we should minimize the function along this direction.
         * That is quite possible, but it turns out to take 5-10 function evaluations
         * for each line. However, we dont really need to find the exact minimum -
         * it is much better to start a new CG step in a modified direction as soon
         * as we are close to it. This will save a lot of energy evaluations.
         *
         * In practice, we just try to take a single step.
         * If it worked (i.e. lowered the energy), we increase the stepsize but
         * the continue straight to the next CG step without trying to find any minimum.
         * If it didn't work (higher energy), there must be a minimum somewhere between
         * the old position and the new one.
         *
         * Due to the finite numerical accuracy, it turns out that it is a good idea
         * to even accept a SMALL increase in energy, if the derivative is still downhill.
         * This leads to lower final energies in the tests I've done. / Erik
         */
        s_a->epot = s_min->epot;
        a         = 0.0;
        c         = a + stepsize; /* reference position along line is zero */

        if (DOMAINDECOMP(cr) && s_min->s.ddp_count < cr->dd->ddp_count)
        {
            em_dd_partition_system(fplog, mdlog, step, cr, top_global, inputrec, imdSession,
                                   pull_work, s_min, &top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
        }

        /* Take a trial step (new coords in s_c) */
        do_em_step(cr, inputrec, mdatoms, s_min, c, s_min->s.cg_p.constArrayRefWithPadding(), s_c,
                   constr, -1);

        neval++;
        /* Calculate energy for the trial step */
        energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE);

        /* Calc derivative along line */
        const rvec*                    pc  = s_c->s.cg_p.rvec_array();
        gmx::ArrayRef<const gmx::RVec> sfc = s_c->f.view().force();
        double                         gpc = 0;
        for (int i = 0; i < mdatoms->homenr; i++)
        {
            for (m = 0; m < DIM; m++)
            {
                gpc -= pc[i][m] * sfc[i][m]; /* f is negative gradient, thus the sign */
            }
        }
        /* Sum the gradient along the line across CPUs */
        if (PAR(cr))
        {
            gmx_sumd(1, &gpc, cr);
        }

        /* This is the max amount of increase in energy we tolerate */
        tmp = std::sqrt(GMX_REAL_EPS) * fabs(s_a->epot);

        /* Accept the step if the energy is lower, or if it is not significantly higher
         * and the line derivative is still negative.
         */
        if (s_c->epot < s_a->epot || (gpc < 0 && s_c->epot < (s_a->epot + tmp)))
        {
            foundlower = TRUE;
            /* Great, we found a better energy. Increase step for next iteration
             * if we are still going down, decrease it otherwise
             */
            if (gpc < 0)
            {
                stepsize *= 1.618034; /* The golden section */
            }
            else
            {
                stepsize *= 0.618034; /* 1/golden section */
            }
        }
        else
        {
            /* New energy is the same or higher. We will have to do some work
             * to find a smaller value in the interval. Take smaller step next time!
             */
            foundlower = FALSE;
            stepsize *= 0.618034;
        }


        /* OK, if we didn't find a lower value we will have to locate one now - there must
         * be one in the interval [a=0,c].
         * The same thing is valid here, though: Don't spend dozens of iterations to find
         * the line minimum. We try to interpolate based on the derivative at the endpoints,
         * and only continue until we find a lower value. In most cases this means 1-2 iterations.
         *
         * I also have a safeguard for potentially really pathological functions so we never
         * take more than 20 steps before we give up ...
         *
         * If we already found a lower value we just skip this step and continue to the update.
         */
        double gpb;
        if (!foundlower)
        {
            nminstep = 0;

            do
            {
                /* Select a new trial point.
                 * If the derivatives at points a & c have different sign we interpolate to zero,
                 * otherwise just do a bisection.
                 */
                if (gpa < 0 && gpc > 0)
                {
                    b = a + gpa * (a - c) / (gpc - gpa);
                }
                else
                {
                    b = 0.5 * (a + c);
                }

                /* safeguard if interpolation close to machine accuracy causes errors:
                 * never go outside the interval
                 */
                if (b <= a || b >= c)
                {
                    b = 0.5 * (a + c);
                }

                if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
                {
                    /* Reload the old state */
                    em_dd_partition_system(fplog, mdlog, -1, cr, top_global, inputrec, imdSession, pull_work,
                                           s_min, &top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
                }

                /* Take a trial step to this new point - new coords in s_b */
                do_em_step(cr, inputrec, mdatoms, s_min, b,
                           s_min->s.cg_p.constArrayRefWithPadding(), s_b, constr, -1);

                neval++;
                /* Calculate energy for the trial step */
                energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE);

                /* p does not change within a step, but since the domain decomposition
                 * might change, we have to use cg_p of s_b here.
                 */
                const rvec*                    pb  = s_b->s.cg_p.rvec_array();
                gmx::ArrayRef<const gmx::RVec> sfb = s_b->f.view().force();
                gpb                                = 0;
                for (int i = 0; i < mdatoms->homenr; i++)
                {
                    for (m = 0; m < DIM; m++)
                    {
                        gpb -= pb[i][m] * sfb[i][m]; /* f is negative gradient, thus the sign */
                    }
                }
                /* Sum the gradient along the line across CPUs */
                if (PAR(cr))
                {
                    gmx_sumd(1, &gpb, cr);
                }

                if (debug)
                {
                    fprintf(debug, "CGE: EpotA %f EpotB %f EpotC %f gpb %f\n", s_a->epot, s_b->epot,
                            s_c->epot, gpb);
                }

                epot_repl = s_b->epot;

                /* Keep one of the intervals based on the value of the derivative at the new point */
                if (gpb > 0)
                {
                    /* Replace c endpoint with b */
                    swap_em_state(&s_b, &s_c);
                    c   = b;
                    gpc = gpb;
                }
                else
                {
                    /* Replace a endpoint with b */
                    swap_em_state(&s_b, &s_a);
                    a   = b;
                    gpa = gpb;
                }

                /*
                 * Stop search as soon as we find a value smaller than the endpoints.
                 * Never run more than 20 steps, no matter what.
                 */
                nminstep++;
            } while ((epot_repl > s_a->epot || epot_repl > s_c->epot) && (nminstep < 20));

            if (std::fabs(epot_repl - s_min->epot) < fabs(s_min->epot) * GMX_REAL_EPS || nminstep >= 20)
            {
                /* OK. We couldn't find a significantly lower energy.
                 * If beta==0 this was steepest descent, and then we give up.
                 * If not, set beta=0 and restart with steepest descent before quitting.
                 */
                if (beta == 0.0)
                {
                    /* Converged */
                    converged = TRUE;
                    break;
                }
                else
                {
                    /* Reset memory before giving up */
                    beta = 0.0;
                    continue;
                }
            }

            /* Select min energy state of A & C, put the best in B.
             */
            if (s_c->epot < s_a->epot)
            {
                if (debug)
                {
                    fprintf(debug, "CGE: C (%f) is lower than A (%f), moving C to B\n", s_c->epot,
                            s_a->epot);
                }
                swap_em_state(&s_b, &s_c);
                gpb = gpc;
            }
            else
            {
                if (debug)
                {
                    fprintf(debug, "CGE: A (%f) is lower than C (%f), moving A to B\n", s_a->epot,
                            s_c->epot);
                }
                swap_em_state(&s_b, &s_a);
                gpb = gpa;
            }
        }
        else
        {
            if (debug)
            {
                fprintf(debug, "CGE: Found a lower energy %f, moving C to B\n", s_c->epot);
            }
            swap_em_state(&s_b, &s_c);
            gpb = gpc;
        }

        /* new search direction */
        /* beta = 0 means forget all memory and restart with steepest descents. */
        if (nstcg && ((step % nstcg) == 0))
        {
            beta = 0.0;
        }
        else
        {
            /* s_min->fnorm cannot be zero, because then we would have converged
             * and broken out.
             */

            /* Polak-Ribiere update.
             * Change to fnorm2/fnorm2_old for Fletcher-Reeves
             */
            beta = pr_beta(cr, &inputrec->opts, mdatoms, top_global, s_min, s_b);
        }
        /* Limit beta to prevent oscillations */
        if (fabs(beta) > 5.0)
        {
            beta = 0.0;
        }


        /* update positions: the best point on this line (in B) becomes the
         * new current minimum, and its line derivative becomes the new gpa */
        swap_em_state(&s_min, &s_b);
        gpa = gpb;

        /* Print it if necessary */
        if (MASTER(cr))
        {
            if (mdrunOptions.verbose)
            {
                double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
                fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n", step,
                        s_min->epot, s_min->fnorm / sqrtNumAtoms, s_min->fmax, s_min->a_fmax + 1);
                fflush(stderr);
            }
            /* Store the new (lower) energies */
            matrix nullBox = {};
            energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
                                             enerd, nullptr, nullptr, nullBox, PTCouplingArrays(), 0,
                                             nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);

            do_log = do_per_step(step, inputrec->nstlog);
            do_ene = do_per_step(step, inputrec->nstenergy);

            imdSession->fillEnergyRecord(step, TRUE);

            if (do_log)
            {
                EnergyOutput::printHeader(fplog, step, step);
            }
            energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
                                               do_log ? fplog : nullptr, step, step,
                                               fr->fcdata.get(), nullptr);
        }

        /* Send energies and positions to the IMD client if bIMD is TRUE. */
        if (MASTER(cr) && imdSession->run(step, TRUE, state_global->box, state_global->x.rvec_array(), 0))
        {
            imdSession->sendPositionsAndEnergies();
        }

        /* Stop when the maximum force lies below tolerance.
         * If we have reached machine precision, converged is already set to true.
         */
        converged = converged || (s_min->fmax < inputrec->em_tol);

    } /* End of the loop */

    if (converged)
    {
        step--; /* we never took that last step in this case */
    }
    if (s_min->fmax > inputrec->em_tol)
    {
        if (MASTER(cr))
        {
            warn_step(fplog, inputrec->em_tol, s_min->fmax, step - 1 == number_steps, FALSE);
        }
        converged = FALSE;
    }

    if (MASTER(cr))
    {
        /* If we printed energy and/or logfile last step (which was the last step)
         * we don't have to do it again, but otherwise print the final values.
         */
        if (!do_log)
        {
            /* Write final value to log since we didn't do anything the last step */
            EnergyOutput::printHeader(fplog, step, step);
        }
        if (!do_ene || !do_log)
        {
            /* Write final energy file entries */
            energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
                                               !do_log ? fplog : nullptr, step, step,
                                               fr->fcdata.get(), nullptr);
        }
    }

    /* Print some stuff... */
    if (MASTER(cr))
    {
        fprintf(stderr, "\nwriting lowest energy coordinates.\n");
    }

    /* IMPORTANT!
     * For accurate normal mode calculation it is imperative that we
     * store the last conformation into the full precision binary trajectory.
     *
     * However, we should only do it if we did NOT already write this step
     * above (which we did if do_x or do_f was true).
     */
    /* Note that with 0 < nstfout != nstxout we can end up with two frames
     * in the trajectory with the same step number.
     */
    do_x = !do_per_step(step, inputrec->nstxout);
    do_f = (inputrec->nstfout > 0 && !do_per_step(step, inputrec->nstfout));

    write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec,
                  step, s_min, state_global, observablesHistory);


    if (MASTER(cr))
    {
        double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
        print_converged(stderr, CG, inputrec->em_tol, step, converged, number_steps, s_min, sqrtNumAtoms);
        print_converged(fplog, CG, inputrec->em_tol, step, converged, number_steps, s_min, sqrtNumAtoms);

        fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
    }

    finish_em(cr, outf, walltime_accounting, wcycle);

    /* To print the actual number of steps we needed somewhere */
    walltime_accounting_set_nsteps_done(walltime_accounting, step);
}
1640
1641
do_lbfgs()1642 void LegacySimulator::do_lbfgs()
1643 {
1644 static const char* LBFGS = "Low-Memory BFGS Minimizer";
1645 em_state_t ems;
1646 gmx_localtop_t top(top_global->ffparams);
1647 gmx_global_stat_t gstat;
1648 int ncorr, nmaxcorr, point, cp, neval, nminstep;
1649 double stepsize, step_taken, gpa, gpb, gpc, tmp, minstep;
1650 real * rho, *alpha, *p, *s, **dx, **dg;
1651 real a, b, c, maxdelta, delta;
1652 real diag, Epot0;
1653 real dgdx, dgdg, sq, yr, beta;
1654 gmx_bool converged;
1655 rvec mu_tot = { 0 };
1656 gmx_bool do_log, do_ene, do_x, do_f, foundlower, *frozen;
1657 tensor vir, pres;
1658 int start, end, number_steps;
1659 int i, k, m, n, gf, step;
1660 int mdof_flags;
1661 auto mdatoms = mdAtoms->mdatoms();
1662
1663 GMX_LOG(mdlog.info)
1664 .asParagraph()
1665 .appendText(
1666 "Note that activating L-BFGS energy minimization via the "
1667 "integrator .mdp option and the command gmx mdrun may "
1668 "be available in a different form in a future version of GROMACS, "
1669 "e.g. gmx minimize and an .mdp option.");
1670
1671 if (PAR(cr))
1672 {
1673 gmx_fatal(FARGS, "L-BFGS minimization only supports a single rank");
1674 }
1675
1676 if (nullptr != constr)
1677 {
1678 gmx_fatal(
1679 FARGS,
1680 "The combination of constraints and L-BFGS minimization is not implemented. Either "
1681 "do not use constraints, or use another minimizer (e.g. steepest descent).");
1682 }
1683
1684 n = 3 * state_global->natoms;
1685 nmaxcorr = inputrec->nbfgscorr;
1686
1687 snew(frozen, n);
1688
1689 snew(p, n);
1690 snew(rho, nmaxcorr);
1691 snew(alpha, nmaxcorr);
1692
1693 snew(dx, nmaxcorr);
1694 for (i = 0; i < nmaxcorr; i++)
1695 {
1696 snew(dx[i], n);
1697 }
1698
1699 snew(dg, nmaxcorr);
1700 for (i = 0; i < nmaxcorr; i++)
1701 {
1702 snew(dg[i], n);
1703 }
1704
1705 step = 0;
1706 neval = 0;
1707
1708 /* Init em */
1709 init_em(fplog, mdlog, LBFGS, cr, inputrec, imdSession, pull_work, state_global, top_global,
1710 &ems, &top, nrnb, fr, mdAtoms, &gstat, vsite, constr, nullptr);
1711 const bool simulationsShareState = false;
1712 gmx_mdoutf* outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider,
1713 mdModulesNotifier, inputrec, top_global, nullptr, wcycle,
1714 StartingBehavior::NewSimulation, simulationsShareState, ms);
1715 gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work,
1716 nullptr, false, StartingBehavior::NewSimulation,
1717 simulationsShareState, mdModulesNotifier);
1718
1719 start = 0;
1720 end = mdatoms->homenr;
1721
1722 /* We need 4 working states */
1723 em_state_t s0{}, s1{}, s2{}, s3{};
1724 em_state_t* sa = &s0;
1725 em_state_t* sb = &s1;
1726 em_state_t* sc = &s2;
1727 em_state_t* last = &s3;
1728 /* Initialize by copying the state from ems (we could skip x and f here) */
1729 *sa = ems;
1730 *sb = ems;
1731 *sc = ems;
1732
1733 /* Print to log file */
1734 print_em_start(fplog, cr, walltime_accounting, wcycle, LBFGS);
1735
1736 do_log = do_ene = do_x = do_f = TRUE;
1737
1738 /* Max number of steps */
1739 number_steps = inputrec->nsteps;
1740
1741 /* Create a 3*natoms index to tell whether each degree of freedom is frozen */
1742 gf = 0;
1743 for (i = start; i < end; i++)
1744 {
1745 if (mdatoms->cFREEZE)
1746 {
1747 gf = mdatoms->cFREEZE[i];
1748 }
1749 for (m = 0; m < DIM; m++)
1750 {
1751 frozen[3 * i + m] = (inputrec->opts.nFreeze[gf][m] != 0);
1752 }
1753 }
1754 if (MASTER(cr))
1755 {
1756 sp_header(stderr, LBFGS, inputrec->em_tol, number_steps);
1757 }
1758 if (fplog)
1759 {
1760 sp_header(fplog, LBFGS, inputrec->em_tol, number_steps);
1761 }
1762
1763 if (vsite)
1764 {
1765 vsite->construct(state_global->x, 1, {}, state_global->box);
1766 }
1767
1768 /* Call the force routine and some auxiliary (neighboursearching etc.) */
1769 /* do_force always puts the charge groups in the box and shifts again
1770 * We do not unshift, so molecules are always whole
1771 */
1772 neval++;
1773 EnergyEvaluator energyEvaluator{ fplog, mdlog, cr, ms, top_global, &top,
1774 inputrec, imdSession, pull_work, nrnb, wcycle, gstat,
1775 vsite, constr, mdAtoms, fr, runScheduleWork, enerd };
1776 energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE);
1777
1778 if (MASTER(cr))
1779 {
1780 /* Copy stuff to the energy bin for easy printing etc. */
1781 matrix nullBox = {};
1782 energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
1783 enerd, nullptr, nullptr, nullBox, PTCouplingArrays(), 0,
1784 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
1785
1786 EnergyOutput::printHeader(fplog, step, step);
1787 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step,
1788 step, fr->fcdata.get(), nullptr);
1789 }
1790
1791 /* Set the initial step.
1792 * since it will be multiplied by the non-normalized search direction
1793 * vector (force vector the first time), we scale it by the
1794 * norm of the force.
1795 */
1796
1797 if (MASTER(cr))
1798 {
1799 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1800 fprintf(stderr, "Using %d BFGS correction steps.\n\n", nmaxcorr);
1801 fprintf(stderr, " F-max = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
1802 fprintf(stderr, " F-Norm = %12.5e\n", ems.fnorm / sqrtNumAtoms);
1803 fprintf(stderr, "\n");
1804 /* and copy to the log file too... */
1805 fprintf(fplog, "Using %d BFGS correction steps.\n\n", nmaxcorr);
1806 fprintf(fplog, " F-max = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
1807 fprintf(fplog, " F-Norm = %12.5e\n", ems.fnorm / sqrtNumAtoms);
1808 fprintf(fplog, "\n");
1809 }
1810
1811 // Point is an index to the memory of search directions, where 0 is the first one.
1812 point = 0;
1813
1814 // Set initial search direction to the force (-gradient), or 0 for frozen particles.
1815 real* fInit = static_cast<real*>(ems.f.view().force().data()[0]);
1816 for (i = 0; i < n; i++)
1817 {
1818 if (!frozen[i])
1819 {
1820 dx[point][i] = fInit[i]; /* Initial search direction */
1821 }
1822 else
1823 {
1824 dx[point][i] = 0;
1825 }
1826 }
1827
1828 // Stepsize will be modified during the search, and actually it is not critical
1829 // (the main efficiency in the algorithm comes from changing directions), but
1830 // we still need an initial value, so estimate it as the inverse of the norm
1831 // so we take small steps where the potential fluctuates a lot.
1832 stepsize = 1.0 / ems.fnorm;
1833
1834 /* Start the loop over BFGS steps.
1835 * Each successful step is counted, and we continue until
1836 * we either converge or reach the max number of steps.
1837 */
1838
1839 ncorr = 0;
1840
1841 /* Set the gradient from the force */
1842 converged = FALSE;
1843 for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
1844 {
1845
1846 /* Write coordinates if necessary */
1847 do_x = do_per_step(step, inputrec->nstxout);
1848 do_f = do_per_step(step, inputrec->nstfout);
1849
1850 mdof_flags = 0;
1851 if (do_x)
1852 {
1853 mdof_flags |= MDOF_X;
1854 }
1855
1856 if (do_f)
1857 {
1858 mdof_flags |= MDOF_F;
1859 }
1860
1861 if (inputrec->bIMD)
1862 {
1863 mdof_flags |= MDOF_IMD;
1864 }
1865
1866 gmx::WriteCheckpointDataHolder checkpointDataHolder;
1867 mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags, top_global->natoms, step,
1868 static_cast<real>(step), &ems.s, state_global, observablesHistory,
1869 ems.f.view().force(), &checkpointDataHolder);
1870
1871 /* Do the linesearching in the direction dx[point][0..(n-1)] */
1872
1873 /* make s a pointer to current search direction - point=0 first time we get here */
1874 s = dx[point];
1875
1876 real* xx = static_cast<real*>(ems.s.x.rvec_array()[0]);
1877 real* ff = static_cast<real*>(ems.f.view().force().data()[0]);
1878
1879 // calculate line gradient in position A
1880 for (gpa = 0, i = 0; i < n; i++)
1881 {
1882 gpa -= s[i] * ff[i];
1883 }
1884
1885 /* Calculate minimum allowed stepsize along the line, before the average (norm)
1886 * relative change in coordinate is smaller than precision
1887 */
1888 for (minstep = 0, i = 0; i < n; i++)
1889 {
1890 tmp = fabs(xx[i]);
1891 if (tmp < 1.0)
1892 {
1893 tmp = 1.0;
1894 }
1895 tmp = s[i] / tmp;
1896 minstep += tmp * tmp;
1897 }
1898 minstep = GMX_REAL_EPS / sqrt(minstep / n);
1899
1900 if (stepsize < minstep)
1901 {
1902 converged = TRUE;
1903 break;
1904 }
1905
1906 // Before taking any steps along the line, store the old position
1907 *last = ems;
1908 real* lastx = static_cast<real*>(last->s.x.data()[0]);
1909 real* lastf = static_cast<real*>(last->f.view().force().data()[0]);
1910 Epot0 = ems.epot;
1911
1912 *sa = ems;
1913
1914 /* Take a step downhill.
1915 * In theory, we should find the actual minimum of the function in this
1916 * direction, somewhere along the line.
1917 * That is quite possible, but it turns out to take 5-10 function evaluations
1918 * for each line. However, we dont really need to find the exact minimum -
1919 * it is much better to start a new BFGS step in a modified direction as soon
1920 * as we are close to it. This will save a lot of energy evaluations.
1921 *
1922 * In practice, we just try to take a single step.
1923 * If it worked (i.e. lowered the energy), we increase the stepsize but
1924 * continue straight to the next BFGS step without trying to find any minimum,
1925 * i.e. we change the search direction too. If the line was smooth, it is
1926 * likely we are in a smooth region, and then it makes sense to take longer
1927 * steps in the modified search direction too.
1928 *
1929 * If it didn't work (higher energy), there must be a minimum somewhere between
1930 * the old position and the new one. Then we need to start by finding a lower
1931 * value before we change search direction. Since the energy was apparently
1932 * quite rough, we need to decrease the step size.
1933 *
1934 * Due to the finite numerical accuracy, it turns out that it is a good idea
1935 * to accept a SMALL increase in energy, if the derivative is still downhill.
1936 * This leads to lower final energies in the tests I've done. / Erik
1937 */
1938
1939 // State "A" is the first position along the line.
1940 // reference position along line is initially zero
1941 a = 0.0;
1942
1943 // Check stepsize first. We do not allow displacements
1944 // larger than emstep.
1945 //
1946 do
1947 {
1948 // Pick a new position C by adding stepsize to A.
1949 c = a + stepsize;
1950
1951 // Calculate what the largest change in any individual coordinate
1952 // would be (translation along line * gradient along line)
1953 maxdelta = 0;
1954 for (i = 0; i < n; i++)
1955 {
1956 delta = c * s[i];
1957 if (delta > maxdelta)
1958 {
1959 maxdelta = delta;
1960 }
1961 }
1962 // If any displacement is larger than the stepsize limit, reduce the step
1963 if (maxdelta > inputrec->em_stepsize)
1964 {
1965 stepsize *= 0.1;
1966 }
1967 } while (maxdelta > inputrec->em_stepsize);
1968
1969 // Take a trial step and move the coordinate array xc[] to position C
1970 real* xc = static_cast<real*>(sc->s.x.rvec_array()[0]);
1971 for (i = 0; i < n; i++)
1972 {
1973 xc[i] = lastx[i] + c * s[i];
1974 }
1975
1976 neval++;
1977 // Calculate energy for the trial step in position C
1978 energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE);
1979
1980 // Calc line gradient in position C
1981 real* fc = static_cast<real*>(sc->f.view().force()[0]);
1982 for (gpc = 0, i = 0; i < n; i++)
1983 {
1984 gpc -= s[i] * fc[i]; /* f is negative gradient, thus the sign */
1985 }
1986 /* Sum the gradient along the line across CPUs */
1987 if (PAR(cr))
1988 {
1989 gmx_sumd(1, &gpc, cr);
1990 }
1991
1992 // This is the max amount of increase in energy we tolerate.
1993 // By allowing VERY small changes (close to numerical precision) we
1994 // frequently find even better (lower) final energies.
1995 tmp = std::sqrt(GMX_REAL_EPS) * fabs(sa->epot);
1996
1997 // Accept the step if the energy is lower in the new position C (compared to A),
1998 // or if it is not significantly higher and the line derivative is still negative.
1999 foundlower = sc->epot < sa->epot || (gpc < 0 && sc->epot < (sa->epot + tmp));
2000 // If true, great, we found a better energy. We no longer try to alter the
2001 // stepsize, but simply accept this new better position. The we select a new
2002 // search direction instead, which will be much more efficient than continuing
2003 // to take smaller steps along a line. Set fnorm based on the new C position,
2004 // which will be used to update the stepsize to 1/fnorm further down.
2005
2006 // If false, the energy is NOT lower in point C, i.e. it will be the same
2007 // or higher than in point A. In this case it is pointless to move to point C,
2008 // so we will have to do more iterations along the same line to find a smaller
2009 // value in the interval [A=0.0,C].
2010 // Here, A is still 0.0, but that will change when we do a search in the interval
2011 // [0.0,C] below. That search we will do by interpolation or bisection rather
2012 // than with the stepsize, so no need to modify it. For the next search direction
2013 // it will be reset to 1/fnorm anyway.
2014
2015 if (!foundlower)
2016 {
2017 // OK, if we didn't find a lower value we will have to locate one now - there must
2018 // be one in the interval [a,c].
2019 // The same thing is valid here, though: Don't spend dozens of iterations to find
2020 // the line minimum. We try to interpolate based on the derivative at the endpoints,
2021 // and only continue until we find a lower value. In most cases this means 1-2 iterations.
2022 // I also have a safeguard for potentially really pathological functions so we never
2023 // take more than 20 steps before we give up.
2024 // If we already found a lower value we just skip this step and continue to the update.
2025 real fnorm = 0;
2026 nminstep = 0;
2027 do
2028 {
2029 // Select a new trial point B in the interval [A,C].
2030 // If the derivatives at points a & c have different sign we interpolate to zero,
2031 // otherwise just do a bisection since there might be multiple minima/maxima
2032 // inside the interval.
2033 if (gpa < 0 && gpc > 0)
2034 {
2035 b = a + gpa * (a - c) / (gpc - gpa);
2036 }
2037 else
2038 {
2039 b = 0.5 * (a + c);
2040 }
2041
2042 /* safeguard if interpolation close to machine accuracy causes errors:
2043 * never go outside the interval
2044 */
2045 if (b <= a || b >= c)
2046 {
2047 b = 0.5 * (a + c);
2048 }
2049
2050 // Take a trial step to point B
2051 real* xb = static_cast<real*>(sb->s.x.rvec_array()[0]);
2052 for (i = 0; i < n; i++)
2053 {
2054 xb[i] = lastx[i] + b * s[i];
2055 }
2056
2057 neval++;
2058 // Calculate energy for the trial step in point B
2059 energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE);
2060 fnorm = sb->fnorm;
2061
2062 // Calculate gradient in point B
2063 real* fb = static_cast<real*>(sb->f.view().force()[0]);
2064 for (gpb = 0, i = 0; i < n; i++)
2065 {
2066 gpb -= s[i] * fb[i]; /* f is negative gradient, thus the sign */
2067 }
2068 /* Sum the gradient along the line across CPUs */
2069 if (PAR(cr))
2070 {
2071 gmx_sumd(1, &gpb, cr);
2072 }
2073
2074 // Keep one of the intervals [A,B] or [B,C] based on the value of the derivative
2075 // at the new point B, and rename the endpoints of this new interval A and C.
2076 if (gpb > 0)
2077 {
2078 /* Replace c endpoint with b */
2079 c = b;
2080 /* copy state b to c */
2081 *sc = *sb;
2082 }
2083 else
2084 {
2085 /* Replace a endpoint with b */
2086 a = b;
2087 /* copy state b to a */
2088 *sa = *sb;
2089 }
2090
2091 /*
2092 * Stop search as soon as we find a value smaller than the endpoints,
2093 * or if the tolerance is below machine precision.
2094 * Never run more than 20 steps, no matter what.
2095 */
2096 nminstep++;
2097 } while ((sb->epot > sa->epot || sb->epot > sc->epot) && (nminstep < 20));
2098
2099 if (std::fabs(sb->epot - Epot0) < GMX_REAL_EPS || nminstep >= 20)
2100 {
2101 /* OK. We couldn't find a significantly lower energy.
2102 * If ncorr==0 this was steepest descent, and then we give up.
2103 * If not, reset memory to restart as steepest descent before quitting.
2104 */
2105 if (ncorr == 0)
2106 {
2107 /* Converged */
2108 converged = TRUE;
2109 break;
2110 }
2111 else
2112 {
2113 /* Reset memory */
2114 ncorr = 0;
2115 /* Search in gradient direction */
2116 for (i = 0; i < n; i++)
2117 {
2118 dx[point][i] = ff[i];
2119 }
2120 /* Reset stepsize */
2121 stepsize = 1.0 / fnorm;
2122 continue;
2123 }
2124 }
2125
2126 /* Select min energy state of A & C, put the best in xx/ff/Epot
2127 */
2128 if (sc->epot < sa->epot)
2129 {
2130 /* Use state C */
2131 ems = *sc;
2132 step_taken = c;
2133 }
2134 else
2135 {
2136 /* Use state A */
2137 ems = *sa;
2138 step_taken = a;
2139 }
2140 }
2141 else
2142 {
2143 /* found lower */
2144 /* Use state C */
2145 ems = *sc;
2146 step_taken = c;
2147 }
2148
2149 /* Update the memory information, and calculate a new
2150 * approximation of the inverse hessian
2151 */
2152
2153 /* Have new data in Epot, xx, ff */
2154 if (ncorr < nmaxcorr)
2155 {
2156 ncorr++;
2157 }
2158
2159 for (i = 0; i < n; i++)
2160 {
2161 dg[point][i] = lastf[i] - ff[i];
2162 dx[point][i] *= step_taken;
2163 }
2164
2165 dgdg = 0;
2166 dgdx = 0;
2167 for (i = 0; i < n; i++)
2168 {
2169 dgdg += dg[point][i] * dg[point][i];
2170 dgdx += dg[point][i] * dx[point][i];
2171 }
2172
2173 diag = dgdx / dgdg;
2174
2175 rho[point] = 1.0 / dgdx;
2176 point++;
2177
2178 if (point >= nmaxcorr)
2179 {
2180 point = 0;
2181 }
2182
2183 /* Update */
2184 for (i = 0; i < n; i++)
2185 {
2186 p[i] = ff[i];
2187 }
2188
2189 cp = point;
2190
2191 /* Recursive update. First go back over the memory points */
2192 for (k = 0; k < ncorr; k++)
2193 {
2194 cp--;
2195 if (cp < 0)
2196 {
2197 cp = ncorr - 1;
2198 }
2199
2200 sq = 0;
2201 for (i = 0; i < n; i++)
2202 {
2203 sq += dx[cp][i] * p[i];
2204 }
2205
2206 alpha[cp] = rho[cp] * sq;
2207
2208 for (i = 0; i < n; i++)
2209 {
2210 p[i] -= alpha[cp] * dg[cp][i];
2211 }
2212 }
2213
2214 for (i = 0; i < n; i++)
2215 {
2216 p[i] *= diag;
2217 }
2218
2219 /* And then go forward again */
2220 for (k = 0; k < ncorr; k++)
2221 {
2222 yr = 0;
2223 for (i = 0; i < n; i++)
2224 {
2225 yr += p[i] * dg[cp][i];
2226 }
2227
2228 beta = rho[cp] * yr;
2229 beta = alpha[cp] - beta;
2230
2231 for (i = 0; i < n; i++)
2232 {
2233 p[i] += beta * dx[cp][i];
2234 }
2235
2236 cp++;
2237 if (cp >= ncorr)
2238 {
2239 cp = 0;
2240 }
2241 }
2242
2243 for (i = 0; i < n; i++)
2244 {
2245 if (!frozen[i])
2246 {
2247 dx[point][i] = p[i];
2248 }
2249 else
2250 {
2251 dx[point][i] = 0;
2252 }
2253 }
2254
2255 /* Print it if necessary */
2256 if (MASTER(cr))
2257 {
2258 if (mdrunOptions.verbose)
2259 {
2260 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2261 fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n", step,
2262 ems.epot, ems.fnorm / sqrtNumAtoms, ems.fmax, ems.a_fmax + 1);
2263 fflush(stderr);
2264 }
2265 /* Store the new (lower) energies */
2266 matrix nullBox = {};
2267 energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
2268 enerd, nullptr, nullptr, nullBox, PTCouplingArrays(), 0,
2269 nullptr, nullptr, vir, pres, nullptr, mu_tot, constr);
2270
2271 do_log = do_per_step(step, inputrec->nstlog);
2272 do_ene = do_per_step(step, inputrec->nstenergy);
2273
2274 imdSession->fillEnergyRecord(step, TRUE);
2275
2276 if (do_log)
2277 {
2278 EnergyOutput::printHeader(fplog, step, step);
2279 }
2280 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
2281 do_log ? fplog : nullptr, step, step,
2282 fr->fcdata.get(), nullptr);
2283 }
2284
2285 /* Send x and E to IMD client, if bIMD is TRUE. */
2286 if (imdSession->run(step, TRUE, state_global->box, state_global->x.rvec_array(), 0) && MASTER(cr))
2287 {
2288 imdSession->sendPositionsAndEnergies();
2289 }
2290
2291 // Reset stepsize in we are doing more iterations
2292 stepsize = 1.0;
2293
2294 /* Stop when the maximum force lies below tolerance.
2295 * If we have reached machine precision, converged is already set to true.
2296 */
2297 converged = converged || (ems.fmax < inputrec->em_tol);
2298
2299 } /* End of the loop */
2300
2301 if (converged)
2302 {
2303 step--; /* we never took that last step in this case */
2304 }
2305 if (ems.fmax > inputrec->em_tol)
2306 {
2307 if (MASTER(cr))
2308 {
2309 warn_step(fplog, inputrec->em_tol, ems.fmax, step - 1 == number_steps, FALSE);
2310 }
2311 converged = FALSE;
2312 }
2313
2314 /* If we printed energy and/or logfile last step (which was the last step)
2315 * we don't have to do it again, but otherwise print the final values.
2316 */
2317 if (!do_log) /* Write final value to log since we didn't do anythin last step */
2318 {
2319 EnergyOutput::printHeader(fplog, step, step);
2320 }
2321 if (!do_ene || !do_log) /* Write final energy file entries */
2322 {
2323 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
2324 !do_log ? fplog : nullptr, step, step, fr->fcdata.get(),
2325 nullptr);
2326 }
2327
2328 /* Print some stuff... */
2329 if (MASTER(cr))
2330 {
2331 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
2332 }
2333
2334 /* IMPORTANT!
2335 * For accurate normal mode calculation it is imperative that we
2336 * store the last conformation into the full precision binary trajectory.
2337 *
2338 * However, we should only do it if we did NOT already write this step
2339 * above (which we did if do_x or do_f was true).
2340 */
2341 do_x = !do_per_step(step, inputrec->nstxout);
2342 do_f = !do_per_step(step, inputrec->nstfout);
2343 write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec,
2344 step, &ems, state_global, observablesHistory);
2345
2346 if (MASTER(cr))
2347 {
2348 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2349 print_converged(stderr, LBFGS, inputrec->em_tol, step, converged, number_steps, &ems, sqrtNumAtoms);
2350 print_converged(fplog, LBFGS, inputrec->em_tol, step, converged, number_steps, &ems, sqrtNumAtoms);
2351
2352 fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
2353 }
2354
2355 finish_em(cr, outf, walltime_accounting, wcycle);
2356
2357 /* To print the actual number of steps we needed somewhere */
2358 walltime_accounting_set_nsteps_done(walltime_accounting, step);
2359 }
2360
/* Steepest-descent energy minimization.
 *
 * Takes trial steps of at most 'ustep' nm along the force direction.
 * A trial step is accepted when it lowers the potential energy, in which
 * case the maximum displacement is increased by 20%; otherwise it is
 * halved and the step retried from the previous minimum. The loop ends
 * when fmax drops below inputrec->em_tol (converged), when nsteps trial
 * steps have been taken, or when ustep shrinks below machine-precision
 * scale (1e-12 nm in double, 1e-6 nm in single).
 */
void LegacySimulator::do_steep()
{
    const char*       SD = "Steepest Descents";
    gmx_localtop_t    top(top_global->ffparams);
    gmx_global_stat_t gstat;
    real              stepsize; // step length per unit force, ustep / fmax
    real              ustep;    // maximum displacement of any atom (nm)
    gmx_bool          bDone, bAbort, do_x, do_f;
    tensor            vir, pres;
    rvec              mu_tot = { 0 };
    int               nsteps;
    int               count          = 0; // number of trial steps taken
    int               steps_accepted = 0; // number of energy-lowering steps
    auto              mdatoms        = mdAtoms->mdatoms();

    GMX_LOG(mdlog.info)
            .asParagraph()
            .appendText(
                    "Note that activating steepest-descent energy minimization via the "
                    "integrator .mdp option and the command gmx mdrun may "
                    "be available in a different form in a future version of GROMACS, "
                    "e.g. gmx minimize and an .mdp option.");

    /* Create 2 states on the stack and extract pointers that we will swap */
    em_state_t  s0{}, s1{};
    em_state_t* s_min = &s0; // lowest-energy state seen so far
    em_state_t* s_try = &s1; // current trial state

    /* Init em and store the local state in s_try */
    init_em(fplog, mdlog, SD, cr, inputrec, imdSession, pull_work, state_global, top_global, s_try,
            &top, nrnb, fr, mdAtoms, &gstat, vsite, constr, nullptr);
    const bool  simulationsShareState = false;
    gmx_mdoutf* outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider,
                                   mdModulesNotifier, inputrec, top_global, nullptr, wcycle,
                                   StartingBehavior::NewSimulation, simulationsShareState, ms);
    gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work,
                                   nullptr, false, StartingBehavior::NewSimulation,
                                   simulationsShareState, mdModulesNotifier);

    /* Print to log file */
    print_em_start(fplog, cr, walltime_accounting, wcycle, SD);

    /* Set variables for stepsize (in nm). This is the largest
     * step that we are going to make in any direction.
     */
    ustep    = inputrec->em_stepsize;
    stepsize = 0;

    /* Max number of steps */
    nsteps = inputrec->nsteps;

    if (MASTER(cr))
    {
        /* Print to the screen */
        sp_header(stderr, SD, inputrec->em_tol, nsteps);
    }
    if (fplog)
    {
        sp_header(fplog, SD, inputrec->em_tol, nsteps);
    }
    EnergyEvaluator energyEvaluator{ fplog,    mdlog,      cr,        ms,   top_global,      &top,
                                     inputrec, imdSession, pull_work, nrnb, wcycle,          gstat,
                                     vsite,    constr,     mdAtoms,   fr,   runScheduleWork, enerd };

    /**** HERE STARTS THE LOOP ****
     * count is the counter for the number of steps
     * bDone will be TRUE when the minimization has converged
     * bAbort will be TRUE when nsteps steps have been performed or when
     * the stepsize becomes smaller than is reasonable for machine precision
     */
    count  = 0;
    bDone  = FALSE;
    bAbort = FALSE;
    while (!bDone && !bAbort)
    {
        bAbort = (nsteps >= 0) && (count == nsteps);

        /* set new coordinates, except for first step */
        bool validStep = true;
        if (count > 0)
        {
            // Displace from the current minimum along its force direction.
            validStep = do_em_step(cr, inputrec, mdatoms, s_min, stepsize,
                                   s_min->f.view().forceWithPadding(), s_try, constr, count);
        }

        if (validStep)
        {
            energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0);
        }
        else
        {
            // Signal constraint error during stepping with energy=inf
            s_try->epot = std::numeric_limits<real>::infinity();
        }

        if (MASTER(cr))
        {
            EnergyOutput::printHeader(fplog, count, count);
        }

        if (count == 0)
        {
            // Seed the reference energy so the first step is always "accepted".
            s_min->epot = s_try->epot;
        }

        /* Print it if necessary  */
        if (MASTER(cr))
        {
            if (mdrunOptions.verbose)
            {
                fprintf(stderr, "Step=%5d, Dmax= %6.1e nm, Epot= %12.5e Fmax= %11.5e, atom= %d%c",
                        count, ustep, s_try->epot, s_try->fmax, s_try->a_fmax + 1,
                        ((count == 0) || (s_try->epot < s_min->epot)) ? '\n' : '\r');
                fflush(stderr);
            }

            if ((count == 0) || (s_try->epot < s_min->epot))
            {
                /* Store the new (lower) energies  */
                matrix nullBox = {};
                energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(count),
                                                 mdatoms->tmass, enerd, nullptr, nullptr, nullBox,
                                                 PTCouplingArrays(), 0, nullptr, nullptr, vir, pres,
                                                 nullptr, mu_tot, constr);

                imdSession->fillEnergyRecord(count, TRUE);

                const bool do_dr = do_per_step(steps_accepted, inputrec->nstdisreout);
                const bool do_or = do_per_step(steps_accepted, inputrec->nstorireout);
                energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, do_dr, do_or,
                                                   fplog, count, count, fr->fcdata.get(), nullptr);
                fflush(fplog);
            }
        }

        /* Now if the new energy is smaller than the previous...
         * or if this is the first step!
         * or if we did random steps!
         */

        if ((count == 0) || (s_try->epot < s_min->epot))
        {
            steps_accepted++;

            /* Test whether the convergence criterion is met...  */
            bDone = (s_try->fmax < inputrec->em_tol);

            /* Copy the arrays for force, positions and energy  */
            /* The 'Min' array always holds the coords and forces of the minimal
               sampled energy  */
            swap_em_state(&s_min, &s_try);
            if (count > 0)
            {
                // Accepted: be slightly more ambitious next time.
                ustep *= 1.2;
            }

            /* Write to trn, if necessary */
            do_x = do_per_step(steps_accepted, inputrec->nstxout);
            do_f = do_per_step(steps_accepted, inputrec->nstfout);
            write_em_traj(fplog, cr, outf, do_x, do_f, nullptr, top_global, inputrec, count, s_min,
                          state_global, observablesHistory);
        }
        else
        {
            /* If energy is not smaller make the step smaller...  */
            ustep *= 0.5;

            if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
            {
                /* Reload the old state */
                em_dd_partition_system(fplog, mdlog, count, cr, top_global, inputrec, imdSession,
                                       pull_work, s_min, &top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
            }
        }

        // If the force is very small after finishing minimization,
        // we risk dividing by zero when calculating the step size.
        // So we check first if the minimization has stopped before
        // trying to obtain a new step size.
        if (!bDone)
        {
            /* Determine new step  */
            stepsize = ustep / s_min->fmax;
        }

        /* Check if stepsize is too small, with 1 nm as a characteristic length */
#if GMX_DOUBLE
        if (count == nsteps || ustep < 1e-12)
#else
        if (count == nsteps || ustep < 1e-6)
#endif
        {
            if (MASTER(cr))
            {
                warn_step(fplog, inputrec->em_tol, s_min->fmax, count == nsteps, constr != nullptr);
            }
            bAbort = TRUE;
        }

        /* Send IMD energies and positions, if bIMD is TRUE. */
        if (imdSession->run(count, TRUE, MASTER(cr) ? state_global->box : nullptr,
                            MASTER(cr) ? state_global->x.rvec_array() : nullptr, 0)
            && MASTER(cr))
        {
            imdSession->sendPositionsAndEnergies();
        }

        count++;
    } /* End of the loop  */

    /* Print some data...  */
    if (MASTER(cr))
    {
        fprintf(stderr, "\nwriting lowest energy coordinates.\n");
    }
    write_em_traj(fplog, cr, outf, TRUE, inputrec->nstfout != 0, ftp2fn(efSTO, nfile, fnm),
                  top_global, inputrec, count, s_min, state_global, observablesHistory);

    if (MASTER(cr))
    {
        double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));

        print_converged(stderr, SD, inputrec->em_tol, count, bDone, nsteps, s_min, sqrtNumAtoms);
        print_converged(fplog, SD, inputrec->em_tol, count, bDone, nsteps, s_min, sqrtNumAtoms);
    }

    finish_em(cr, outf, walltime_accounting, wcycle);

    /* To print the actual number of steps we needed somewhere */
    // NOTE(review): mutates the input record; callers apparently read the
    // step count back from inputrec->nsteps — confirm before changing.
    inputrec->nsteps = count;

    walltime_accounting_set_nsteps_done(walltime_accounting, count);
}
2594
/* Normal-mode analysis.
 *
 * Builds the Hessian of the potential by central finite differences of the
 * forces: each real atom (vsites/shells excluded via get_atom_index) is
 * displaced by +/- der_range along each Cartesian dimension, forces are
 * re-evaluated, and the second derivative is -(f(x+h) - f(x-h)) / (2h).
 * Rows of the matrix are distributed cyclically over the MPI ranks and
 * collected on the master, which writes the full or sparse matrix to the
 * .mtx file. Constraints are not supported and abort with a fatal error.
 */
void LegacySimulator::do_nm()
{
    const char*         NM = "Normal Mode Analysis";
    int                 nnodes;
    gmx_localtop_t      top(top_global->ffparams);
    gmx_global_stat_t   gstat;
    tensor              vir, pres;
    rvec                mu_tot = { 0 };
    rvec*               dfdx; // one Hessian row-block: d(force)/dx for all atoms
    gmx_bool            bSparse; /* use sparse matrix storage format */
    size_t              sz;
    gmx_sparsematrix_t* sparse_matrix = nullptr;
    real*               full_matrix   = nullptr;

    /* added with respect to mdrun */
    int  row, col;
    // Finite-difference displacement, scaled to the available precision.
    real der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
    real x_min;
    bool bIsMaster = MASTER(cr);
    auto mdatoms   = mdAtoms->mdatoms();

    GMX_LOG(mdlog.info)
            .asParagraph()
            .appendText(
                    "Note that activating normal-mode analysis via the integrator "
                    ".mdp option and the command gmx mdrun may "
                    "be available in a different form in a future version of GROMACS, "
                    "e.g. gmx normal-modes.");

    if (constr != nullptr)
    {
        gmx_fatal(
                FARGS,
                "Constraints present with Normal Mode Analysis, this combination is not supported");
    }

    gmx_shellfc_t* shellfc;

    em_state_t state_work{};

    /* Init em and store the local state in state_minimum */
    init_em(fplog, mdlog, NM, cr, inputrec, imdSession, pull_work, state_global, top_global,
            &state_work, &top, nrnb, fr, mdAtoms, &gstat, vsite, constr, &shellfc);
    const bool  simulationsShareState = false;
    gmx_mdoutf* outf = init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider,
                                   mdModulesNotifier, inputrec, top_global, nullptr, wcycle,
                                   StartingBehavior::NewSimulation, simulationsShareState, ms);

    std::vector<int>       atom_index = get_atom_index(top_global);
    // Forces at the negative displacement, saved between the two evaluations.
    std::vector<gmx::RVec> fneg(atom_index.size(), { 0, 0, 0 });
    snew(dfdx, atom_index.size());

#if !GMX_DOUBLE
    if (bIsMaster)
    {
        fprintf(stderr,
                "NOTE: This version of GROMACS has been compiled in single precision,\n"
                "      which MIGHT not be accurate enough for normal mode analysis.\n"
                "      GROMACS now uses sparse matrix storage, so the memory requirements\n"
                "      are fairly modest even if you recompile in double precision.\n\n");
    }
#endif

    /* Check if we can/should use sparse storage format.
     *
     * Sparse format is only useful when the Hessian itself is sparse, which it
     * will be when we use a cutoff.
     * For small systems (n<1000) it is easier to always use full matrix format, though.
     */
    if (EEL_FULL(fr->ic->eeltype) || fr->rlist == 0.0)
    {
        GMX_LOG(mdlog.warning)
                .appendText("Non-cutoff electrostatics used, forcing full Hessian format.");
        bSparse = FALSE;
    }
    else if (atom_index.size() < 1000)
    {
        GMX_LOG(mdlog.warning)
                .appendTextFormatted("Small system size (N=%zu), using full Hessian format.",
                                     atom_index.size());
        bSparse = FALSE;
    }
    else
    {
        GMX_LOG(mdlog.warning).appendText("Using compressed symmetric sparse Hessian format.");
        bSparse = TRUE;
    }

    /* Number of dimensions, based on real atoms, that is not vsites or shell */
    sz = DIM * atom_index.size();

    fprintf(stderr, "Allocating Hessian memory...\n\n");

    if (bSparse)
    {
        sparse_matrix                       = gmx_sparsematrix_init(sz);
        sparse_matrix->compressed_symmetric = TRUE;
    }
    else
    {
        snew(full_matrix, sz * sz);
    }

    /* Write start time and temperature */
    print_em_start(fplog, cr, walltime_accounting, wcycle, NM);

    /* fudge nr of steps to nr of atoms */
    inputrec->nsteps = atom_index.size() * 2;

    if (bIsMaster)
    {
        fprintf(stderr, "starting normal mode calculation '%s'\n%" PRId64 " steps.\n\n",
                *(top_global->name), inputrec->nsteps);
    }

    nnodes = cr->nnodes;

    /* Make evaluate_energy do a single node force calculation */
    cr->nnodes = 1;
    EnergyEvaluator energyEvaluator{ fplog,    mdlog,      cr,        ms,   top_global,      &top,
                                     inputrec, imdSession, pull_work, nrnb, wcycle,          gstat,
                                     vsite,    constr,     mdAtoms,   fr,   runScheduleWork, enerd };
    energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE);
    cr->nnodes = nnodes;

    /* if forces are not small, warn user */
    get_state_f_norm_max(cr, &(inputrec->opts), mdatoms, &state_work);

    GMX_LOG(mdlog.warning).appendTextFormatted("Maximum force:%12.5e", state_work.fmax);
    if (state_work.fmax > 1.0e-3)
    {
        GMX_LOG(mdlog.warning)
                .appendText(
                        "The force is probably not small enough to "
                        "ensure that you are at a minimum.\n"
                        "Be aware that negative eigenvalues may occur\n"
                        "when the resulting matrix is diagonalized.");
    }

    /***********************************************************
     *
     *      Loop over all pairs in matrix
     *
     *      do_force called twice. Once with positive and
     *      once with negative displacement
     *
     ************************************************************/

    /* Steps are divided one by one over the nodes */
    bool bNS          = true;
    auto state_work_x = makeArrayRef(state_work.s.x);
    auto state_work_f = state_work.f.view().force();
    // Cyclic distribution: rank i handles atoms i, i+nnodes, i+2*nnodes, ...
    for (index aid = cr->nodeid; aid < ssize(atom_index); aid += nnodes)
    {
        size_t atom = atom_index[aid];
        for (size_t d = 0; d < DIM; d++)
        {
            int64_t step        = 0;
            int     force_flags = GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES;
            double  t           = 0;

            x_min = state_work_x[atom][d];

            // dx == 0: negative displacement; dx == 1: positive displacement.
            for (unsigned int dx = 0; (dx < 2); dx++)
            {
                if (dx == 0)
                {
                    state_work_x[atom][d] = x_min - der_range;
                }
                else
                {
                    state_work_x[atom][d] = x_min + der_range;
                }

                /* Make evaluate_energy do a single node force calculation */
                cr->nnodes = 1;
                if (shellfc)
                {
                    /* Now is the time to relax the shells */
                    relax_shell_flexcon(fplog, cr, ms, mdrunOptions.verbose, nullptr, step, inputrec,
                                        imdSession, pull_work, bNS, force_flags, &top, constr, enerd,
                                        state_work.s.natoms, state_work.s.x.arrayRefWithPadding(),
                                        state_work.s.v.arrayRefWithPadding(), state_work.s.box,
                                        state_work.s.lambda, &state_work.s.hist, &state_work.f.view(),
                                        vir, mdatoms, nrnb, wcycle, shellfc, fr, runScheduleWork, t,
                                        mu_tot, vsite, DDBalanceRegionHandler(nullptr));
                    bNS = false;
                    step++;
                }
                else
                {
                    energyEvaluator.run(&state_work, mu_tot, vir, pres, aid * 2 + dx, FALSE);
                }

                cr->nnodes = nnodes;

                if (dx == 0)
                {
                    // Save the forces at x - h before evaluating at x + h.
                    std::copy(state_work_f.begin(), state_work_f.begin() + atom_index.size(),
                              fneg.begin());
                }
            }

            /* x is restored to original */
            state_work_x[atom][d] = x_min;

            // Central difference: Hessian element = -d(force)/dx.
            for (size_t j = 0; j < atom_index.size(); j++)
            {
                for (size_t k = 0; (k < DIM); k++)
                {
                    dfdx[j][k] = -(state_work_f[atom_index[j]][k] - fneg[j][k]) / (2 * der_range);
                }
            }

            if (!bIsMaster)
            {
#if GMX_MPI
#    define mpi_type GMX_MPI_REAL
                // Non-master ranks ship their row-block to the master.
                MPI_Send(dfdx[0], atom_index.size() * DIM, mpi_type, MASTER(cr), cr->nodeid,
                         cr->mpi_comm_mygroup);
#endif
            }
            else
            {
                // Master stores its own block and collects one from each rank
                // that worked on a valid atom this round.
                for (index node = 0; (node < nnodes && aid + node < ssize(atom_index)); node++)
                {
                    if (node > 0)
                    {
#if GMX_MPI
                        MPI_Status stat;
                        MPI_Recv(dfdx[0], atom_index.size() * DIM, mpi_type, node, node,
                                 cr->mpi_comm_mygroup, &stat);
#    undef mpi_type
#endif
                    }

                    row = (aid + node) * DIM + d;

                    for (size_t j = 0; j < atom_index.size(); j++)
                    {
                        for (size_t k = 0; k < DIM; k++)
                        {
                            col = j * DIM + k;

                            if (bSparse)
                            {
                                // Compressed symmetric storage: keep only the
                                // upper triangle (col >= row), skip zeros.
                                if (col >= row && dfdx[j][k] != 0.0)
                                {
                                    gmx_sparsematrix_increment_value(sparse_matrix, row, col, dfdx[j][k]);
                                }
                            }
                            else
                            {
                                full_matrix[row * sz + col] = dfdx[j][k];
                            }
                        }
                    }
                }
            }

            if (mdrunOptions.verbose && fplog)
            {
                fflush(fplog);
            }
        }
        /* write progress */
        if (bIsMaster && mdrunOptions.verbose)
        {
            fprintf(stderr, "\rFinished step %d out of %td",
                    std::min<int>(atom + nnodes, atom_index.size()), ssize(atom_index));
            fflush(stderr);
        }
    }

    if (bIsMaster)
    {
        fprintf(stderr, "\n\nWriting Hessian...\n");
        gmx_mtxio_write(ftp2fn(efMTX, nfile, fnm), sz, sz, full_matrix, sparse_matrix);
    }

    finish_em(cr, outf, walltime_accounting, wcycle);

    walltime_accounting_set_nsteps_done(walltime_accounting, atom_index.size() * 2);
}
2879
2880 } // namespace gmx
2881