/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   Copyright (c) 2016-2018 The VES code team
   (see the PEOPLE-VES file at the root of this folder for a list of names)

   See http://www.ves-code.org for more information.

   This file is part of VES code module.

   The VES code module is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   The VES code module is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with the VES code module.  If not, see <http://www.gnu.org/licenses/>.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */

#include "bias/Bias.h"
#include "core/PlumedMain.h"
#include "core/ActionRegister.h"
#include "core/Atoms.h"
#include "tools/Communicator.h"
#include "tools/Grid.h"
#include "tools/File.h"
//#include <algorithm> //std::fill

namespace PLMD {
namespace ves {

//+PLUMEDOC VES_BIAS VES_DELTA_F
/*
Implementation of the VES\f$\Delta F\f$ method \cite Invernizzi2019vesdeltaf (step two only).

\warning
  Notice that this is a stand-alone bias Action: it does not need any of the other VES module components.

First you should create some estimate of the local free energy basins of your system,
using e.g. multiple short \ref METAD runs, and combining them with the \ref sum_hills utility.
Once you have them, you can use this bias Action to perform the VES optimization part of the method.

These \f$N+1\f$ local basins are used to model the global free energy.
In particular, given the conditional probabilities \f$P(\mathbf{s}|i)\propto e^{-\beta F_i(\mathbf{s})}\f$
and the probabilities of being in a given basin \f$P_i\f$, we can write:
\f[
  e^{-\beta F(\mathbf{s})}\propto P(\mathbf{s})=\sum_{i=0}^N P(\mathbf{s}|i)P_i \, .
\f]
We use this free energy model and the chosen bias factor \f$\gamma\f$ to build the bias potential:
\f$V(\mathbf{s})=-(1-1/\gamma)F(\mathbf{s})\f$.
Or, more explicitly:
\f[
  V(\mathbf{s})=(1-1/\gamma)\frac{1}{\beta}\log\left[e^{-\beta F_0(\mathbf{s})}
  +\sum_{i=1}^{N} e^{-\beta F_i(\mathbf{s})} e^{-\beta \alpha_i}\right] \, ,
\f]
where the parameters \f$\boldsymbol{\alpha}\f$ are the \f$N\f$ free energy differences (see below) from the \f$F_0\f$ basin.

By default the \f$F_i(\mathbf{s})\f$ are shifted so that \f$\min[F_i(\mathbf{s})]=0\f$ for all \f$i=\{0,...,N\}\f$.
In this case the optimization parameters \f$\alpha_i\f$ are the difference in height between the minima of the basins.
Using the keyword `NORMALIZE`, you can also decide to normalize the local free energies so that
\f$\int d\mathbf{s}\, e^{-\beta F_i(\mathbf{s})}=1\f$.
In this case the parameters will represent not the difference in height (which depends on the chosen CVs),
but the actual free energy difference, \f$\alpha_i=\Delta F_i\f$.

However, as discussed in Ref. \cite Invernizzi2019vesdeltaf, a better estimate of \f$\Delta F_i\f$ should be obtained through the reweighting procedure.
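One possible way to do so (a minimal sketch only, assuming the bias has become quasi-static and using indicator functions
\f$\chi_i(\mathbf{s})\f$ to define the basins) is to reweight the sampled configurations:
\f[
  \Delta F_i \approx -\frac{1}{\beta}\log\frac{\langle\chi_i(\mathbf{s})\,e^{\beta V(\mathbf{s})}\rangle_V}{\langle\chi_0(\mathbf{s})\,e^{\beta V(\mathbf{s})}\rangle_V}\, ,
\f]
where \f$\langle\cdot\rangle_V\f$ denotes an average over the biased simulation.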

\par Examples

The following performs the optimization of the free energy difference between two metastable basins:

\plumedfile
cv: DISTANCE ATOMS=1,2
ves: VES_DELTA_F ...
  ARG=cv
  TEMP=300
  FILE_F0=fesA.data
  FILE_F1=fesB.data
  BIASFACTOR=10.0
  M_STEP=0.1
  AV_STRIDE=500
  PRINT_STRIDE=100
...
PRINT FMT=%g STRIDE=500 FILE=Colvar.data ARG=cv,ves.bias,ves.rct
\endplumedfile

The local FES files can be obtained as described in Sec. 4.2 of Ref. \cite Invernizzi2019vesdeltaf, i.e. for example:
- run 4 independent metad runs, all starting from basin A, and kill them as soon as they make the first transition (see e.g. \ref COMMITTOR)
- \verbatim cat HILLS* > all_HILLS \endverbatim
- \verbatim plumed sum_hills --hills all_HILLS --outfile all_fesA.dat --mintozero --min 0 --max 1 --bin 100 \endverbatim
- \verbatim awk -v n_rep=4 '{if($1!="#!" && $1!="") {for(i=1+(NF-1)/2; i<=NF; i++) $i/=n_rep;} print $0}' all_fesA.dat > fesA.data \endverbatim

The header of both FES files must be identical, and should be similar to the following:

\auxfile{fesA.data}
#! FIELDS cv file.free der_cv
#! SET min_cv 0
#! SET max_cv 1
#! SET nbins_cv  100
#! SET periodic_cv false
0 1000 0
[ ... ]
\endauxfile
\auxfile{fesB.data}
#! FIELDS cv file.free der_cv
#! SET min_cv 0
#! SET max_cv 1
#! SET nbins_cv  100
#! SET periodic_cv false
0 1000 0
[ ... ]
\endauxfile
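
The action is not limited to two basins. As a sketch only (the fesC.data file is hypothetical and not shown here),
a three-basin setup with normalized local free energies and multiple MPI-connected walkers could look like:

\plumedfile
cv: DISTANCE ATOMS=1,2
ves: VES_DELTA_F ...
  ARG=cv
  TEMP=300
  FILE_F0=fesA.data
  FILE_F1=fesB.data
  FILE_F2=fesC.data
  NORMALIZE
  BIASFACTOR=10.0
  M_STEP=0.1
  AV_STRIDE=500
  MULTIPLE_WALKERS
...
PRINT FMT=%g STRIDE=500 FILE=Colvar.data ARG=cv,ves.bias,ves.rct
\endplumedfile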

*/
//+ENDPLUMEDOC

class VesDeltaF : public bias::Bias {

private:
  double beta_;
  unsigned NumParallel_;
  unsigned rank_;
  unsigned NumWalkers_;
  bool isFirstStep_;
  bool afterCalculate_;

//prob
  double tot_prob_;
  std::vector<double> prob_;
  std::vector< std::vector<double> > der_prob_;

//local basins
  std::vector< std::unique_ptr<Grid> > grid_p_; //pointers because of GridBase::create
  std::vector<double> norm_;

//optimizer-related stuff
  long unsigned mean_counter_;
  unsigned mean_weight_tau_;
  unsigned alpha_size_;
  unsigned sym_alpha_size_;
  std::vector<double> mean_alpha_;
  std::vector<double> inst_alpha_;
  std::vector<double> past_increment2_;
  double minimization_step_;
  bool damping_off_;
//'tg' -> 'target distribution'
  double inv_gamma_;
  unsigned tg_counter_;
  unsigned tg_stride_;
  std::vector<double> tg_dV_dAlpha_;
  std::vector<double> tg_d2V_dAlpha2_;
//'av' -> 'ensemble average'
  unsigned av_counter_;
  unsigned av_stride_;
  std::vector<double> av_dV_dAlpha_;
  std::vector<double> av_dV_dAlpha_prod_;
  std::vector<double> av_d2V_dAlpha2_;
//printing
  unsigned print_stride_;
  OFile alphaOfile_;
//other
  std::vector<double> exp_alpha_;
  std::vector<double> prev_exp_alpha_;
  double work_;

//functions
  void update_alpha();
  void update_tg_and_rct();
  inline unsigned get_index(const unsigned, const unsigned) const;

public:
  explicit VesDeltaF(const ActionOptions&);
  void calculate() override;
  void update() override;
  static void registerKeywords(Keywords& keys);
};

PLUMED_REGISTER_ACTION(VesDeltaF,"VES_DELTA_F")

void VesDeltaF::registerKeywords(Keywords& keys) {
  Bias::registerKeywords(keys);
  keys.use("ARG");
  keys.add("optional","TEMP","temperature is compulsory, but it can sometimes be fetched from the MD engine");
//local free energies
  keys.add("numbered","FILE_F","names of files containing local free energies and derivatives. "
           "The first one, FILE_F0, is used as reference for all the free energy differences.");
  keys.reset_style("FILE_F","compulsory");
  keys.addFlag("NORMALIZE",false,"normalize all local free energies so that alpha will be (approx) \\f$\\Delta F\\f$");
  keys.addFlag("NO_MINTOZERO",false,"leave local free energies as provided, without shifting them to zero min");
//target distribution
  keys.add("compulsory","BIASFACTOR","0","the \\f$\\gamma\\f$ bias factor used for well-tempered target \\f$p(\\mathbf{s})\\f$."
           " Set to 0 for non-tempered flat target");
  keys.add("optional","TG_STRIDE","( default=1 ) number of AV_STRIDE between updates"
           " of target \\f$p(\\mathbf{s})\\f$ and reweighting factor \\f$c(t)\\f$");
//optimization
  keys.add("compulsory","M_STEP","1.0","the \\f$\\mu\\f$ step used for the \\f$\\Omega\\f$ functional minimization");
  keys.add("compulsory","AV_STRIDE","500","number of simulation steps between alpha updates");
  keys.add("optional","TAU_MEAN","exponentially decaying average for alpha (rescaled using AV_STRIDE)."
           " Should be used only in very specific cases");
  keys.add("optional","INITIAL_ALPHA","( default=0 ) an initial guess for the bias potential parameter alpha");
  keys.addFlag("DAMPING_OFF",false,"do not use an AdaGrad-like term to rescale M_STEP");
//output parameters file
  keys.add("compulsory","ALPHA_FILE","ALPHA","file name for output minimization parameters");
  keys.add("optional","PRINT_STRIDE","( default=10 ) stride for printing to ALPHA_FILE");
  keys.add("optional","FMT","specify format for ALPHA_FILE");
//debug flags
  keys.addFlag("SERIAL",false,"perform the calculation in serial even if multiple tasks are available");
  keys.addFlag("MULTIPLE_WALKERS",false,"use multiple walkers connected via MPI for the optimization");
  keys.use("RESTART");

//output components
  componentsAreNotOptional(keys);
  keys.addOutputComponent("rct","default","the reweighting factor \\f$c(t)\\f$");
  keys.addOutputComponent("work","default","the work done by the bias in one AV_STRIDE");
}

VesDeltaF::VesDeltaF(const ActionOptions&ao)
  : PLUMED_BIAS_INIT(ao)
  , isFirstStep_(true)
  , afterCalculate_(false)
  , mean_counter_(0)
  , av_counter_(0)
  , work_(0)
{
//set beta
  const double Kb=plumed.getAtoms().getKBoltzmann();
  double temp=0;
  parse("TEMP",temp);
  double KbT=Kb*temp;
  if(KbT==0)
  {
    KbT=plumed.getAtoms().getKbT();
    plumed_massert(KbT>0,"your MD engine does not pass the temperature to plumed, you must specify it using TEMP");
  }
  beta_=1.0/KbT;

//initialize probability grids using local free energies
  bool spline=true;
  bool sparsegrid=false;
  std::string funcl="file.free"; //typical name given by sum_hills

  std::vector<std::string> fes_names;
  for(unsigned n=0;; n++)//NB: here we start from FILE_F0 not from FILE_F1
  {
    std::string filename;
    if(!parseNumbered("FILE_F",n,filename))
      break;
    fes_names.push_back(filename);
    IFile gridfile;
    gridfile.open(filename);
    auto g=GridBase::create(funcl,getArguments(),gridfile,sparsegrid,spline,true);
// we assume this cannot be sparse. in case we want it to be sparse, some of the methods
// that are available only in Grid should be ported to GridBase
    auto gg=dynamic_cast<Grid*>(g.get());
// if this throws, g is deleted
    plumed_assert(gg);
// release ownership in order to transfer it to emplaced pointer
    g.release();
    grid_p_.emplace_back(gg);
  }
  plumed_massert(grid_p_.size()>1,"at least 2 basins must be defined, starting from FILE_F0");
  alpha_size_=grid_p_.size()-1;
  sym_alpha_size_=alpha_size_*(alpha_size_+1)/2; //useful for symmetric matrix [alpha_size_]x[alpha_size_]
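  //e.g. alpha_size_=3 -> sym_alpha_size_=6, the number of independent entries of a 3x3 symmetric matrix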
  //check for consistency with first local free energy
  for(unsigned n=1; n<grid_p_.size(); n++)
  {
    std::string error_tag="FILE_F"+std::to_string(n)+" '"+fes_names[n]+"' not compatible with reference one, FILE_F0";
    plumed_massert(grid_p_[n]->getSize()==grid_p_[0]->getSize(),error_tag);
    plumed_massert(grid_p_[n]->getMin()==grid_p_[0]->getMin(),error_tag);
    plumed_massert(grid_p_[n]->getMax()==grid_p_[0]->getMax(),error_tag);
    plumed_massert(grid_p_[n]->getBinVolume()==grid_p_[0]->getBinVolume(),error_tag);
  }

  bool no_mintozero=false;
  parseFlag("NO_MINTOZERO",no_mintozero);
  if(!no_mintozero)
  {
    for(unsigned n=0; n<grid_p_.size(); n++)
      grid_p_[n]->setMinToZero();
  }
  bool normalize=false;
  parseFlag("NORMALIZE",normalize);
  norm_.resize(grid_p_.size(),0);
  std::vector<double> c_norm(grid_p_.size());
  //convert the FESs to probability distributions
  //NB: the spline interpolation will be done on the probability distributions, not on the given FESs
  const unsigned ncv=getNumberOfArguments(); //just for ease
  for(unsigned n=0; n<grid_p_.size(); n++)
  {
    for(Grid::index_t t=0; t<grid_p_[n]->getSize(); t++)
    {
      std::vector<double> der(ncv);
      const double val=std::exp(-beta_*grid_p_[n]->getValueAndDerivatives(t,der));
      for(unsigned s=0; s<ncv; s++)
        der[s]*=-beta_*val;
      grid_p_[n]->setValueAndDerivatives(t,val,der);
      norm_[n]+=val;
    }
    c_norm[n]=1./beta_*std::log(norm_[n]);
    if(normalize)
    {
      grid_p_[n]->scaleAllValuesAndDerivatives(1./norm_[n]);
      norm_[n]=1;
    }
  }

//get target
  double biasfactor=0;
  parse("BIASFACTOR",biasfactor);
  plumed_massert(biasfactor==0 || biasfactor>1,"BIASFACTOR must be zero (for uniform target) or greater than one");
  if(biasfactor==0)
    inv_gamma_=0;
  else
    inv_gamma_=1./biasfactor;
  tg_counter_=0;
  tg_stride_=1;
  parse("TG_STRIDE",tg_stride_);
  tg_dV_dAlpha_.resize(alpha_size_,0);
  tg_d2V_dAlpha2_.resize(sym_alpha_size_,0);

//setup optimization stuff
  minimization_step_=1;
  parse("M_STEP",minimization_step_);

  av_stride_=500;
  parse("AV_STRIDE",av_stride_);
  av_dV_dAlpha_.resize(alpha_size_,0);
  av_dV_dAlpha_prod_.resize(sym_alpha_size_,0);
  av_d2V_dAlpha2_.resize(sym_alpha_size_,0);

  mean_weight_tau_=0;
  parse("TAU_MEAN",mean_weight_tau_);
  if(mean_weight_tau_!=1) //set it to 1 for basic SGD
  {
    plumed_massert((mean_weight_tau_==0 || mean_weight_tau_>av_stride_),"TAU_MEAN is rescaled with AV_STRIDE, so it has to be greater");
    mean_weight_tau_/=av_stride_; //this way you can look at the number of simulation steps to choose TAU_MEAN
  }

  parseVector("INITIAL_ALPHA",mean_alpha_);
  if(mean_alpha_.size()>0)
  {
    plumed_massert(mean_alpha_.size()==alpha_size_,"provide one INITIAL_ALPHA for each basin beyond the first one");
  }
  else
    mean_alpha_.resize(alpha_size_,0);
  inst_alpha_=mean_alpha_;
  exp_alpha_.resize(alpha_size_);
  for(unsigned i=0; i<alpha_size_; i++)
    exp_alpha_[i]=std::exp(-beta_*mean_alpha_[i]);
  prev_exp_alpha_=exp_alpha_;

  damping_off_=false;
  parseFlag("DAMPING_OFF",damping_off_);
  if(damping_off_)
    past_increment2_.resize(alpha_size_,1);
  else
    past_increment2_.resize(alpha_size_,0);

//file printing options
  std::string alphaFileName("ALPHA");
  parse("ALPHA_FILE",alphaFileName);
  print_stride_=10;
  parse("PRINT_STRIDE",print_stride_);
  std::string fmt;
  parse("FMT",fmt);

//other flags, mainly for debugging
  NumParallel_=comm.Get_size();
  rank_=comm.Get_rank();
  bool serial=false;
  parseFlag("SERIAL",serial);
  if(serial)
  {
    log.printf(" -- SERIAL: running without loop parallelization\n");
    NumParallel_=1;
    rank_=0;
  }

  bool multiple_walkers=false;
  parseFlag("MULTIPLE_WALKERS",multiple_walkers);
  if(!multiple_walkers)
    NumWalkers_=1;
  else
  {
    if(comm.Get_rank()==0)//multi_sim_comm works well on first rank only
      NumWalkers_=multi_sim_comm.Get_size();
    if(comm.Get_size()>1) //if each walker has more than one processor update them all
      comm.Bcast(NumWalkers_,0);
  }

  checkRead();

//restart if needed
  if(getRestart())
  {
    IFile ifile;
    ifile.link(*this);
    if(NumWalkers_>1)
      ifile.enforceSuffix("");
    if(ifile.FileExist(alphaFileName))
    {
      log.printf("  Restarting from: %s\n",alphaFileName.c_str());
      log.printf("    all options (also PRINT_STRIDE) must be consistent!\n");
      log.printf("    any INITIAL_ALPHA will be overwritten\n");
      ifile.open(alphaFileName);
      double time;
      std::vector<double> damping(alpha_size_);
      while(ifile.scanField("time",time)) //room for improvement: only the last line is important
      {
        for(unsigned i=0; i<alpha_size_; i++)
        {
          const std::string index(std::to_string(i+1));
          prev_exp_alpha_[i]=std::exp(-beta_*mean_alpha_[i]);
          ifile.scanField("alpha_"+index,mean_alpha_[i]);
          ifile.scanField("auxiliary_"+index,inst_alpha_[i]);
          ifile.scanField("damping_"+index,damping[i]);
        }
        ifile.scanField();
        mean_counter_+=print_stride_;
      }
      for(unsigned i=0; i<alpha_size_; i++)
      {
        exp_alpha_[i]=std::exp(-beta_*mean_alpha_[i]);
        past_increment2_[i]=damping[i]*damping[i];
      }
      //sync all walkers and threads. Not sure it is mandatory, but it does no harm
      comm.Barrier();
      if(comm.Get_rank()==0)
        multi_sim_comm.Barrier();
    }
    else
      log.printf("  -- WARNING: restart requested, but no '%s' file found!\n",alphaFileName.c_str());
  }

//setup output file with Alpha values
  alphaOfile_.link(*this);
  if(NumWalkers_>1)
  {
    if(comm.Get_rank()==0 && multi_sim_comm.Get_rank()>0)
      alphaFileName="/dev/null"; //only first walker writes on file
    alphaOfile_.enforceSuffix("");
  }
  alphaOfile_.open(alphaFileName);
  if(fmt.length()>0)
    alphaOfile_.fmtField(" "+fmt);

//add other output components
  addComponent("rct"); componentIsNotPeriodic("rct");
  addComponent("work"); componentIsNotPeriodic("work");

//print some info
  log.printf("  Temperature T: %g\n",1./(Kb*beta_));
  log.printf("  Beta (1/Kb*T): %g\n",beta_);
  log.printf("  Local free energy basins files and normalization constants:\n");
  for(unsigned n=0; n<grid_p_.size(); n++)
    log.printf("    F_%d filename: %s  c_%d=%g\n",n,fes_names[n].c_str(),n,c_norm[n]);
  if(no_mintozero)
    log.printf(" -- NO_MINTOZERO: local free energies are not shifted to be zero at minimum\n");
  if(normalize)
    log.printf(" -- NORMALIZE: F_n+=c_n, alpha=DeltaF\n");
  log.printf("  Using target distribution with 1/gamma = %g\n",inv_gamma_);
  log.printf("    and updated with stride %d\n",tg_stride_);
  log.printf("  Step for the minimization algorithm: %g\n",minimization_step_);
  log.printf("  Stride for the ensemble average: %d\n",av_stride_);
  if(mean_weight_tau_>1)
    log.printf("  Exponentially decaying average with weight=tau/av_stride=%d\n",mean_weight_tau_);
  if(mean_weight_tau_==1)
    log.printf(" +++ WARNING +++ setting TAU_MEAN=1 is equivalent to using simple SGD, without mean alpha nor hessian contribution\n");
  log.printf("  Initial guess for alpha:\n");
  for(unsigned i=0; i<alpha_size_; i++)
    log.printf("    alpha_%d = %g\n",i+1,mean_alpha_[i]);
  if(damping_off_)
    log.printf(" -- DAMPING_OFF: the minimization step will NOT become smaller as the simulation goes on\n");
  log.printf("  Printing on file %s with stride %d\n",alphaFileName.c_str(),print_stride_);
  if(serial)
    log.printf(" -- SERIAL: running without loop parallelization\n");
  if(NumParallel_>1)
    log.printf("  Using multiple threads per simulation: %d\n",NumParallel_);
  if(multiple_walkers)
  {
    log.printf(" -- MULTIPLE_WALKERS: multiple simulations will combine statistics for the optimization\n");
    if(NumWalkers_>1)
    {
      log.printf("    number of walkers: %d\n",NumWalkers_);
      log.printf("    walker rank: %d\n",multi_sim_comm.Get_rank()); //only comm.Get_rank()=0 prints, so this is fine
    }
    else
      log.printf(" +++ WARNING +++ only one replica found: are you sure you are running MPI-connected simulations?\n");
  }
  log.printf(" Bibliography ");
  log<<plumed.cite("Invernizzi and Parrinello, J. Chem. Theory Comput. 15, 2187-2194 (2019)");
  log<<plumed.cite("Valsson and Parrinello, Phys. Rev. Lett. 113, 090601 (2014)");
  if(inv_gamma_>0)
    log<<plumed.cite("Valsson and Parrinello, J. Chem. Theory Comput. 11, 1996-2002 (2015)");

//last initializations
  prob_.resize(grid_p_.size());
  der_prob_.resize(grid_p_.size(),std::vector<double>(getNumberOfArguments()));
  update_tg_and_rct();
}

void VesDeltaF::calculate()
{
//get CVs
  const unsigned ncv=getNumberOfArguments(); //just for ease
  std::vector<double> cv(ncv);
  for(unsigned s=0; s<ncv; s++)
    cv[s]=getArgument(s);
//get probabilities for each basin, and total one
  for(unsigned n=0; n<grid_p_.size(); n++)
    prob_[n]=grid_p_[n]->getValueAndDerivatives(cv,der_prob_[n]);
  tot_prob_=prob_[0];
  for(unsigned i=0; i<alpha_size_; i++)
    tot_prob_+=prob_[i+1]*exp_alpha_[i];

//update bias and forces: V=-(1-inv_gamma_)*fes
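//i.e. V=(1-inv_gamma_)/beta_*log(tot_prob_), so the force on each CV s is -dV/ds=-(1-inv_gamma_)/beta_*(d tot_prob_/ds)/tot_prob_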
  setBias((1-inv_gamma_)/beta_*std::log(tot_prob_));
  for(unsigned s=0; s<ncv; s++)
  {
    double dProb_dCV_s=der_prob_[0][s];
    for(unsigned i=0; i<alpha_size_; i++)
      dProb_dCV_s+=der_prob_[i+1][s]*exp_alpha_[i];
    setOutputForce(s,-(1-inv_gamma_)/beta_/tot_prob_*dProb_dCV_s);
  }
  afterCalculate_=true;
}

void VesDeltaF::update()
{
//skip first step to sync getTime() and av_counter_, as in METAD
  if(isFirstStep_)
  {
    isFirstStep_=false;
    return;
  }
  plumed_massert(afterCalculate_,"VesDeltaF::update() must be called after VesDeltaF::calculate() to work properly");
  afterCalculate_=false;

//calculate derivatives for ensemble averages
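//dV/dAlpha_i = -(1-1/gamma)*prob_[i+1]*exp_alpha_[i]/tot_prob_, and differentiating once more:
//d2V/dAlpha_i dAlpha_j = -beta*delta_ij*dV/dAlpha_i - beta/(1-1/gamma)*(dV/dAlpha_i)*(dV/dAlpha_j)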
  std::vector<double> dV_dAlpha(alpha_size_);
  std::vector<double> d2V_dAlpha2(sym_alpha_size_);
  for(unsigned i=0; i<alpha_size_; i++)
    dV_dAlpha[i]=-(1-inv_gamma_)/tot_prob_*prob_[i+1]*exp_alpha_[i];
  for(unsigned i=0; i<alpha_size_; i++)
  {
    d2V_dAlpha2[get_index(i,i)]=-beta_*dV_dAlpha[i];
    for(unsigned j=i; j<alpha_size_; j++)
      d2V_dAlpha2[get_index(i,j)]-=beta_/(1-inv_gamma_)*dV_dAlpha[i]*dV_dAlpha[j];
  }
//update ensemble averages
  av_counter_++;
  for(unsigned i=0; i<alpha_size_; i++)
  {
    av_dV_dAlpha_[i]+=(dV_dAlpha[i]-av_dV_dAlpha_[i])/av_counter_;
    for(unsigned j=i; j<alpha_size_; j++)
    {
      const unsigned ij=get_index(i,j);
      av_dV_dAlpha_prod_[ij]+=(dV_dAlpha[i]*dV_dAlpha[j]-av_dV_dAlpha_prod_[ij])/av_counter_;
      av_d2V_dAlpha2_[ij]+=(d2V_dAlpha2[ij]-av_d2V_dAlpha2_[ij])/av_counter_;
    }
  }
//update work
  double prev_tot_prob=prob_[0];
  for(unsigned i=0; i<alpha_size_; i++)
    prev_tot_prob+=prob_[i+1]*prev_exp_alpha_[i];
  work_+=(1-inv_gamma_)/beta_*std::log(tot_prob_/prev_tot_prob);

//update coefficients
  if(av_counter_==av_stride_)
  {
    update_alpha();
    tg_counter_++;
    if(tg_counter_==tg_stride_)
    {
      update_tg_and_rct();
      tg_counter_=0;
    }
    //reset the ensemble averages
    av_counter_=0;
    std::fill(av_dV_dAlpha_.begin(),av_dV_dAlpha_.end(),0);
    std::fill(av_dV_dAlpha_prod_.begin(),av_dV_dAlpha_prod_.end(),0);
    std::fill(av_d2V_dAlpha2_.begin(),av_d2V_dAlpha2_.end(),0);
  }
}

void VesDeltaF::update_tg_and_rct()
{
//calculate target averages
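//the well-tempered target p(s) is proportional to [P(s)]^(1/gamma), evaluated on the grid with the current alpha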
  double Z_0=norm_[0];
  for(unsigned i=0; i<alpha_size_; i++)
    Z_0+=norm_[i+1]*exp_alpha_[i];
  double Z_tg=0;
  std::fill(tg_dV_dAlpha_.begin(),tg_dV_dAlpha_.end(),0);
  std::fill(tg_d2V_dAlpha2_.begin(),tg_d2V_dAlpha2_.end(),0);
  for(Grid::index_t t=rank_; t<grid_p_[0]->getSize(); t+=NumParallel_)
  { //TODO can we recycle some code?
    std::vector<double> prob(grid_p_.size());
    for(unsigned n=0; n<grid_p_.size(); n++)
      prob[n]=grid_p_[n]->getValue(t);
    double tot_prob=prob[0];
    for(unsigned i=0; i<alpha_size_; i++)
      tot_prob+=prob[i+1]*exp_alpha_[i];
    std::vector<double> dV_dAlpha(alpha_size_);
    std::vector<double> d2V_dAlpha2(sym_alpha_size_);
    for(unsigned i=0; i<alpha_size_; i++)
      dV_dAlpha[i]=-(1-inv_gamma_)/tot_prob*prob[i+1]*exp_alpha_[i];
    for(unsigned i=0; i<alpha_size_; i++)
    {
      d2V_dAlpha2[get_index(i,i)]=-beta_*dV_dAlpha[i];
      for(unsigned j=i; j<alpha_size_; j++)
        d2V_dAlpha2[get_index(i,j)]-=beta_/(1-inv_gamma_)*dV_dAlpha[i]*dV_dAlpha[j];
    }
    const double unnorm_tg_p=std::pow(tot_prob,inv_gamma_);
    Z_tg+=unnorm_tg_p;
    for(unsigned i=0; i<alpha_size_; i++)
      tg_dV_dAlpha_[i]+=unnorm_tg_p*dV_dAlpha[i];
    for(unsigned ij=0; ij<sym_alpha_size_; ij++)
      tg_d2V_dAlpha2_[ij]+=unnorm_tg_p*d2V_dAlpha2[ij];
  }
  if(NumParallel_>1)
  {
    comm.Sum(Z_tg);
    comm.Sum(tg_dV_dAlpha_);
    comm.Sum(tg_d2V_dAlpha2_);
  }
  for(unsigned i=0; i<alpha_size_; i++)
    tg_dV_dAlpha_[i]/=Z_tg;
  for(unsigned ij=0; ij<sym_alpha_size_; ij++)
    tg_d2V_dAlpha2_[ij]/=Z_tg;
  getPntrToComponent("rct")->set(-1./beta_*std::log(Z_tg/Z_0)); //Z_tg is the best available estimate of Z_V
}

void VesDeltaF::update_alpha()
{
//combining the averages of multiple walkers
  if(NumWalkers_>1)
  {
    if(comm.Get_rank()==0) //sum only once: in the first rank of each walker
    {
      multi_sim_comm.Sum(av_dV_dAlpha_);
      multi_sim_comm.Sum(av_dV_dAlpha_prod_);
      multi_sim_comm.Sum(av_d2V_dAlpha2_);
      for(unsigned i=0; i<alpha_size_; i++)
        av_dV_dAlpha_[i]/=NumWalkers_;
      for(unsigned ij=0; ij<sym_alpha_size_; ij++)
      {
        av_dV_dAlpha_prod_[ij]/=NumWalkers_;
        av_d2V_dAlpha2_[ij]/=NumWalkers_;
      }
    }
    if(comm.Get_size()>1)//if there are more ranks for each walker, everybody has to know
    {
      comm.Bcast(av_dV_dAlpha_,0);
      comm.Bcast(av_dV_dAlpha_prod_,0);
      comm.Bcast(av_d2V_dAlpha2_,0);
    }
  }
  //set work and reset it
  getPntrToComponent("work")->set(work_);
  work_=0;

//build the gradient and the Hessian of the functional
  std::vector<double> grad_omega(alpha_size_);
  std::vector<double> hess_omega(sym_alpha_size_);
  for(unsigned i=0; i<alpha_size_; i++)
  {
    grad_omega[i]=tg_dV_dAlpha_[i]-av_dV_dAlpha_[i];
    for(unsigned j=i; j<alpha_size_; j++)
    {
      const unsigned ij=get_index(i,j);
      hess_omega[ij]=beta_*(av_dV_dAlpha_prod_[ij]-av_dV_dAlpha_[i]*av_dV_dAlpha_[j])+tg_d2V_dAlpha2_[ij]-av_d2V_dAlpha2_[ij];
    }
  }
//calculate the increment and update alpha
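//averaged stochastic gradient descent: the instantaneous alpha moves along grad_omega plus a hessian correction
//around the running mean, each step scaled by M_STEP and the AdaGrad-like damping; the bias uses the running mean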
  mean_counter_++;
  long unsigned mean_weight=mean_counter_;
  if(mean_weight_tau_>0 && mean_weight_tau_<mean_counter_)
    mean_weight=mean_weight_tau_;
  std::vector<double> damping(alpha_size_);
  for(unsigned i=0; i<alpha_size_; i++)
  {
    double increment_i=grad_omega[i];
    for(unsigned j=0; j<alpha_size_; j++)
      increment_i+=hess_omega[get_index(i,j)]*(inst_alpha_[j]-mean_alpha_[j]);
    if(!damping_off_)
      past_increment2_[i]+=increment_i*increment_i;
    damping[i]=std::sqrt(past_increment2_[i]);
    prev_exp_alpha_[i]=std::exp(-beta_*mean_alpha_[i]);
    inst_alpha_[i]-=minimization_step_/damping[i]*increment_i;
    mean_alpha_[i]+=(inst_alpha_[i]-mean_alpha_[i])/mean_weight;
    exp_alpha_[i]=std::exp(-beta_*mean_alpha_[i]);
  }

//update the Alpha file
  if(mean_counter_%print_stride_==0)
  {
    alphaOfile_.printField("time",getTime());
    for(unsigned i=0; i<alpha_size_; i++)
    {
      const std::string index(std::to_string(i+1));
      alphaOfile_.printField("alpha_"+index,mean_alpha_[i]);
      alphaOfile_.printField("auxiliary_"+index,inst_alpha_[i]);
      alphaOfile_.printField("damping_"+index,damping[i]);
    }
    alphaOfile_.printField();
  }
}

//mapping of a [alpha_size_]x[alpha_size_] symmetric matrix into a vector of size sym_alpha_size_, useful for the communicator
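//the upper triangle is stored row-major, e.g. for alpha_size_=3: (0,0)->0, (0,1)->1, (0,2)->2, (1,1)->3, (1,2)->4, (2,2)->5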
inline unsigned VesDeltaF::get_index(const unsigned i, const unsigned j) const
{
  if(i<=j)
    return j+i*(alpha_size_-1)-i*(i-1)/2;
  else
    return get_index(j,i);
}

}
}