//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2020 QMCPACK developers.
//
// File developed by: Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
//                    Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//                    Mark Dewing, mdewing@anl.gov, Argonne National Laboratory
//
// File created by: Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////


#ifndef QMCPLUSPLUS_TESTDERIVOPTIMIZATION_H
#define QMCPLUSPLUS_TESTDERIVOPTIMIZATION_H

#include "Optimize/OptimizeBase.h"
#include "OhmmsData/ParameterSet.h"
#include <complex>
#include <fstream>
#include <iomanip>

using qmcplusplus::app_log;

/**
 * @file testDerivOptimization.h
 * Tests for variational parameter derivatives
 *
 * This optimization type will compare finite difference derivatives with the analytic derivatives.
 * It can also output a file that can be used with 'qmca' to get error bars.
 *
 * The input is specified with the 'optimize' tag and the 'test' method.
 * For example:
 * \code
 * <loop max="4">
 *  <qmc method="opt" move="pbyp">
 *    <optimize method="test" output_param_file="yes">
 *    </optimize>
 *    ...
 *  </qmc>
 * </loop>
 * \endcode
 *
 * The output_param_file is optional and defaults to 'no'.
 * If 'yes', a file named "<project id>.param.s000.scalar.dat" is created. Each iteration of the optimizer
 * loop outputs one line in the file. (The above example will produce 4 entries in the file).
 * This is a hack to enable computing error bars on the parameter gradients.
47 * 48 */ 49 50 template<class T> 51 class testDerivOptimization : public MinimizerBase<T> 52 { 53 public: 54 typedef T Return_t; 55 typedef typename MinimizerBase<T>::ObjectFuncType ObjectFuncType; 56 57 ObjectFuncType* TargetFunc; 58 59 testDerivOptimization(const std::string& RootName, ObjectFuncType* atarget = 0) TargetFunc(atarget)60 : TargetFunc(atarget), first_(true), output_param_file_(false), param_deriv_index_(0), RootName_(RootName) 61 { 62 if (atarget) 63 setTarget(atarget); 64 } 65 ~testDerivOptimization()66 ~testDerivOptimization() {} 67 setTarget(ObjectFuncType * fn)68 void setTarget(ObjectFuncType* fn) 69 { 70 TargetFunc = fn; 71 NumParams_ = TargetFunc->getNumParams(); 72 resizeAllArray(NumParams_); 73 for (int i = 0; i < NumParams_; i++) 74 Parms_[i] = std::real(TargetFunc->Params(i)); 75 76 if (output_param_file_ && first_) 77 { 78 int namelen = RootName_.length(); 79 // Assume that the RootName has the series suffix (".s000"). 80 // Remove the series suffix to get the project id 81 std::string fname = RootName_.substr(0, namelen - 5) + ".param.s000.scalar.dat"; 82 param_deriv_file_.open(fname); 83 param_deriv_file_ << "# Index "; 84 for (int i = 0; i < NumParams_; i++) 85 { 86 param_deriv_file_ << " " << TargetFunc->getParamName(i); 87 } 88 param_deriv_file_ << std::endl; 89 first_ = false; 90 } 91 } 92 resizeAllArray(int newSize)93 void resizeAllArray(int newSize) 94 { 95 a_xi_.resize(newSize, 0.0); 96 Parms_.resize(newSize, 0.0); 97 } 98 optimize(ObjectFuncType * fn)99 bool optimize(ObjectFuncType* fn) 100 { 101 setTarget(fn); 102 return optimize(); 103 } 104 optimize()105 bool optimize() 106 { 107 dfunc(Parms_, a_xi_); 108 //make sure wave function has the right parameters for next runs 109 for (int i = 0; i < NumParams_; i++) 110 TargetFunc->Params(i) = Parms_[i]; 111 TargetFunc->Report(); 112 return true; 113 } 114 115 bool get(std::ostream&) const; 116 117 bool put(std::istream&); 118 119 /** Parse the xml file for parameters 120 * 121 */ 
122 put(xmlNodePtr cur)123 bool put(xmlNodePtr cur) 124 { 125 std::string output_file("no"); 126 OhmmsAttributeSet attrib; 127 attrib.add(output_file, "output_param_file"); 128 attrib.put(cur); 129 130 output_param_file_ = (output_file != "no"); 131 132 return true; 133 } 134 func(std::vector<Return_t> _p)135 Return_t func(std::vector<Return_t> _p) 136 { 137 for (int i = 0; i < NumParams_; ++i) 138 TargetFunc->Params(i) = _p[i]; 139 return TargetFunc->Cost(); 140 } 141 142 dfunc(const std::vector<Return_t> & RT,std::vector<Return_t> & FG)143 void dfunc(const std::vector<Return_t>& RT, std::vector<Return_t>& FG) 144 { 145 ///To test we simply output the analytic and numeric gradients of the cost function. Make sure they agree. 146 std::vector<Return_t> Dummy(FG); 147 TargetFunc->GradCost(Dummy, RT, 1e-5); 148 TargetFunc->GradCost(FG, RT, 0.0); 149 150 if (output_param_file_) 151 { 152 param_deriv_file_ << param_deriv_index_ << " "; 153 param_deriv_index_++; 154 } 155 156 app_log() << "Param_Name Value Numeric Analytic Percent" << std::endl; 157 for (int k = 0; k < NumParams_; k++) 158 { 159 std::string vname = TargetFunc->getParamName(k); 160 if (Dummy[k] != 0) 161 app_log() << vname << " " << RT[k] << " " << Dummy[k] << " " << FG[k] << " " 162 << 100 * (Dummy[k] - FG[k]) / Dummy[k] << std::endl; 163 else 164 app_log() << vname << " " << RT[k] << " " << Dummy[k] << " " << FG[k] << " inf" << std::endl; 165 if (output_param_file_) 166 param_deriv_file_ << std::setprecision(10) << FG[k] << " "; 167 } 168 if (output_param_file_) 169 param_deriv_file_ << std::endl; 170 app_log() << std::endl; 171 } 172 173 private: 174 int NumParams_; 175 std::vector<Return_t> a_xi_; 176 std::vector<Return_t> Parms_; 177 178 std::ofstream param_deriv_file_; 179 bool first_; 180 bool output_param_file_; 181 int param_deriv_index_; 182 std::string RootName_; 183 }; 184 185 #endif 186