/*  _______________________________________________________________________

    DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
    Copyright 2014 Sandia Corporation.
    This software is distributed under the GNU Lesser General Public License.
    For more information, see the README file in the top Dakota directory.
    _______________________________________________________________________ */

//- Class:       DemoTPLOptimizer
//- Description: Wrapper class for Demo_Opt
//- Owner:       Russell Hooper
//- Checked by:

// Dakota headers
// This header file provides the database that holds options specified
// in the Dakota input file.

#include "ProblemDescDB.hpp"

// Boost headers
// boost::filesystem is used below to verify that an optional native
// Demo_Opt options file exists before passing its name to the TPL.

#include <boost/filesystem.hpp>

// Demo_Opt headers
// There are two sets of source files needed for integrating a TPL
// into Dakota: the source code for the TPL and the source files that
// provide the interface between Dakota and the TPL.  This source file
// and the first header comprise the interface.  The second header is
// associated with the TPL source.  Replace the second header with any
// necessary header files from the TPL that is being integrated.

#include "DemoOptimizer.hpp"
#include "demo_opt.hpp"
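
// The wrapper below assumes a Demo_Opt interface roughly along the
// following lines.  This is only a sketch inferred from the calls made
// in this file (argument and return types here are assumptions); see
// demo_opt.hpp for the actual declarations.
//
//   class Demo_Opt {
//   public:
//     class ObjectiveFn     { /* objective callback base class */ };
//     class NonlinearEqFn   { /* nonlinear eq constraint callback base */ };
//     class NonlinearIneqFn { /* nonlinear ineq constraint callback base */ };
//
//     void register_obj_fn     (ObjectiveFn     *);
//     void register_nln_eq_fn  (NonlinearEqFn   *);
//     void register_nln_ineq_fn(NonlinearIneqFn *);
//
//     void set_param(const std::string & name, int    value);
//     void set_param(const std::string & name, double value);
//     void set_solver_options(const std::string & file, bool verbose);
//     void set_problem_data(const std::vector<double> & init,
//                           const std::vector<double> & lower,
//                           const std::vector<double> & upper);
//
//     void initialize(bool verbose);
//     void execute   (bool verbose);
//
//     double              get_best_f()         const;
//     std::vector<double> get_best_x()         const;
//     std::vector<double> get_best_nln_eqs()   const;
//     std::vector<double> get_best_nln_ineqs() const;
//   };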

//
// - DemoTPLOptimizer implementation
//

// -----------------------------------------------------------------
// Some calls to the Optimizer (Demo_Opt) are unique to that particular
// TPL, whereas other calls, e.g., to exchange data, employ general
// utilities such as adapters contained within a helper class.
//
// Calls specific to the TPL are indicated via
//
//                      TPL_SPECIFIC
//
// within the associated comments.
// -----------------------------------------------------------------



// All of the interface source should be included in the Dakota
// namespace.

namespace Dakota {


// -----------------------------------------------------------------
/** Implementation of DemoTPLOptimizer class. */

// Standard constructor for DemoTPLOptimizer.  Sets up the Demo_Opt solver
// based on information from the database.  problem_db and model are
// Dakota objects from which various information can be accessed.  No
// additional implementation is needed in the wrapper.  DemoOptTraits
// is also a Dakota object, but it requires some additional
// implementation; instructions are in DemoOptimizer.hpp.

// Demo_Opt and demoOpt are specific to the TPL being integrated.  In
// this example, the assumption is that the TPL is object oriented and
// that its main class (Demo_Opt) has a function evaluation method
// (ObjectiveFn()) that needs to be implemented.  The TPL solver
// (demoOpt) is instantiated.  TPLs do not have to be in C++, nor do
// the necessary objects have to be instantiated/initialized in the
// constructor, as long as they are created before they are accessed.

DemoTPLOptimizer::DemoTPLOptimizer(ProblemDescDB& problem_db, Model& model):
  Optimizer(problem_db, model, std::shared_ptr<TraitsBase>(new DemoOptTraits())),
  Demo_Opt::ObjectiveFn(),
  Demo_Opt::NonlinearEqFn(),
  Demo_Opt::NonlinearIneqFn(),
  demoOpt(std::make_shared<Demo_Opt>())
{
  // Call a helper function to set method parameters.  It is
  // implemented later in this source file.

  set_demo_parameters();

  // Register ourselves as the callback interface for objective function
  // evaluations and nonlinear equality constraints.  This assumes that
  // the TPL makes a function call to do objective function evaluations,
  // and a pointer to the function must be provided to the TPL.  This code
  // should be replaced with whatever mechanism the TPL being integrated
  // uses for setting that function pointer.  There are other ways that
  // objective functions can be implemented that will be added to future
  // versions of this example.

  // ------------------  TPL_SPECIFIC  ------------------
  demoOpt->register_obj_fn(this);

  // Conditionally register ourselves as a constraint callback, depending
  // on whether the problem uses constraints.
  if( get_num_nln_eq(true) > 0 )
    demoOpt->register_nln_eq_fn(this);   // TPL_SPECIFIC
  if( get_num_nln_ineq(true) > 0 )
    demoOpt->register_nln_ineq_fn(this); // TPL_SPECIFIC
}
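
// Note: register_obj_fn() above assumes the TPL accepts a C++ callback
// object.  If a TPL instead takes a plain C-style function pointer, one
// common alternative is a file-scope "trampoline" that forwards to this
// wrapper.  The sketch below is purely illustrative; demo_obj_callback(),
// tpl_set_objective(), and the callback signature are hypothetical and
// not part of Demo_Opt:
//
//   static DemoTPLOptimizer * demoInstance = NULL;
//
//   extern "C" double demo_obj_callback(const double * x, int num_vars)
//   {
//     std::vector<double> x_vec(x, x + num_vars);
//     return demoInstance->compute_obj(x_vec, false);
//   }
//
//   // ... then in the constructor:
//   //     demoInstance = this;
//   //     tpl_set_objective(demo_obj_callback);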


// -----------------------------------------------------------------

// core_run redefines the Optimizer virtual function to perform the
// optimization using Demo_Opt and catalogue the results.  core_run
// will be called by Dakota, and core_run will call the TPL optimizer.

void DemoTPLOptimizer::core_run()
{
  // Call a helper function to set the initial values of the
  // variables.  It is implemented later in this source file.

  initialize_variables_and_constraints();

  // Invoke the TPL's method to actually perform the optimization.
  // This code should be replaced by whatever the TPL's mechanism is
  // for running its solver.

  // ------------------  TPL_SPECIFIC  ------------------
  demoOpt->execute(true);

  // The TPL should provide the optimal value of the objective
  // function and the associated variable values.  For the purposes of
  // this example, the values should be returned in standard C++ data
  // types, double for the function value and std::vector<double> for
  // the variable values.

  if (!localObjectiveRecast) {
    // Replace this line with however the TPL being integrated returns
    // the optimal function value.  To use this demo with minimal
    // changes, the returned value needs to be (converted to) a
    // double.
    double best_f = demoOpt->get_best_f(); // TPL_SPECIFIC

    // If the TPL defaults to doing minimization, no need to do
    // anything with this code.  It manages needed sign changes
    // depending on whether minimize or maximize has been specified in
    // the Dakota input file.
    const BoolDeque& max_sense = iteratedModel.primary_response_fn_sense();
    RealVector best_fns(iteratedModel.response_size());

    // Get best (single) objective value respecting max/min expectations
    best_fns[0] = (!max_sense.empty() && max_sense[0]) ? -best_f : best_f;

    // Get best Nonlinear Equality Constraints from TPL
    if( numNonlinearEqConstraints > 0 )
    {
      auto best_nln_eqs = demoOpt->get_best_nln_eqs(); // TPL_SPECIFIC
      //std::copy( best_nln_eqs.begin(), best_nln_eqs.end(), &best_fns(0)+1);
      dataTransferHandler->get_best_nonlinear_eq_constraints_from_tpl(
                                          best_nln_eqs,
                                          best_fns);
    }

    // Get best Nonlinear Inequality Constraints from TPL
    if( numNonlinearIneqConstraints > 0 )
    {
      auto best_nln_ineqs = demoOpt->get_best_nln_ineqs(); // TPL_SPECIFIC

      dataTransferHandler->get_best_nonlinear_ineq_constraints_from_tpl(
                                          best_nln_ineqs,
                                          best_fns);
    }

    bestResponseArray.front().function_values(best_fns);
  }

  std::vector<double> best_x = demoOpt->get_best_x(); // TPL_SPECIFIC

  // Set Dakota optimal value data.
  set_variables<>(best_x, iteratedModel, bestVariablesArray.front());

} // core_run


// -----------------------------------------------------------------

// Dakota will call initialize_run() for any one-time setup.  If the
// TPL being integrated requires such setup, it should be implemented
// here.  Replace the demoOpt method call with any initialization (or
// call to initialization) needed.

void DemoTPLOptimizer::initialize_run()
{
  Optimizer::initialize_run();
  demoOpt->initialize(true); // TPL_SPECIFIC
}
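
// As noted above, the TPL object does not have to be created in the
// constructor.  If construction must be deferred, e.g., because the TPL
// needs information that is only available at run time, a lazy
// alternative along these lines could be used here instead (sketch only;
// the callback registration done in the constructor would also need to
// move here):
//
//   if (!demoOpt)
//     demoOpt = std::make_shared<Demo_Opt>();
//   demoOpt->initialize(true);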


// -----------------------------------------------------------------

// This helper function sets the TPL algorithmic parameters.  Common
// stopping criteria are obtained from Dakota and passed to the TPL
// directly.  For this initial example, any other algorithmic parameters
// must be supplied via a file that the TPL reads in itself.  The
// formatting and reading of the file are the TPL's responsibility.  The
// file name is specified in the Dakota input file.  This code extracts
// the filename and passes it to the TPL.

void DemoTPLOptimizer::set_demo_parameters()
{
  int    max_fn_evals;
  int    max_iters;
  double conv_tol;
  double min_var_chg;
  double obj_target;

  // Values for common stopping criteria can be obtained from Dakota.
  // If the user has provided values in the input file, those values will
  // be returned.  Otherwise, Dakota defaults will be returned.
  get_common_stopping_criteria(max_fn_evals, max_iters, conv_tol, min_var_chg, obj_target);

  // ------------------  TPL_SPECIFIC  ------------------
  demoOpt->set_param("Maximum Evaluations", max_fn_evals);
  demoOpt->set_param("Maximum Iterations",  max_iters);
  demoOpt->set_param("Function Tolerance",  obj_target);

  // Check for a native Demo_Opt input file.  The file name needs to be
  // included in the Dakota input file.
  String adv_opts_file = probDescDB.get_string("method.advanced_options_file");
  if (!adv_opts_file.empty())
  {
    if (!boost::filesystem::exists(adv_opts_file))
    {
      Cerr << "\nError: Demo_Opt options_file '" << adv_opts_file
           << "' specified, but file not found.\n";
      abort_handler(METHOD_ERROR);
    }
  }

  // Replace this line by whatever the TPL being integrated uses to
  // set its input file name.

  demoOpt->set_solver_options(adv_opts_file, true); // TPL_SPECIFIC

} // set_demo_parameters

// -----------------------------------------------------------------

// This helper function gets the initial values of the variables and
// the values of the bound constraints.  They are returned in standard
// C++ data types.  The values are passed on to the TPL.

void DemoTPLOptimizer::initialize_variables_and_constraints()
{
  // Get the number of variables, the initial values, and the values
  // of bound constraints.  They are returned in standard C++ data
  // types.  This example considers only continuous variables.  Other
  // types of variables and constraints will be added at a later time.
  // Note that double is aliased to Real in Dakota.
  int num_total_vars = numContinuousVars;
  std::vector<Real> init_point(num_total_vars);
  std::vector<Real> lower(num_total_vars),
                    upper(num_total_vars);

  // More on DemoOptTraits can be found in DemoOptimizer.hpp.
  get_variables(iteratedModel, init_point);
  get_variable_bounds_from_dakota<DemoOptTraits>( lower, upper );

  // Replace this line by whatever the TPL being integrated uses to
  // ingest variable values and bounds, including any data type
  // conversion needed.

  // ------------------  TPL_SPECIFIC  ------------------
  demoOpt->set_problem_data(init_point,   //  "Initial Guess"
                            lower     ,   //  "Lower Bounds"
                            upper      ); //  "Upper Bounds"

} // initialize_variables_and_constraints

// -----------------------------------------------------------------

// This is the implementation of the objective function evaluation.
// This assumes a function callback approach, i.e., the TPL optimizer
// calls this function whenever it needs an evaluation done.  Other
// ways to interface to functions will be added in the future.  This
// interface should be replaced with whatever interface the TPL uses.

Real
DemoTPLOptimizer::compute_obj(const std::vector<double> & x, bool verbose)
{
  // Tell Dakota what variable values to use for the function
  // evaluation.  x must be (converted to) a std::vector<double> to use
  // this demo with minimal changes.
  set_variables<>(x, iteratedModel, iteratedModel.current_variables());

  // Evaluate the function at the specified x.
  iteratedModel.evaluate();

  // Retrieve the function value and sign it appropriately based
  // on whether minimize or maximize has been specified in the Dakota
  // input file.
  double f = dataTransferHandler->get_response_value_from_dakota(iteratedModel.current_response());

  return f;
}


// -----------------------------------------------------------------

// This is the implementation of the nonlinear equality constraint evaluation.
// This assumes a function callback approach, i.e., the TPL optimizer
// calls this function whenever it needs an evaluation done.  Other
// ways to interface to functions will be added in the future.  This
// interface should be replaced with whatever interface the TPL uses.

int
DemoTPLOptimizer::get_num_nln_eq(bool verbose)
{
  return dataTransferHandler->num_dakota_nonlin_eq_constraints();
}


// -----------------------------------------------------------------

void
DemoTPLOptimizer::compute_nln_eq(std::vector<Real> &c, const std::vector<Real> &x, bool verbose)
{
  // Tell Dakota what variable values to use for the nonlinear constraint
  // evaluations.  x must be (converted to) a std::vector<double> to use
  // this demo with minimal changes.
  set_variables<>(x, iteratedModel, iteratedModel.current_variables());

  // Evaluate the function at the specified x.
  iteratedModel.evaluate();

  // Use an adapter to copy data
  dataTransferHandler->get_nonlinear_eq_constraints_from_dakota(iteratedModel.current_response(), c);

} // nonlinear eq constraints value


// -----------------------------------------------------------------

// This is the implementation of the nonlinear inequality constraint evaluation.
// This assumes a function callback approach, i.e., the TPL optimizer
// calls this function whenever it needs an evaluation done.  Other
// ways to interface to functions will be added in the future.  This
// interface should be replaced with whatever interface the TPL uses.

int
DemoTPLOptimizer::get_num_nln_ineq(bool verbose)
{
  return dataTransferHandler->num_tpl_nonlin_ineq_constraints();
}

// -----------------------------------------------------------------

void
DemoTPLOptimizer::compute_nln_ineq(std::vector<Real> &c, const std::vector<Real> &x, bool verbose)
{
  // Tell Dakota what variable values to use for the nonlinear constraint
  // evaluations.  x must be (converted to) a std::vector<double> to use
  // this demo with minimal changes.
  set_variables<>(x, iteratedModel, iteratedModel.current_variables());

  // Evaluate the function at the specified x.
  iteratedModel.evaluate();

  // Use an adapter to copy data from Dakota into Demo_Opt
  dataTransferHandler->get_nonlinear_ineq_constraints_from_dakota(iteratedModel.current_response(), c);

} // nonlinear ineq constraints value

} // namespace Dakota