/*  _______________________________________________________________________

    DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
    Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
    This software is distributed under the GNU Lesser General Public License.
    For more information, see the README file in the top Dakota directory.
    _______________________________________________________________________ */

//- Class:       Minimizer
//- Description: Implementation code for the Minimizer class
//- Owner:       Mike Eldred
//- Checked by:

#include "DakotaMinimizer.hpp"
#include "dakota_system_defs.hpp"
#include "dakota_data_io.hpp"
#include "dakota_tabular_io.hpp"
#include "DakotaModel.hpp"
#include "ProblemDescDB.hpp"
#include "IteratorScheduler.hpp"
#include "ParamResponsePair.hpp"
#include "PRPMultiIndex.hpp"
#include "RecastModel.hpp"
#include "DataTransformModel.hpp"
#include "ScalingModel.hpp"
#include "Teuchos_SerialDenseHelpers.hpp"
#include "ExperimentData.hpp"

#ifdef __SUNPRO_CC
#include <math.h>  // for std::log
#endif // __SUNPRO_CC

static const char rcsId[]="@(#) $Id: DakotaMinimizer.cpp 7029 2010-10-22 00:17:02Z mseldre $";


namespace Dakota {

extern PRPCache data_pairs; // global container

// initialization of static needed by RecastModel
Minimizer* Minimizer::minimizerInstance(NULL);


/** This constructor extracts inherited data for the optimizer and least
    squares branches and performs sanity checking on constraint settings. */
Minimizer::Minimizer(ProblemDescDB& problem_db, Model& model,
                     std::shared_ptr<TraitsBase> traits):
  Iterator(BaseConstructor(), problem_db, traits),
  constraintTol(probDescDB.get_real("method.constraint_tolerance")),
  bigRealBoundSize(BIG_REAL_BOUND), bigIntBoundSize(1000000000),
  boundConstraintFlag(false),
  speculativeFlag(probDescDB.get_bool("method.speculative")),
  optimizationFlag(true),
  calibrationDataFlag(probDescDB.get_bool("responses.calibration_data") ||
    !probDescDB.get_string("responses.scalar_data_filename").empty()),
  expData(probDescDB, model.current_response().shared_data(), outputLevel),
  numExperiments(0), numTotalCalibTerms(0),
  scaleFlag(probDescDB.get_bool("method.scaling"))
{
  iteratedModel = model;
  update_from_model(iteratedModel); // variable/response counts & checks

  // Re-assign Iterator defaults specialized to the Minimizer branch
  if (maxIterations < 0) // DataMethod default set to -1
    maxIterations = 100;
  // The Minimizer default number of final solutions is 1, unless it is a
  // multi-objective frontier-based method
  if (!numFinalSolutions && methodName != MOGA)
    numFinalSolutions = 1;
}


Minimizer::Minimizer(unsigned short method_name, Model& model,
                     std::shared_ptr<TraitsBase> traits):
  Iterator(NoDBBaseConstructor(), method_name, model, traits),
  constraintTol(0.), bigRealBoundSize(1.e+30), bigIntBoundSize(1000000000),
  boundConstraintFlag(false), speculativeFlag(false), optimizationFlag(true),
  calibrationDataFlag(false), numExperiments(0), numTotalCalibTerms(0),
  scaleFlag(false)
{
  update_from_model(iteratedModel); // variable/constraint counts & checks
}


Minimizer::Minimizer(unsigned short method_name, size_t num_lin_ineq,
                     size_t num_lin_eq, size_t num_nln_ineq, size_t num_nln_eq,
                     std::shared_ptr<TraitsBase> traits):
  Iterator(NoDBBaseConstructor(), method_name, traits),
  bigRealBoundSize(1.e+30), bigIntBoundSize(1000000000),
  numNonlinearIneqConstraints(num_nln_ineq),
  numNonlinearEqConstraints(num_nln_eq),
  numLinearIneqConstraints(num_lin_ineq),
  numLinearEqConstraints(num_lin_eq),
  numNonlinearConstraints(num_nln_ineq + num_nln_eq),
  numLinearConstraints(num_lin_ineq + num_lin_eq),
  numConstraints(numNonlinearConstraints + numLinearConstraints),
  numUserPrimaryFns(1), numIterPrimaryFns(1), boundConstraintFlag(false),
  speculativeFlag(false), optimizationFlag(true),
  calibrationDataFlag(false), numExperiments(0), numTotalCalibTerms(0),
  scaleFlag(false)
{ }


bool Minimizer::resize()
{
  bool parent_reinit_comms = Iterator::resize();

  Cerr << "\nError: Resizing is not yet supported in method "
       << method_enum_to_string(methodName) << "." << std::endl;
  abort_handler(METHOD_ERROR);

  return parent_reinit_comms;
}


void Minimizer::update_from_model(const Model& model)
{
  Iterator::update_from_model(model);

  numContinuousVars     = model.cv();  numDiscreteIntVars  = model.div();
  numDiscreteStringVars = model.dsv(); numDiscreteRealVars = model.drv();
  numFunctions          = model.response_size();

  bool err_flag = false;
  // Check for the correct bit associated with methodName
  if ( !(methodName & MINIMIZER_BIT) ) {
    Cerr << "\nError: minimizer bit not activated for method instantiation "
         << "within Minimizer branch." << std::endl;
    err_flag = true;
  }

  // Check for active design variables and discrete variable support.
  // Explicitly check for the COLINOptimizer methods whose variable support
  // differs from that of the majority of COLINOptimizer methods.
  if (( traits()->supports_continuous_variables() &&
        traits()->supports_discrete_variables()) ||
      (methodName == COLINY_EA || methodName == COLINY_BETA ))
  {
    if (!numContinuousVars && !numDiscreteIntVars && !numDiscreteStringVars &&
        !numDiscreteRealVars) {
      Cerr << "\nError: " << method_enum_to_string(methodName)
           << " requires active variables." << std::endl;
      err_flag = true;
    }
  }
  else { // methods supporting only continuous design variables
    if (!numContinuousVars) {
      Cerr << "\nError: " << method_enum_to_string(methodName)
           << " requires active continuous variables." << std::endl;
      err_flag = true;
    }
    if (numDiscreteIntVars || numDiscreteStringVars || numDiscreteRealVars)
      Cerr << "\nWarning: discrete design variables ignored by "
           << method_enum_to_string(methodName) << std::endl;
  }

  // Check for response functions
  if ( numFunctions <= 0 ) {
    Cerr << "\nError: number of response functions must be greater than zero."
         << std::endl;
    err_flag = true;
  }

  // Check for a gradient/Hessian/minimizer match: abort with an error for
  // cases where insufficient derivative data is available (e.g., full Newton
  // methods require Hessians), but only echo warnings in other cases (e.g.,
  // if more derivative data is specified than is needed --> for example,
  // don't enforce that analytic Hessians require full Newton methods).
  const String& grad_type = model.gradient_type();
  const String& hess_type = model.hessian_type();
  if (outputLevel >= VERBOSE_OUTPUT)
    Cout << "Gradient type = " << grad_type << " Hessian type = " << hess_type
         << '\n';
  if ( grad_type == "none" && ( ( methodName & LEASTSQ_BIT ) ||
       ( ( methodName & OPTIMIZER_BIT ) && methodName >= NONLINEAR_CG ) ) ) {
    Cerr << "\nError: gradient-based minimizers require a gradient "
         << "specification." << std::endl;
    err_flag = true;
  }
  if ( hess_type != "none" && methodName != OPTPP_NEWTON )
    Cerr << "\nWarning: Hessians are only utilized by full Newton methods.\n\n";
  if ( ( grad_type != "none" || hess_type != "none") &&
       ( ( methodName & OPTIMIZER_BIT ) && methodName < NONLINEAR_CG ) )
    Cerr << "\nWarning: Gradient/Hessian specification for a nongradient-based "
         << "optimizer is ignored.\n\n";
  // TO DO: verify vendor finite differencing support
  vendorNumericalGradFlag
    = (grad_type == "numerical" && model.method_source() == "vendor");

  numNonlinearIneqConstraints = model.num_nonlinear_ineq_constraints();
  numNonlinearEqConstraints   = model.num_nonlinear_eq_constraints();
  numLinearIneqConstraints    = model.num_linear_ineq_constraints();
  numLinearEqConstraints      = model.num_linear_eq_constraints();
  numNonlinearConstraints     = numNonlinearIneqConstraints
                              + numNonlinearEqConstraints;
  numLinearConstraints = numLinearIneqConstraints + numLinearEqConstraints;
  numConstraints       = numNonlinearConstraints + numLinearConstraints;
  numIterPrimaryFns    = numUserPrimaryFns = model.num_primary_fns();
  if (model.primary_fn_type() == CALIB_TERMS)
    numTotalCalibTerms = numUserPrimaryFns; // default value

  // TO DO: hard error if not supported; warning if promoted;
  // quiet if natively supported


  // Check for linear constraint support in method selection
  // Explicitly check for the COLINOptimizer and SNLLOptimizer methods whose
  // linear constraint support differs from that of the majority of their
  // respective COLINOptimizer or SNLLOptimizer peers.
  if ( numLinearEqConstraints && (!traits()->supports_linear_equality() ||
       ( methodName == OPTPP_CG || methodName == OPTPP_PDS ||
         methodName == COLINY_SOLIS_WETS ))) {
    Cerr << "\nError: linear equality constraints not currently supported by "
         << method_enum_to_string(methodName) << ".\n Please select a "
         << "different method." << std::endl;
    err_flag = true;
  }
  if ( numLinearIneqConstraints && (!traits()->supports_linear_inequality() ||
       ( methodName == OPTPP_CG || methodName == OPTPP_PDS ||
         methodName == COLINY_SOLIS_WETS ))) {
    Cerr << "\nError: linear inequality constraints not currently supported by "
         << method_enum_to_string(methodName) << ".\n Please select a "
         << "different method." << std::endl;
    err_flag = true;
  }

  // Check for nonlinear constraint support in method selection
  // Explicitly check for the SNLLOptimizer methods whose nonlinear constraint
  // support differs from that of the majority of SNLLOptimizer methods.
  if ( numNonlinearEqConstraints &&
       (!traits()->supports_nonlinear_equality() ||
        ( methodName == OPTPP_CG || methodName == OPTPP_PDS))) {
    Cerr << "\nError: nonlinear equality constraints not currently supported by "
         << method_enum_to_string(methodName) << ".\n Please select a "
         << "different method." << std::endl;
    err_flag = true;
  }
  if ( numNonlinearIneqConstraints &&
       (!traits()->supports_nonlinear_inequality() ||
        ( methodName == OPTPP_CG || methodName == OPTPP_PDS))) {
    Cerr << "\nError: nonlinear inequality constraints not currently supported by "
         << method_enum_to_string(methodName) << ".\n Please select a "
         << "different method." << std::endl;
    err_flag = true;
  }

  if (err_flag)
    abort_handler(-1);

  // set boundConstraintFlag
  size_t i;
  const RealVector& c_l_bnds = model.continuous_lower_bounds();
  const RealVector& c_u_bnds = model.continuous_upper_bounds();
  //Cout << "Continuous lower bounds:\n" << c_l_bnds
  //     << "Continuous upper bounds:\n" << c_u_bnds;
  for (i=0; i<numContinuousVars; ++i)
    if (c_l_bnds[i] > -bigRealBoundSize || c_u_bnds[i] < bigRealBoundSize)
      { boundConstraintFlag = true; break; }
  bool discrete_bounds = (methodName == MOGA || methodName == SOGA ||
                          methodName == COLINY_EA);
  if (discrete_bounds) {
    const IntVector&  di_l_bnds = model.discrete_int_lower_bounds();
    const IntVector&  di_u_bnds = model.discrete_int_upper_bounds();
    const RealVector& dr_l_bnds = model.discrete_real_lower_bounds();
    const RealVector& dr_u_bnds = model.discrete_real_upper_bounds();
    for (i=0; i<numDiscreteIntVars; ++i)
      if (di_l_bnds[i] > -bigIntBoundSize || di_u_bnds[i] < bigIntBoundSize)
        { boundConstraintFlag = true; break; }
    for (i=0; i<numDiscreteRealVars; ++i)
      if (dr_l_bnds[i] > -bigRealBoundSize || dr_u_bnds[i] < bigRealBoundSize)
        { boundConstraintFlag = true; break; }
  }

  // Configure the TPL data transfer helper
  dataTransferHandler.reset(new TPLDataTransfer());
  dataTransferHandler->configure_data_adapters( methodTraits, model );
}


void Minimizer::initialize_run()
{
  // Verify that iteratedModel is not null (default ctor and some
  // NoDBBaseConstructor ctors leave iteratedModel uninitialized).
  if (!iteratedModel.is_null()) {
    // update context data that is outside scope of local DB specifications.
    // This is needed for reused objects.
    //iteratedModel.db_scope_reset(); // TO DO: need better name?

    // This catches uninitialized models used by local iterators that are
    // not called through IteratorScheduler::run_iterator(). Within a
    // recursion, it will correspond to the first initialize_run() with an
    // uninitialized mapping, such as the outer iterator on the first pass
    // of a recursion. On subsequent passes, it may correspond to the inner
    // iterator. The Iterator scope should not matter for the iteratedModel
    // mapping initialize/finalize.
    if (!iteratedModel.mapping_initialized()) {
      ParLevLIter pl_iter = methodPCIter->mi_parallel_level_iterator();
      bool var_size_changed = iteratedModel.initialize_mapping(pl_iter);
      if (var_size_changed)
        /*bool reinit_comms =*/ resize(); // ignore return value
    }

    // Do not reset the evaluation reference for sub-iterators
    // (previously managed via presence/absence of ostream)
    //if (!subIteratorFlag)
    if (summaryOutputFlag)
      iteratedModel.set_evaluation_reference();
  }

  // Track any previous object instance in case of recursion. Note that
  // optimizerInstance and minimizerInstance must be tracked separately since
  // the previous optimizer and previous minimizer could be different instances
  // (e.g., for MCUU with NL2SOL for NLS and NPSOL for MPP search, the previous
  // minimizer is NL2SOL and the previous optimizer is NULL).
  prevMinInstance   = minimizerInstance;
  minimizerInstance = this;

  if (subIteratorFlag) {

    // Catch any updates to all inactive vars. For now, do this in
    // initialize because derived classes may elect to reimplement
    // post_run. Needs to happen before derived solvers update their
    // best points, so it doesn't trample their active variables data.

    // Dive into the originally passed model (could keep a shallow copy of it)
    // Don't use a reference here as we want a shallow copy, not the instance
    Model usermodel(iteratedModel);
    for (unsigned short i=1; i<=myModelLayers; ++i) {
      usermodel = usermodel.subordinate_model();
    }

    // Could be lighter weight, but we don't have a way to update only inactive
    bestVariablesArray.front().all_continuous_variables(
      usermodel.all_continuous_variables());
    bestVariablesArray.front().all_discrete_int_variables(
      usermodel.all_discrete_int_variables());
    bestVariablesArray.front().all_discrete_real_variables(
      usermodel.all_discrete_real_variables());
  }
}


void Minimizer::post_run(std::ostream& s)
{
  archive_best_results();
  if (summaryOutputFlag) {
    // Print the function evaluation summary for all Iterators
    if (!iteratedModel.is_null())
      iteratedModel.print_evaluation_summary(s); // full hdr, relative counts

    // The remaining final results output varies by iterator branch
    print_results(s);
  }
}


void Minimizer::finalize_run()
{
  // Restore previous object instance in case of recursion.
  minimizerInstance = prevMinInstance;

  // Finalize an initialized mapping. This will correspond to the first
  // finalize_run() with an initialized mapping, such as the inner iterator
  // in a recursion.
  if (iteratedModel.mapping_initialized()) {
    // paired to matching call to Model.initialize_mapping() in
    // initialize_run() above
    bool var_size_changed = iteratedModel.finalize_mapping();
    if (var_size_changed)
      /*bool reinit_comms =*/ resize(); // ignore return value
  }

  Iterator::finalize_run(); // included for completeness
}


Model Minimizer::original_model(unsigned short recasts_left) const
{
  // Dive into the originally passed model (could keep a shallow copy of it)
  // Don't use a reference here as we want a shallow copy, not the instance
  Model usermodel(iteratedModel);
  for (unsigned short i=1; i<=myModelLayers - recasts_left; ++i) {
    usermodel = usermodel.subordinate_model();
  }

  return usermodel;
}
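
// Illustrative example (assumed layer composition, not prescribed by this
// file): if a Minimizer applied two recasts, e.g. a DataTransformModel over
// a ScalingModel over the user's model (myModelLayers == 2), then
// original_model() (recasts_left == 0) peels both recasts and returns the
// user's model, while original_model(1) would stop one recast short.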


/** Wraps iteratedModel in a DataTransformModel that reads observation data
    in order to compute least squares residuals. Applied as the first
    wrapper, so its construction sizes are based on iteratedModel; the
    Iterator counts are then updated from the resulting RecastModel. */
void Minimizer::data_transform_model()
{
  if (outputLevel >= DEBUG_OUTPUT)
    Cout << "Initializing calibration data transformation" << std::endl;

  // TODO: need better validation of these sizes and data with error msgs
  numExperiments = probDescDB.get_sizet("responses.num_experiments");
  if (numExperiments < 1) {
    Cerr << "Error: number of experiments must be at least 1." << std::endl;
    abort_handler(-1);
  }
  // TODO: verify: we don't want to weight by missing sigma: all = 1.0
  expData.load_data("Least Squares");

  iteratedModel.
    assign_rep(std::make_shared<DataTransformModel>(iteratedModel, expData));
  ++myModelLayers;
  dataTransformModel = iteratedModel;

  // update sizes in Iterator view from the RecastModel
  numIterPrimaryFns = numTotalCalibTerms = iteratedModel.num_primary_fns();
  numFunctions = iteratedModel.response_size();
  if (outputLevel > NORMAL_OUTPUT)
    Cout << "Adjusted number of calibration terms: " << numTotalCalibTerms
         << std::endl;

  // adjust active set vector to reflect new residual size
  ShortArray asv(numFunctions, 1);
  activeSet.request_vector(asv);

  // TODO: review where derivatives should happen in the data transform
  // case: where derivatives are computed should be tied to whether
  // interpolation is present

  // Need to correct a flaw in doing this: estimate derivatives uses a
  // mix of DB lookups and new evals, but the DB lookups aren't
  // transformed!

  // In the data transform case, perform numerical derivatives at the
  // RecastModel level (override the RecastModel default and the
  // subModel default)
  // BMA: Disabled until debugged...
  //  iteratedModel.supports_derivative_estimation(true);
  //  RecastModel* recast_model_rep = (RecastModel*) iteratedModel.model_rep();
  //  recast_model_rep->submodel_supports_derivative_estimation(false);
}
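
// Sizing note (illustrative, assuming per-experiment residual replication by
// DataTransformModel): with 3 user calibration terms and 2 experiments, the
// recast would expose 6 total residuals, which is why numTotalCalibTerms,
// numFunctions, and the active set vector are refreshed from the RecastModel
// above rather than reusing the user-space counts.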


/** Wrap the iteratedModel in a scaling transformation, such that
    iteratedModel now contains a scaling recast model. Potentially
    affects variables and primary and secondary responses. */
void Minimizer::scale_model()
{
  // iteratedModel becomes the sub-model of a RecastModel:
  iteratedModel.assign_rep(std::make_shared<ScalingModel>(iteratedModel));
  scalingModel = iteratedModel;
  ++myModelLayers;
}
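
// Usage sketch (hypothetical wiring; derived minimizers own the actual call
// sequence): constructors typically apply these wrappers based on the
// corresponding specification flags, e.g.
//
//   if (calibrationDataFlag) data_transform_model(); // residual expansion
//   if (scaleFlag)           scale_model();          // scaling recast
//
// Each wrapper increments myModelLayers so that original_model() can later
// peel the recasts back off in reverse order.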


/** The composite objective computation sums up the contributions from
    one or more primary functions using the primary response fn weights. */
Real Minimizer::
objective(const RealVector& fn_vals, const BoolDeque& max_sense,
          const RealVector& primary_wts) const
{
  return objective(fn_vals, numUserPrimaryFns, max_sense, primary_wts);
}

/** This "composite" objective is a more general case of the previous
    objective(), but doesn't presume a reduction map from user to
    iterated space. Used to apply weights and sense in COLIN results
    sorting. Leaving as a duplicate implementation pending resolution
    of COLIN lookups. */
Real Minimizer::
objective(const RealVector& fn_vals, size_t num_fns,
          const BoolDeque& max_sense, const RealVector& primary_wts) const
{
  Real obj_fn = 0.0;
  if (optimizationFlag) { // MOO
    bool use_sense = !max_sense.empty();
    if (primary_wts.empty()) {
      for (size_t i=0; i<num_fns; ++i)
        if (use_sense && max_sense[i]) obj_fn -= fn_vals[i];
        else                           obj_fn += fn_vals[i];
      if (num_fns > 1)
        obj_fn /= (Real)num_fns; // default weight = 1/n
    }
    else {
      for (size_t i=0; i<num_fns; ++i)
        if (use_sense && max_sense[i]) obj_fn -= primary_wts[i] * fn_vals[i];
        else                           obj_fn += primary_wts[i] * fn_vals[i];
    }
  }
  else { // NLS
    if (primary_wts.empty())
      for (size_t i=0; i<num_fns; ++i)
        obj_fn += std::pow(fn_vals[i], 2); // default weight = 1
    else
      for (size_t i=0; i<num_fns; ++i)
        obj_fn += primary_wts[i] * std::pow(fn_vals[i], 2);
  }
  return obj_fn;
}
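
// Worked example (illustrative values): in MOO mode with fn_vals = {2., 4.},
// empty weights, and max_sense = {false, true}, the reduction yields
//   obj = (2. - 4.) / 2 = -1.   (default weight 1/n; max sense flips sign)
// whereas NLS mode with the same values and empty weights yields
//   obj = 2.*2. + 4.*4. = 20.   (weighted sum of squared residuals, w = 1)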

void Minimizer::
objective_gradient(const RealVector& fn_vals, const RealMatrix& fn_grads,
                   const BoolDeque& max_sense, const RealVector& primary_wts,
                   RealVector& obj_grad) const
{
  objective_gradient(fn_vals, numUserPrimaryFns, fn_grads, max_sense,
                     primary_wts, obj_grad);
}

/** The composite objective gradient computation combines the
    contributions from one or more primary function gradients,
    including the effect of any primary function weights. In the case
    of a linear mapping (MOO), only the primary function gradients are
    required, but in the case of a nonlinear mapping (NLS), primary
    function values are also needed. Within RecastModel::set_mapping(),
    the active set requests are automatically augmented to make values
    available when needed, based on nonlinearRespMapping settings. */
void Minimizer::
objective_gradient(const RealVector& fn_vals, size_t num_fns,
                   const RealMatrix& fn_grads, const BoolDeque& max_sense,
                   const RealVector& primary_wts, RealVector& obj_grad) const
{
  if (obj_grad.length() != numContinuousVars)
    obj_grad.sizeUninitialized(numContinuousVars);
  obj_grad = 0.;
  if (optimizationFlag) { // MOO
    bool use_sense = !max_sense.empty();
    if (primary_wts.empty()) {
      for (size_t i=0; i<num_fns; ++i) {
        const Real* fn_grad_i = fn_grads[i];
        if (use_sense && max_sense[i])
          for (size_t j=0; j<numContinuousVars; ++j)
            obj_grad[j] -= fn_grad_i[j];
        else
          for (size_t j=0; j<numContinuousVars; ++j)
            obj_grad[j] += fn_grad_i[j];
      }
      if (num_fns > 1)
        obj_grad.scale(1./(Real)num_fns); // default weight = 1/n
    }
    else {
      for (size_t i=0; i<num_fns; ++i) {
        const Real& wt_i      = primary_wts[i];
        const Real* fn_grad_i = fn_grads[i];
        if (use_sense && max_sense[i])
          for (size_t j=0; j<numContinuousVars; ++j)
            obj_grad[j] -= wt_i * fn_grad_i[j];
        else
          for (size_t j=0; j<numContinuousVars; ++j)
            obj_grad[j] += wt_i * fn_grad_i[j];
      }
    }
  }
  else { // NLS
    for (size_t i=0; i<num_fns; ++i) {
      Real wt_2_fn_val = 2. * fn_vals[i]; // default weight = 1
      if (!primary_wts.empty()) wt_2_fn_val *= primary_wts[i];
      const Real* fn_grad_i = fn_grads[i];
      for (size_t j=0; j<numContinuousVars; ++j)
        obj_grad[j] += wt_2_fn_val * fn_grad_i[j];
    }
  }
}
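
// Reduction formulas implemented by the loops above: for MOO,
//   grad(obj) = sum_i s_i * w_i * grad(f_i),   s_i = -1 for max sense,
// with w_i = 1/n when no weights are given; for NLS,
//   grad(obj) = sum_i 2 * w_i * f_i * grad(f_i),
// i.e. the exact gradient of the weighted sum of squared residuals.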

void Minimizer::
objective_hessian(const RealVector& fn_vals, const RealMatrix& fn_grads,
                  const RealSymMatrixArray& fn_hessians,
                  const BoolDeque& max_sense, const RealVector& primary_wts,
                  RealSymMatrix& obj_hess) const
{
  objective_hessian(fn_vals, numUserPrimaryFns, fn_grads, fn_hessians,
                    max_sense, primary_wts, obj_hess);
}


/** The composite objective Hessian computation combines the
    contributions from one or more primary function Hessians,
    including the effect of any primary function weights. In the case
    of a linear mapping (MOO), only the primary function Hessians are
    required, but in the case of a nonlinear mapping (NLS), primary
    function values and gradients are also needed in general
    (gradients only in the case of a Gauss-Newton approximation).
    Within the default RecastModel::set_mapping(), the active set
    requests are automatically augmented to make values and gradients
    available when needed, based on nonlinearRespMapping settings. */
void Minimizer::
objective_hessian(const RealVector& fn_vals, size_t num_fns,
                  const RealMatrix& fn_grads,
                  const RealSymMatrixArray& fn_hessians,
                  const BoolDeque& max_sense, const RealVector& primary_wts,
                  RealSymMatrix& obj_hess) const
{
  if (obj_hess.numRows() != numContinuousVars)
    obj_hess.shapeUninitialized(numContinuousVars);
  obj_hess = 0.;
  size_t i, j, k;
  if (optimizationFlag) { // MOO
    bool use_sense = !max_sense.empty();
    if (primary_wts.empty()) {
      for (i=0; i<num_fns; ++i) {
        const RealSymMatrix& fn_hess_i = fn_hessians[i];
        if (use_sense && max_sense[i])
          for (j=0; j<numContinuousVars; ++j)
            for (k=0; k<=j; ++k)
              obj_hess(j,k) -= fn_hess_i(j,k);
        else
          for (j=0; j<numContinuousVars; ++j)
            for (k=0; k<=j; ++k)
              obj_hess(j,k) += fn_hess_i(j,k);
      }
      if (num_fns > 1)
        obj_hess *= 1./(Real)num_fns; // default weight = 1/n
    }
    else
      for (i=0; i<num_fns; ++i) {
        const RealSymMatrix& fn_hess_i = fn_hessians[i];
        const Real& wt_i = primary_wts[i];
        if (use_sense && max_sense[i])
          for (j=0; j<numContinuousVars; ++j)
            for (k=0; k<=j; ++k)
              obj_hess(j,k) -= wt_i * fn_hess_i(j,k);
        else
          for (j=0; j<numContinuousVars; ++j)
            for (k=0; k<=j; ++k)
              obj_hess(j,k) += wt_i * fn_hess_i(j,k);
      }
  }
  else { // NLS
    if (fn_grads.empty()) {
      Cerr << "Error: Hessian reduction for NLS requires a minimum of least "
           << "squares gradients (for Gauss-Newton)." << std::endl;
      abort_handler(-1);
    }
    // Gauss-Newton approximate Hessian = J^T J (neglect the f*H term)
    if (fn_hessians.empty() || fn_vals.empty()) {
      if (primary_wts.empty())
        for (j=0; j<numContinuousVars; ++j)
          for (k=0; k<=j; ++k) {
            Real& sum = obj_hess(j,k); sum = 0.;
            for (i=0; i<num_fns; ++i)
              sum += fn_grads(j,i) * fn_grads(k,i); // default weight = 1
            sum *= 2.;
          }
      else
        for (j=0; j<numContinuousVars; ++j)
          for (k=0; k<=j; ++k) {
            Real& sum = obj_hess(j,k); sum = 0.;
            for (i=0; i<num_fns; ++i)
              sum += primary_wts[i] * fn_grads(j,i) * fn_grads(k,i);
            sum *= 2.;
          }
    }
    // full Hessian = f H + J^T J
    else {
      if (primary_wts.empty())
        for (j=0; j<numContinuousVars; ++j)
          for (k=0; k<=j; ++k) {
            Real& sum = obj_hess(j,k); sum = 0.;
            for (i=0; i<num_fns; ++i)
              sum += fn_grads(j,i) * fn_grads(k,i) +
                     fn_vals[i] * fn_hessians[i](j,k);
            sum *= 2.;
          }
      else
        for (j=0; j<numContinuousVars; ++j)
          for (k=0; k<=j; ++k) {
            Real& sum = obj_hess(j,k); sum = 0.;
            for (i=0; i<num_fns; ++i)
              sum += primary_wts[i] * (fn_grads(j,i) * fn_grads(k,i) +
                                       fn_vals[i] * fn_hessians[i](j,k));
            sum *= 2.;
          }
    }
  }
}
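
// Reduction formulas implemented by the NLS branch above:
//   Gauss-Newton:  hess(obj) ~= 2 * sum_i w_i * grad(f_i) grad(f_i)^T
//   full Hessian:  hess(obj)  = 2 * sum_i w_i * ( grad(f_i) grad(f_i)^T
//                                                 + f_i * hess(f_i) )
// The Gauss-Newton branch is selected whenever function values or Hessians
// are unavailable, neglecting the residual-curvature term f_i * hess(f_i).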


void Minimizer::archive_best_variables(const bool active_only) const
{
  if (!resultsDB.active()) return;
  // archive the best point in the iterator database
  const StrStrSizet& iterator_id = run_identifier();
  const size_t num_points = bestVariablesArray.size();

  const auto& cv_labels = (active_only) ?
    variables_results().continuous_variable_labels() :
    variables_results().all_continuous_variable_labels();
  const auto& div_labels = (active_only) ?
    variables_results().discrete_int_variable_labels() :
    variables_results().all_discrete_int_variable_labels();
  const auto& dsv_labels = (active_only) ?
    variables_results().discrete_string_variable_labels() :
    variables_results().all_discrete_string_variable_labels();
  const auto& drv_labels = (active_only) ?
    variables_results().discrete_real_variable_labels() :
    variables_results().all_discrete_real_variable_labels();
  // ## legacy text output ##
  if (numContinuousVars) {
    // labels
    resultsDB.insert(iterator_id, resultsNames.cv_labels, cv_labels);
    // best variables, with labels in metadata
    MetaDataType md;
    md["Array Spans"] = make_metadatavalue("Best Sets");
    md["Row Labels"]  = make_metadatavalue(cv_labels);
    resultsDB.array_allocate<RealVector>
      (iterator_id, resultsNames.best_cv, num_points, md);
  }
  if (numDiscreteIntVars) {
    // labels
    resultsDB.insert(iterator_id, resultsNames.div_labels, div_labels);
    // best variables, with labels in metadata
    MetaDataType md;
    md["Array Spans"] = make_metadatavalue("Best Sets");
    md["Row Labels"]  = make_metadatavalue(div_labels);
    resultsDB.array_allocate<IntVector>
      (iterator_id, resultsNames.best_div, num_points, md);
  }
  if (numDiscreteStringVars) {
    // labels
    resultsDB.insert(iterator_id, resultsNames.dsv_labels, dsv_labels);
    // best variables, with labels in metadata
    MetaDataType md;
    md["Array Spans"] = make_metadatavalue("Best Sets");
    md["Row Labels"]  = make_metadatavalue(dsv_labels);
    resultsDB.array_allocate<StringArray>
      (iterator_id, resultsNames.best_dsv, num_points, md);
  }
  if (numDiscreteRealVars) {
    // labels
    resultsDB.insert(iterator_id, resultsNames.drv_labels, drv_labels);
    // best variables, with labels in metadata
    MetaDataType md;
    md["Array Spans"] = make_metadatavalue("Best Sets");
    md["Row Labels"]  = make_metadatavalue(drv_labels);
    resultsDB.array_allocate<RealVector>
      (iterator_id, resultsNames.best_drv, num_points, md);
  }

  DimScaleMap cv_scale;
  cv_scale.emplace(0, StringScale("variables", cv_labels));
  DimScaleMap div_scale;
  div_scale.emplace(0, StringScale("variables", div_labels));
  DimScaleMap dsv_scale;
  dsv_scale.emplace(0, StringScale("variables", dsv_labels));
  DimScaleMap drv_scale;
  drv_scale.emplace(0, StringScale("variables", drv_labels));

  StringArray location;
  size_t r_index = 1; // index in location of variable type (e.g. "continuous")
  if (num_points > 1) {
    location.push_back("");
    r_index = 2;
  }
  location.push_back("best_parameters");
  location.push_back("");
  size_t point_index = 0;
  for (const auto& best_vars : bestVariablesArray) {
    if (num_points > 1)
      location[0] = String("set:") + std::to_string(point_index+1);
    if (numContinuousVars) {
      // coreDB backend, which will likely be removed in the future - RWH
      resultsDB.array_insert<RealVector>
        (run_identifier(), resultsNames.best_cv, point_index,
         best_vars.continuous_variables());

      // hdf5DB backend
      location[r_index] = "continuous";
      if (active_only)
        resultsDB.insert(iterator_id, location,
                         best_vars.continuous_variables(), cv_scale);
      else
        resultsDB.insert(iterator_id, location,
                         best_vars.all_continuous_variables(), cv_scale);
    }

    if (numDiscreteIntVars) {
      // coreDB backend, which will likely be removed in the future - RWH
      resultsDB.array_insert<IntVector>
        (run_identifier(), resultsNames.best_div, point_index,
         best_vars.discrete_int_variables());

      // hdf5DB backend
      location[r_index] = "discrete_integer";
      if (active_only)
        resultsDB.insert(iterator_id, location,
                         best_vars.discrete_int_variables(), div_scale);
      else
        resultsDB.insert(iterator_id, location,
                         best_vars.all_discrete_int_variables(), div_scale);
    }

    if (numDiscreteStringVars) {
      // coreDB backend, which will likely be removed in the future - RWH
      resultsDB.array_insert<StringArray>
        (run_identifier(), resultsNames.best_dsv, point_index,
         best_vars.discrete_string_variables());

      // hdf5DB backend
      location[r_index] = "discrete_string";
      if (active_only)
        resultsDB.insert(iterator_id, location,
                         best_vars.discrete_string_variables(), dsv_scale);
      else
        resultsDB.insert(iterator_id, location,
                         best_vars.all_discrete_string_variables(), dsv_scale);
    }

    if (numDiscreteRealVars) {
      // coreDB backend, which will likely be removed in the future - RWH
      resultsDB.array_insert<RealVector>
        (run_identifier(), resultsNames.best_drv, point_index,
         best_vars.discrete_real_variables());

      // hdf5DB backend
      location[r_index] = "discrete_real";
      if (active_only)
        resultsDB.insert(iterator_id, location,
                         best_vars.discrete_real_variables(), drv_scale);
      else
        resultsDB.insert(iterator_id, location,
                         best_vars.all_discrete_real_variables(), drv_scale);
    }
    point_index++;
  }
}


void Minimizer::archive_best_objective_functions() const
{
  const size_t num_points = bestResponseArray.size();
  StrStrSizet iterator_id = run_identifier();
  // ## legacy text output ##
  // labels
  resultsDB.insert(iterator_id, resultsNames.fn_labels,
                   response_results().function_labels());
  // best functions, with labels in metadata
  MetaDataType md;
  md["Array Spans"] = make_metadatavalue("Best Sets");
  md["Row Labels"]  = make_metadatavalue(response_results().function_labels());
  resultsDB.array_allocate<RealVector>
    (iterator_id, resultsNames.best_fns, num_points, md);

  size_t point_index = 0;
  StringArray location;
  if (num_points > 1)
    location.push_back("");
  location.push_back("best_objective_functions");
  DimScaleMap scale;
  scale.emplace(0, StringScale("responses",
                               response_results().function_labels()));
  for (const auto& best_resp : bestResponseArray) {
    if (num_points > 1)
      location[0] = String("set:") + std::to_string(point_index + 1);
    // coreDB-based API results output
    resultsDB.array_insert<RealVector>
      (iterator_id, resultsNames.best_fns, point_index,
       best_resp.function_values());

    // hdf5DB-based API results output
    const RealVector& fvals = best_resp.function_values();
    Teuchos::SerialDenseVector<int, Real>
      primary(Teuchos::View, const_cast<Real*>(&fvals[0]), numUserPrimaryFns);
    resultsDB.insert(iterator_id, location, primary, scale);
    point_index++;
  }
}

/// Archive residuals when calibration terms are used
void Minimizer::archive_best_residuals() const
{
  if (!resultsDB.active()) return;

  const RealVector& lsq_weights
    = original_model().primary_response_fn_weights();
  const StrStrSizet& iterator_id = run_identifier();
  size_t num_points = bestResponseArray.size();

  // ## legacy text output ##
  // labels
  resultsDB.insert(iterator_id, resultsNames.fn_labels,
                   response_results().function_labels());
  // best functions, with labels in metadata
  MetaDataType md;
  md["Array Spans"] = make_metadatavalue("Best Sets");
  md["Row Labels"]  = make_metadatavalue(response_results().function_labels());
  resultsDB.array_allocate<RealVector>
    (iterator_id, resultsNames.best_fns, num_points, md);

  // HDF5 setup
  StringArray residuals_location;
  StringArray norm_location;
  if (num_points > 1) {
    residuals_location.push_back("");
    norm_location.push_back("");
  }
  residuals_location.push_back("best_residuals");
  norm_location.push_back("best_norm");
  size_t point_index = 0;
  for (const auto& resp : bestResponseArray) {
    if (num_points > 1) {
      String set_string = String("set:") + std::to_string(point_index + 1);
      residuals_location[0] = set_string;
      norm_location[0]      = set_string;
    }
    const RealVector& best_terms = resp.function_values();
    Real wssr = std::sqrt(sum_squared_residuals(numUserPrimaryFns,
                                                best_terms, lsq_weights));
    Teuchos::SerialDenseVector<int, Real>
      residuals(Teuchos::View, const_cast<Real*>(&best_terms[0]),
                numUserPrimaryFns);
    resultsDB.insert(iterator_id, residuals_location, residuals);
    resultsDB.insert(iterator_id, norm_location, wssr);
    // coreDB
    resultsDB.array_insert<RealVector>
      (iterator_id, resultsNames.best_fns, point_index,
       resp.function_values());
    point_index++;
  }
}

void Minimizer::archive_best_constraints() const
{
  if (!resultsDB.active() || !numNonlinearConstraints) return;
  const size_t num_points = bestResponseArray.size();
  StrStrSizet iterator_id = run_identifier();
  StringArray location;
  if (num_points > 1)
    location.push_back("");
  location.push_back("best_constraints");
  DimScaleMap scales;
  scales.emplace(0, StringScale(String("nonlinear_constraints"),
                                response_results().function_labels(),
                                numUserPrimaryFns, numNonlinearConstraints));
  size_t point_index = 0;
  for (const auto& best_resp : bestResponseArray) {
    if (num_points > 1)
      location[0] = String("set:") + std::to_string(point_index+1);
    const RealVector& fvals = best_resp.function_values();
    Teuchos::SerialDenseVector<int, Real>
      secondary(Teuchos::View, const_cast<Real*>(&fvals[numUserPrimaryFns]),
                numNonlinearConstraints);
    resultsDB.insert(iterator_id, location, secondary, scales);
    point_index++;
  }
}

/** Uses data from the innermost model, should any Minimizer recasts be active.
    Called by multipoint return solvers. Do not directly call resize on the
    bestVariablesArray object unless you intend to share the internal content
    (letter) with other objects after assignment. */
void Minimizer::resize_best_vars_array(size_t newsize)
{
  size_t curr_size = bestVariablesArray.size();

  if (newsize < curr_size) {
    // If reducing the size, use the standard resize
    bestVariablesArray.resize(newsize);
  }
  else if (newsize > curr_size) {

    // Otherwise, we have to do the iteration ourselves so that we make use
    // of the model's current variables for envelope-letter requirements.

    // Best point arrays have to be sized and scaled in the original user space.
    Model usermodel = original_model();
    bestVariablesArray.reserve(newsize);
    for (size_t i=curr_size; i<newsize; ++i)
      bestVariablesArray.push_back(usermodel.current_variables().copy());
  }
  // else no size change
}

/** Uses data from the innermost model, should any Minimizer recasts be active.
    Called by multipoint return solvers. Do not directly call resize on the
    bestResponseArray object unless you intend to share the internal content
    (letter) with other objects after assignment. */
void Minimizer::resize_best_resp_array(size_t newsize)
{
  size_t curr_size = bestResponseArray.size();

  if (newsize < curr_size) {
    // If reducing the size, use the standard resize
    bestResponseArray.resize(newsize);
  }
  else if (newsize > curr_size) {

    // Otherwise, we have to do the iteration ourselves so that we make use
    // of the model's current response for envelope-letter requirements.

    // Best point arrays have to be sized and scaled in the original user space.
    Model usermodel = original_model();
    bestResponseArray.reserve(newsize);
    for (size_t i=curr_size; i<newsize; ++i)
      bestResponseArray.push_back(usermodel.current_response().copy());
  }
  // else no size change
}


Real Minimizer::
sum_squared_residuals(size_t num_pri_fns, const RealVector& residuals,
                      const RealVector& weights)
{
  if (!weights.empty() && num_pri_fns != weights.length()) {
    Cerr << "\nError (sum_squared_residuals): incompatible residual and weight "
         << "lengths." << std::endl;
    abort_handler(-1);
  }

  // TODO: Just call reduce/objective on the data?
  Real t = 0.;
  for (size_t j=0; j<num_pri_fns; ++j) {
    const Real& t1 = residuals[j];
    if (weights.empty())
      t += t1*t1;
    else
      t += t1*t1*weights[j];
  }

  return t;
}
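
// This helper returns sum_j w_j * r_j^2 (w_j = 1 when weights is empty);
// callers such as archive_best_residuals() and print_residuals() take the
// square root of the result to report the weighted residual norm.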


void Minimizer::print_model_resp(size_t num_pri_fns, const RealVector& best_fns,
                                 size_t num_best, size_t best_index,
                                 std::ostream& s)
{
  if (num_pri_fns > 1) s << "<<<<< Best model responses ";
  else                 s << "<<<<< Best model response ";
  if (num_best > 1) s << "(set " << best_index+1 << ") ";
  s << "=\n";
  write_data_partial(s, (size_t)0, num_pri_fns, best_fns);
}


void Minimizer::print_residuals(size_t num_terms, const RealVector& best_terms,
                                const RealVector& weights,
                                size_t num_best, size_t best_index,
                                std::ostream& s)
{
  // Print best response functions
  if (num_terms > 1) s << "<<<<< Best residual terms ";
  else               s << "<<<<< Best residual term ";
  if (num_best > 1) s << "(set " << best_index+1 << ") ";
  s << "=\n";
  write_data_partial(s, (size_t)0, num_terms, best_terms);

  // BMA TODO: if data and scaling are present, this won't print
  // correct weighted residual norms

  Real wssr = sum_squared_residuals(num_terms, best_terms, weights);

  s << "<<<<< Best residual norm ";
  if (num_best > 1) s << "(set " << best_index+1 << ") ";
  s << "= " << std::setw(write_precision+7)
    << std::sqrt(wssr) << "; 0.5 * norm^2 = "
    << std::setw(write_precision+7) << 0.5*wssr << '\n';
}

void Minimizer::archive_best_results()
{
  if (!resultsDB.active()) return;
  size_t i, num_best = bestVariablesArray.size();
  if (num_best != bestResponseArray.size()) {
    Cerr << "\nError: mismatch in lengths of bestVariables and bestResponses."
         << std::endl;
    abort_handler(-1);
  }

  StrStrSizet iterator_id = run_identifier();
  // must search in the inbound Model's space (and even that may not
  // suffice if there are additional recastings underlying this
  // Optimizer's Model) to find the function evaluation ID number
  Model orig_model = original_model();
  const String& interface_id = orig_model.interface_id();
  // use asv = 1's
  ActiveSet search_set(orig_model.response_size(), numContinuousVars);
  int eval_id;

  if (numNonlinearConstraints)
    archive_best_constraints();
  if (optimizationFlag) {
    archive_best_objective_functions();
    archive_best_variables();
  } else if (!calibrationDataFlag) {
    // the original model had least squares terms
    archive_best_residuals();
    archive_best_variables();
  } else { // calibration with data
    std::shared_ptr<DataTransformModel> dt_model_rep =
      std::static_pointer_cast<DataTransformModel>
      (dataTransformModel.model_rep());
    if (dt_model_rep->num_config_vars())
      archive_best_variables(true);
    else
      archive_best_variables();
    for (i=0; i<num_best; ++i) {
      // output best response
      dt_model_rep->archive_best_responses(resultsDB, iterator_id,
                                           bestVariablesArray[i],
                                           bestResponseArray[i], num_best, i);
    }
  }
  // Associate evaluation ids as metadata
  String set_string = "set:";
  StringArray location(1);
  for (i=0; i<num_best; ++i) {
    // Look up the evaluation id where the best point occurred. This cannot be
    // catalogued directly because the optimizers track the best iterate
    // internally and return the best results after iteration completion.
    // Therefore, perform a search in data_pairs to extract the evalId for
    // the best fn eval.
    const Variables& best_vars = bestVariablesArray[i];
    PRPCacheHIter cache_it = lookup_by_val(data_pairs, interface_id,
                                           best_vars, search_set);
    if (cache_it == data_pairs.get<hashed>().end())
      eval_id = 0;
    else
      eval_id = cache_it->eval_id();
    AttributeArray attrs = {ResultAttribute<int>("evaluation_id", eval_id)};
    if (num_best > 1) {
      location[0] = set_string + std::to_string(i+1);
      resultsDB.add_metadata_to_object(iterator_id, location, attrs);
    } else
      resultsDB.add_metadata_to_execution(iterator_id, attrs);
  }
}

/** Retrieve a MOO/NLS response based on the data returned by a single
    objective optimizer by performing a data_pairs search. This may
    get called even for a single user-specified function, since we may
    be recasting a single NLS residual into a squared
    objective. Always returns best data in the space of the original
    inbound Model. */
void Minimizer::
local_recast_retrieve(const Variables& vars, Response& response) const
{
  // TODO: could omit constraints for solvers populating them (there
  // may not exist a single DB eval with both functions, constraints)
  ActiveSet lookup_set(response.active_set());
  PRPCacheHIter cache_it
    = lookup_by_val(data_pairs, iteratedModel.interface_id(), vars, lookup_set);
  if (cache_it == data_pairs.get<hashed>().end())
    Cerr << "Warning: failure in recovery of final values for locally recast "
         << "optimization." << std::endl;
  else
    response.update(cache_it->response());
}


} // namespace Dakota