1 /*  _______________________________________________________________________
2 
3     DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
4     Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
5     This software is distributed under the GNU Lesser General Public License.
6     For more information, see the README file in the top Dakota directory.
7     _______________________________________________________________________ */
8 
9 //- Class:        Model
10 //- Description:  Class implementation
11 //- Owner:        Mike Eldred
12 
#include "dakota_system_defs.hpp"
#include "DakotaModel.hpp"
#include "ParamResponsePair.hpp"
#include "PRPMultiIndex.hpp"
#include "ParallelLibrary.hpp"
#include "ProblemDescDB.hpp"
#include "SimulationModel.hpp"
#include "NestedModel.hpp"
#include "DataFitSurrModel.hpp"
#include "HierarchSurrModel.hpp"
#include "ActiveSubspaceModel.hpp"
#include "AdaptedBasisModel.hpp"
#include "RandomFieldModel.hpp"
#include "MarginalsCorrDistribution.hpp"
#include "DakotaGraphics.hpp"
#include "pecos_stat_util.hpp"
#include "EvaluationStore.hpp"

#include <utility>  // for std::move
30 
31 static const char rcsId[]="@(#) $Id: DakotaModel.cpp 7029 2010-10-22 00:17:02Z mseldre $";
32 
33 
34 namespace Dakota
35 {
36 extern PRPCache        data_pairs;
37 extern ParallelLibrary dummy_lib;       // defined in dakota_global_defs.cpp
38 extern ProblemDescDB   dummy_db;        // defined in dakota_global_defs.cpp
39 extern EvaluationStore evaluation_store_db; // defined in dakota_global_defs.cpp
40 
41 // These globals defined here rather than in dakota_global_defs.cpp in order to
42 // minimize dakota_restart_util object file dependencies
43 Interface dummy_interface; ///< dummy Interface object used for mandatory
44                            ///< reference initialization or default virtual
45                            ///< function return by reference when a real
46                            ///< Interface instance is unavailable
47 Model     dummy_model;     ///< dummy Model object used for mandatory reference
48                            ///< initialization or default virtual function
49                            ///< return by reference when a real Model instance
50                            ///< is unavailable
51 Iterator  dummy_iterator;  ///< dummy Iterator object used for mandatory
52                            ///< reference initialization or default virtual
53                            ///< function return by reference when a real
54                            ///< Iterator instance is unavailable
55 
56 // Initialization of static model ID counters
57 size_t Model::noSpecIdNum = 0;
58 
59 
60 /** This constructor builds the base class data for all inherited
61     models.  get_model() instantiates a derived class and the derived
62     class selects this base class constructor in its initialization
63     list (to avoid the recursion of the base class constructor calling
64     get_model() again).  Since the letter IS the representation, its
65     representation pointer is set to NULL. */
Model(BaseConstructor,ProblemDescDB & problem_db)66 Model::Model(BaseConstructor, ProblemDescDB& problem_db):
67   currentVariables(problem_db.get_variables()),
68   numDerivVars(currentVariables.cv()),
69   currentResponse(
70     problem_db.get_response(SIMULATION_RESPONSE, currentVariables)),
71   numFns(currentResponse.num_functions()),
72   userDefinedConstraints(problem_db, currentVariables.shared_data()),
73   evaluationsDB(evaluation_store_db),
74   modelType(problem_db.get_string("model.type")),
75   surrogateType(problem_db.get_string("model.surrogate.type")),
76   gradientType(problem_db.get_string("responses.gradient_type")),
77   methodSource(problem_db.get_string("responses.method_source")),
78   intervalType(problem_db.get_string("responses.interval_type")),
79   fdGradStepSize(problem_db.get_rv("responses.fd_gradient_step_size")),
80   fdGradStepType(problem_db.get_string("responses.fd_gradient_step_type")),
81   gradIdAnalytic(problem_db.get_is("responses.gradients.mixed.id_analytic")),
82   gradIdNumerical(problem_db.get_is("responses.gradients.mixed.id_numerical")),
83   hessianType(problem_db.get_string("responses.hessian_type")),
84   quasiHessType(problem_db.get_string("responses.quasi_hessian_type")),
85   fdHessByFnStepSize(problem_db.get_rv("responses.fd_hessian_step_size")),
86   fdHessByGradStepSize(problem_db.get_rv("responses.fd_hessian_step_size")),
87   fdHessStepType(problem_db.get_string("responses.fd_hessian_step_type")),
88   hessIdAnalytic(problem_db.get_is("responses.hessians.mixed.id_analytic")),
89   hessIdNumerical(problem_db.get_is("responses.hessians.mixed.id_numerical")),
90   hessIdQuasi(problem_db.get_is("responses.hessians.mixed.id_quasi")),
91   warmStartFlag(false), supportsEstimDerivs(true), mappingInitialized(false),
92   probDescDB(problem_db), parallelLib(problem_db.parallel_library()),
93   modelPCIter(parallelLib.parallel_configuration_iterator()),
94   componentParallelMode(NO_PARALLEL_MODE), asynchEvalFlag(false),
95   evaluationCapacity(1),
96   // See base constructor in DakotaIterator.cpp for full discussion of output
97   // verbosity.  For models, QUIET_OUTPUT turns off response reporting and
98   // SILENT_OUTPUT additionally turns off fd_gradient parameter set reporting.
99   outputLevel(problem_db.get_short("method.output")),
100   primaryRespFnWts(probDescDB.get_rv("responses.primary_response_fn_weights")),
101   hierarchicalTagging(probDescDB.get_bool("model.hierarchical_tags")),
102   scalingOpts(problem_db, currentResponse.shared_data()),
103   modelEvaluationsDBState(EvaluationsDBState::UNINITIALIZED),
104   interfEvaluationsDBState(EvaluationsDBState::UNINITIALIZED),
105   modelId(problem_db.get_string("model.id")), modelEvalCntr(0),
106   estDerivsFlag(false), initCommsBcastFlag(false), modelAutoGraphicsFlag(false),
107   prevDSIView(EMPTY_VIEW), prevDSSView(EMPTY_VIEW), prevDSRView(EMPTY_VIEW)
108 {
109   // weights have length group if given; expand if fields present
110   expand_for_fields_sdv(currentResponse.shared_data(),
111 			probDescDB.get_rv("responses.primary_response_fn_weights"),
112 			"primary response weights", false, primaryRespFnWts);
113 
114   initialize_distribution(mvDist);
115   initialize_distribution_parameters(mvDist);
116 
117   if (modelId.empty())
118     modelId = user_auto_id();
119 
120   // TODO: Latent bug here as sense will be size 1 or group (must acct for fields)
121   // Define primaryRespFnSense BoolDeque from DB StringArray
122   StringArray db_sense
123     = problem_db.get_sa("responses.primary_response_fn_sense");
124   if (!db_sense.empty()) {
125     size_t i, num_sense = db_sense.size(), num_primary = num_primary_fns();
126     primaryRespFnSense.resize(num_primary);
127     if (num_sense == num_primary)
128       for (i=0; i<num_primary; ++i)
129 	primaryRespFnSense[i] = strbegins(strtolower(db_sense[i]), "max");
130     else if (num_sense == 1)
131       primaryRespFnSense.assign(num_primary,
132 				strbegins(strtolower(db_sense[0]), "max"));
133     else {
134       Cerr << "Error: wrong length in sense array.  Expected 0, 1, or "
135 	   << num_primary << " but saw " << num_sense << "." << std::endl;
136       abort_handler(MODEL_ERROR);
137     }
138   }
139 
140   // Promote fdGradStepSize/fdHessByFnStepSize/fdHessByGradStepSize to defaults
141   // if needed.  Note: the fdStepSize arrays specialize by variable, whereas
142   // mixed grads/Hessians specialize by function.
143   if ( gradientType == "numerical" ||
144        ( gradientType == "mixed" && !gradIdNumerical.empty() ) ) {
145     if (fdGradStepSize.empty()) {
146       fdGradStepSize.resize(1);
147       fdGradStepSize[0] = 0.001;
148     }
149   }
150   if ( hessianType == "numerical" ||
151        ( hessianType == "mixed" && !hessIdNumerical.empty() ) ) {
152     // fdHessByFnStepSize and fdHessByGradStepSize can only differ currently
153     // in the case of assignment of default values, since the same
154     // fd_hessian_step_size input is reused for both first- and second-order
155     // differencing.  If needed in the future (numerical Hessians with mixed
156     // gradients require both first- and second-order step sizes), separate
157     // inputs could be added and easily accomodated here.
158     if (fdHessByFnStepSize.empty()) {
159       fdHessByFnStepSize.resize(1);
160       fdHessByFnStepSize[0] = 0.002;
161     }
162     if (fdHessByGradStepSize.empty()) {
163       fdHessByGradStepSize.resize(1);
164       fdHessByGradStepSize[0] = 0.001;
165     }
166   }
167 
168   /*
169   // Populate gradient/Hessian attributes for use within the iterator hierarchy.
170   // Note: the fd step size arrays specialize by variable, whereas the mixed
171   // grads/Hessians specialize by function.
172   if (outputLevel >= VERBOSE_OUTPUT)
173     Cout << "gradientType = " << gradientType << '\n';
174   if (gradientType == "numerical") {
175     if (methodSource == "vendor") {
176       const RealVector& fdgss
177 	= probDescDB.get_rv("responses.fd_gradient_step_size");
178       if (fdgss.length()) // else use default from initializer list
179 	fdGradStepSize = fdgss[0];
180     }
181     if (outputLevel >= VERBOSE_OUTPUT)
182       Cout << "Numerical gradients using " << intervalType
183 	   << " differences\nto be calculated by the " << methodSource
184 	   << " finite difference routine.\n";
185   }
186   else if (gradientType == "mixed" && outputLevel >= VERBOSE_OUTPUT) {
187     // Vendor numerical is no good in mixed mode except maybe for NPSOL/NLSSOL
188     if (methodSource == "vendor") {
189       Cerr << "Error: Mixed gradient specification not currently valid with "
190            << "vendor numerical.\nSelect dakota as method_source instead."
191 	   << std::endl;
192       abort_handler(MODEL_ERROR);
193     }
194     Cout << "Mixed gradients: analytic gradients for functions { ";
195     for (ILCIter cit=mixed_grad_analytic_ids.begin();
196 	 cit!=mixed_grad_analytic_ids.end(); cit++)
197       Cout << *cit << ' ';
198     Cout << "} and\nnumerical gradients for functions { ";
199     for (ILCIter cit=mixed_grad_numerical_ids.begin();
200 	 cit!=mixed_grad_numerical_ids.end(); cit++)
201       Cout << *cit << ' ';
202     Cout << "} using " << intervalType << " differences\ncalculated by the "
203 	 << methodSource << " routine.\n";
204   }
205   Cout << "hessianType = " << hessianType << '\n';
206   if ( hessianType == "numerical" || ( hessianType == "mixed" &&
207       !probDescDB.get_is("responses.hessians.mixed.id_numerical").empty() ) ) {
208     const RealVector& fdhss
209       = probDescDB.get_rv("responses.fd_hessian_step_size");
210     if (fdhss.length()) // else use defaults from initializer list
211       fdHessByGradStepSize = fdHessByFnStepSize = fdhss[0];
212   }
213   */
214 }
215 
216 
217 Model::
Model(LightWtBaseConstructor,ProblemDescDB & problem_db,ParallelLibrary & parallel_lib,const SharedVariablesData & svd,bool share_svd,const SharedResponseData & srd,bool share_srd,const ActiveSet & set,short output_level)218 Model(LightWtBaseConstructor, ProblemDescDB& problem_db,
219       ParallelLibrary& parallel_lib,
220       const SharedVariablesData& svd, bool share_svd,
221       const SharedResponseData&  srd, bool share_srd,
222       const ActiveSet& set, short output_level):
223   numDerivVars(set.derivative_vector().size()),
224   numFns(set.request_vector().size()), evaluationsDB(evaluation_store_db),
225   fdGradStepType("relative"), fdHessStepType("relative"), warmStartFlag(false),
226   supportsEstimDerivs(true), mappingInitialized(false), probDescDB(problem_db),
227   parallelLib(parallel_lib),
228   modelPCIter(parallel_lib.parallel_configuration_iterator()),
229   componentParallelMode(NO_PARALLEL_MODE), asynchEvalFlag(false),
230   evaluationCapacity(1), outputLevel(output_level),
231   mvDist(Pecos::MARGINALS_CORRELATIONS), hierarchicalTagging(false),
232   modelEvaluationsDBState(EvaluationsDBState::UNINITIALIZED),
233   interfEvaluationsDBState(EvaluationsDBState::UNINITIALIZED),
234   modelId(no_spec_id()), // to be replaced by derived ctors
235   modelEvalCntr(0), estDerivsFlag(false), initCommsBcastFlag(false),
236   modelAutoGraphicsFlag(false), prevDSIView(EMPTY_VIEW),
237   prevDSSView(EMPTY_VIEW), prevDSRView(EMPTY_VIEW)
238 {
239   if (share_svd) {
240     currentVariables       =   Variables(svd);
241     userDefinedConstraints = Constraints(svd);
242   }
243   else {
244     SharedVariablesData new_svd(svd.copy());
245     //SharedVariablesData new_svd(svd.view(), svd.components_totals()); // alt
246     currentVariables       =   Variables(new_svd);
247     userDefinedConstraints = Constraints(new_svd);
248   }
249 
250   currentResponse = (share_srd) ?
251     Response(srd, set) : Response(srd.response_type(), set);
252 }
253 
254 
255 /** This constructor also builds the base class data for inherited models.
256     However, it is used for recast models which are instantiated on the fly.
257     Therefore it only initializes a small subset of attributes. */
258 Model::
Model(LightWtBaseConstructor,ProblemDescDB & problem_db,ParallelLibrary & parallel_lib)259 Model(LightWtBaseConstructor, ProblemDescDB& problem_db,
260       ParallelLibrary& parallel_lib):
261   warmStartFlag(false), supportsEstimDerivs(true), mappingInitialized(false),
262   probDescDB(problem_db), parallelLib(parallel_lib),
263   evaluationsDB(evaluation_store_db),
264   modelPCIter(parallel_lib.parallel_configuration_iterator()),
265   componentParallelMode(NO_PARALLEL_MODE), asynchEvalFlag(false),
266   evaluationCapacity(1), outputLevel(NORMAL_OUTPUT),
267   mvDist(Pecos::MARGINALS_CORRELATIONS), hierarchicalTagging(false),
268   modelEvaluationsDBState(EvaluationsDBState::UNINITIALIZED),
269   interfEvaluationsDBState(EvaluationsDBState::UNINITIALIZED),
270   modelId(no_spec_id()), // to be replaced by derived ctors
271   modelEvalCntr(0), estDerivsFlag(false),
272   initCommsBcastFlag(false), modelAutoGraphicsFlag(false),
273   prevDSIView(EMPTY_VIEW), prevDSSView(EMPTY_VIEW), prevDSRView(EMPTY_VIEW)
274 { /* empty ctor */ }
275 
276 
277 /** The default constructor is used in vector<Model> instantiations
278     and for initialization of Model objects contained in Iterator and
279     derived Strategy classes.  modelRep is NULL in this case (a
280     populated problem_db is needed to build a meaningful Model
281     object). */
Model()282 Model::Model():
283   probDescDB(dummy_db), parallelLib(dummy_lib),
284   evaluationsDB(evaluation_store_db)
285 { /* empty ctor */ }
286 
287 
288 /** Used for envelope instantiations within strategy constructors.
289     Envelope constructor only needs to extract enough data to properly
290     execute get_model, since Model(BaseConstructor, problem_db)
291     builds the actual base class data for the derived models. */
Model(ProblemDescDB & problem_db)292 Model::Model(ProblemDescDB& problem_db):
293   probDescDB(problem_db), parallelLib(problem_db.parallel_library()),
294   evaluationsDB(evaluation_store_db), modelRep(get_model(problem_db))
295 {
296   if ( !modelRep ) // bad type or insufficient memory
297     abort_handler(MODEL_ERROR);
298 }
299 
300 
301 /** Used only by the envelope constructor to initialize modelRep to the
302     appropriate derived type, as given by the modelType attribute. */
get_model(ProblemDescDB & problem_db)303 std::shared_ptr<Model> Model::get_model(ProblemDescDB& problem_db)
304 {
305   // These instantiations will NOT recurse on the Model(problem_db)
306   // constructor due to the use of BaseConstructor.
307 
308   const String& model_type = problem_db.get_string("model.type");
309   if ( model_type == "simulation" )
310     return std::make_shared<SimulationModel>(problem_db);
311   else if ( model_type == "nested")
312     return std::make_shared<NestedModel>(problem_db);
313   else if ( model_type == "surrogate") {
314     if (problem_db.get_string("model.surrogate.type") == "hierarchical")
315       return std::make_shared<HierarchSurrModel>(problem_db); // hierarchical approx
316     else
317       return std::make_shared<DataFitSurrModel>(problem_db);  // local/multipt/global approx
318   }
319   else if ( model_type == "active_subspace" )
320     return std::make_shared<ActiveSubspaceModel>(problem_db);
321   else if ( model_type == "adapted_basis" )
322     return std::make_shared<AdaptedBasisModel>(problem_db);
323   else if ( model_type == "random_field" )
324     return std::make_shared<RandomFieldModel>(problem_db);
325   else
326     Cerr << "Invalid model type: " << model_type << std::endl;
327 
328   return std::shared_ptr<Model>();
329 }
330 
331 
332 /** Copy constructor manages sharing of modelRep. */
Model(const Model & model)333 Model::Model(const Model& model): probDescDB(model.problem_description_db()),
334   parallelLib(probDescDB.parallel_library()),
335   evaluationsDB(evaluation_store_db), modelRep(model.modelRep)
336 { /* empty ctor */ }
337 
338 
operator =(const Model & model)339 Model Model::operator=(const Model& model)
340 {
341   modelRep = model.modelRep;
342   return *this;
343 }
344 
345 
~Model()346 Model::~Model()
347 { /* empty dtor */ }
348 
349 
350 /** The assign_rep() function is used for publishing derived class
351     letters to existing envelopes, as opposed to sharing
352     representations among multiple envelopes (in particular,
353     assign_rep is passed a letter object and operator= is passed an
354     envelope object).
355 
356     Use case assumes the incoming letter is instantiated on the fly
357     and has no envelope.  This case is modeled after get_model(): a
358     letter is dynamically allocated and passed into assign_rep (its
359     memory management is passed over to the envelope).
360 
361     If the letter happens to be managed by another envelope, it will
362     persist as long as the last envelope referencing it. */
assign_rep(std::shared_ptr<Model> model_rep)363 void Model::assign_rep(std::shared_ptr<Model> model_rep)
364 {
365   modelRep = model_rep;
366 }
367 
368 
369 /** Build random variable distribution types and active subset.  This
370     function is used when the Model variables are in x-space. */
371 void Model::
initialize_distribution(Pecos::MultivariateDistribution & mv_dist,bool active_only)372 initialize_distribution(Pecos::MultivariateDistribution& mv_dist,
373 			bool active_only)
374 {
375   // Notes:
376   // > Model base instantiates the x-space MultivariateDistribution, while
377   //   derived ProbabilityTransformModel manages a ProbabilityTransform
378   //   (which makes a shallow copy of x-dist and creates a u-dist).
379   // > This fn houses data for discrete design/state and must now be invoked
380   //   in non-UQ contexts.
381 
382   // Previous (transformation-based) logic was restricted to active continuous:
383   //ShortArray x_types(currentVariables.cv()); // active cont
384   //ShortArray rv_types(probDescDB.get_sizet("variables.uncertain")); c/d uv
385   size_t num_rv = (active_only) ?
386     currentVariables.cv()  + currentVariables.div() +
387     currentVariables.dsv() + currentVariables.drv() :
388     currentVariables.tv(); // all vars (active subset defined using BitArray)
389   ShortArray rv_types(num_rv);  BitArray active_vars(num_rv);// init bits to 0
390 
391   bool cdv, ddv, cauv, dauv, ceuv, deuv, csv, dsv;
392   currentVariables.shared_data().active_subsets(cdv,  ddv,  cauv, dauv,
393 						ceuv, deuv, csv,  dsv);
394   size_t i, start_rv = 0;
395 
396   // Implied by call to this function ... ?
397   //switch (mv_dist.type()) {
398   //case Pecos::MARGINALS_CORRELATIONS: {
399 
400   // Continuous design
401 
402   if (!active_only || cdv) {
403     num_rv = probDescDB.get_sizet("variables.continuous_design");
404     if (num_rv) {
405       assign_value(rv_types, Pecos::CONTINUOUS_RANGE, start_rv, num_rv);
406       if (cdv) assign_value(active_vars, true, start_rv, num_rv);
407       start_rv += num_rv;
408     }
409   }
410 
411   // Discrete design
412 
413   if (!active_only || ddv) {
414     num_rv = probDescDB.get_sizet("variables.discrete_design_range");
415     if (num_rv) {
416       assign_value(rv_types, Pecos::DISCRETE_RANGE, start_rv, num_rv);
417       if (ddv) assign_value(active_vars, true, start_rv, num_rv);
418       start_rv += num_rv;
419     }
420     num_rv = probDescDB.get_sizet("variables.discrete_design_set_int");
421     if (num_rv) {
422       assign_value(rv_types, Pecos::DISCRETE_SET_INT, start_rv, num_rv);
423       if (ddv) assign_value(active_vars, true, start_rv, num_rv);
424       start_rv += num_rv;
425     }
426     num_rv = probDescDB.get_sizet("variables.discrete_design_set_string");
427     if (num_rv) {
428       assign_value(rv_types, Pecos::DISCRETE_SET_STRING, start_rv, num_rv);
429       if (ddv) assign_value(active_vars, true, start_rv, num_rv);
430       start_rv += num_rv;
431     }
432     num_rv = probDescDB.get_sizet("variables.discrete_design_set_real");
433     if (num_rv) {
434       assign_value(rv_types, Pecos::DISCRETE_SET_REAL, start_rv, num_rv);
435       if (ddv) assign_value(active_vars, true, start_rv, num_rv);
436       start_rv += num_rv;
437     }
438   }
439 
440   // Continuous aleatory
441 
442   if (!active_only || cauv) {
443     Real dbl_inf = std::numeric_limits<Real>::infinity();
444     num_rv = probDescDB.get_sizet("variables.normal_uncertain");
445     if (num_rv) {
446       const RealVector& n_l_bnds
447 	= probDescDB.get_rv("variables.normal_uncertain.lower_bounds");
448       const RealVector& n_u_bnds
449 	= probDescDB.get_rv("variables.normal_uncertain.upper_bounds");
450       bool l_bnds = !n_l_bnds.empty(), u_bnds = !n_u_bnds.empty();
451       if (!l_bnds && !u_bnds) // won't happen: parser -> +/-inf
452 	assign_value(rv_types, Pecos::NORMAL, start_rv, num_rv);
453       else
454 	for (i=0; i<num_rv; ++i)
455 	  rv_types[start_rv+i] = ( ( l_bnds && n_l_bnds[i] > -dbl_inf ) ||
456 				   ( u_bnds && n_u_bnds[i] <  dbl_inf ) ) ?
457 	    Pecos::BOUNDED_NORMAL : Pecos::NORMAL;
458       if (cauv) assign_value(active_vars, true, start_rv, num_rv);
459       start_rv += num_rv;
460     }
461     num_rv = probDescDB.get_sizet("variables.lognormal_uncertain");
462     if (num_rv) {
463       const RealVector& ln_l_bnds
464 	= probDescDB.get_rv("variables.lognormal_uncertain.lower_bounds");
465       const RealVector& ln_u_bnds
466 	= probDescDB.get_rv("variables.lognormal_uncertain.upper_bounds");
467       bool l_bnds = !ln_l_bnds.empty(), u_bnds = !ln_u_bnds.empty();
468       if (!l_bnds && !u_bnds) // won't happen: parser -> 0/inf
469 	assign_value(rv_types, Pecos::LOGNORMAL, start_rv, num_rv);
470       else
471 	for (i=0; i<num_rv; ++i)
472 	  rv_types[start_rv+i] = ( ( l_bnds && ln_l_bnds[i] > 0. ) ||
473 				   ( u_bnds && ln_u_bnds[i] < dbl_inf ) ) ?
474 	    Pecos::BOUNDED_LOGNORMAL : Pecos::LOGNORMAL;
475       if (cauv) assign_value(active_vars, true, start_rv, num_rv);
476       start_rv += num_rv;
477     }
478     num_rv = probDescDB.get_sizet("variables.uniform_uncertain");
479     if (num_rv) {
480       assign_value(rv_types, Pecos::UNIFORM, start_rv, num_rv);
481       if (cauv) assign_value(active_vars, true, start_rv, num_rv);
482       start_rv += num_rv;
483     }
484     num_rv = probDescDB.get_sizet("variables.loguniform_uncertain");
485     if (num_rv) {
486       assign_value(rv_types, Pecos::LOGUNIFORM, start_rv, num_rv);
487       if (cauv) assign_value(active_vars, true, start_rv, num_rv);
488       start_rv += num_rv;
489     }
490     num_rv = probDescDB.get_sizet("variables.triangular_uncertain");
491     if (num_rv) {
492       assign_value(rv_types, Pecos::TRIANGULAR, start_rv, num_rv);
493       if (cauv) assign_value(active_vars, true, start_rv, num_rv);
494       start_rv += num_rv;
495     }
496     num_rv = probDescDB.get_sizet("variables.exponential_uncertain");
497     if (num_rv) {
498       assign_value(rv_types, Pecos::EXPONENTIAL, start_rv, num_rv);
499       if (cauv) assign_value(active_vars, true, start_rv, num_rv);
500       start_rv += num_rv;
501     }
502     num_rv = probDescDB.get_sizet("variables.beta_uncertain");
503     if (num_rv) {
504       assign_value(rv_types, Pecos::BETA, start_rv, num_rv);
505       if (cauv) assign_value(active_vars, true, start_rv, num_rv);
506       start_rv += num_rv;
507     }
508     num_rv = probDescDB.get_sizet("variables.gamma_uncertain");
509     if (num_rv) {
510       assign_value(rv_types, Pecos::GAMMA, start_rv, num_rv);
511       if (cauv) assign_value(active_vars, true, start_rv, num_rv);
512       start_rv += num_rv;
513     }
514 
515     // Note: Inv gamma is not part of variable spec (calibration hyperparameter)
516 
517     num_rv = probDescDB.get_sizet("variables.gumbel_uncertain");
518     if (num_rv) {
519       assign_value(rv_types, Pecos::GUMBEL, start_rv, num_rv);
520       if (cauv) assign_value(active_vars, true, start_rv, num_rv);
521       start_rv += num_rv;
522     }
523     num_rv = probDescDB.get_sizet("variables.frechet_uncertain");
524     if (num_rv) {
525       assign_value(rv_types, Pecos::FRECHET, start_rv, num_rv);
526       if (cauv) assign_value(active_vars, true, start_rv, num_rv);
527       start_rv += num_rv;
528     }
529     num_rv = probDescDB.get_sizet("variables.weibull_uncertain");
530     if (num_rv) {
531       assign_value(rv_types, Pecos::WEIBULL, start_rv, num_rv);
532       if (cauv) assign_value(active_vars, true, start_rv, num_rv);
533       start_rv += num_rv;
534     }
535     num_rv = probDescDB.get_sizet("variables.histogram_uncertain.bin");
536     if (num_rv) {
537       assign_value(rv_types, Pecos::HISTOGRAM_BIN, start_rv, num_rv);
538       if (cauv) assign_value(active_vars, true, start_rv, num_rv);
539       start_rv += num_rv;
540     }
541   }
542 
543   // Discrete aleatory
544 
545   if (!active_only || dauv) {
546     num_rv = probDescDB.get_sizet("variables.poisson_uncertain");
547     if (num_rv) {
548       assign_value(rv_types, Pecos::POISSON, start_rv, num_rv);
549       if (dauv) assign_value(active_vars, true, start_rv, num_rv);
550       start_rv += num_rv;
551     }
552     num_rv = probDescDB.get_sizet("variables.binomial_uncertain");
553     if (num_rv) {
554       assign_value(rv_types, Pecos::BINOMIAL, start_rv, num_rv);
555       if (dauv) assign_value(active_vars, true, start_rv, num_rv);
556       start_rv += num_rv;
557     }
558     num_rv = probDescDB.get_sizet("variables.negative_binomial_uncertain");
559     if (num_rv) {
560       assign_value(rv_types, Pecos::NEGATIVE_BINOMIAL, start_rv, num_rv);
561       if (dauv) assign_value(active_vars, true, start_rv, num_rv);
562       start_rv += num_rv;
563     }
564     num_rv = probDescDB.get_sizet("variables.geometric_uncertain");
565     if (num_rv) {
566       assign_value(rv_types, Pecos::GEOMETRIC, start_rv, num_rv);
567       if (dauv) assign_value(active_vars, true, start_rv, num_rv);
568       start_rv += num_rv;
569     }
570     num_rv = probDescDB.get_sizet("variables.hypergeometric_uncertain");
571     if (num_rv) {
572       assign_value(rv_types, Pecos::HYPERGEOMETRIC, start_rv, num_rv);
573       if (dauv) assign_value(active_vars, true, start_rv, num_rv);
574       start_rv += num_rv;
575     }
576     num_rv = probDescDB.get_sizet("variables.histogram_uncertain.point_int");
577     if (num_rv) {
578       assign_value(rv_types, Pecos::HISTOGRAM_PT_INT, start_rv, num_rv);
579       if (dauv) assign_value(active_vars, true, start_rv, num_rv);
580       start_rv += num_rv;
581     }
582     num_rv = probDescDB.get_sizet("variables.histogram_uncertain.point_string");
583     if (num_rv) {
584       assign_value(rv_types, Pecos::HISTOGRAM_PT_STRING, start_rv, num_rv);
585       if (dauv) assign_value(active_vars, true, start_rv, num_rv);
586       start_rv += num_rv;
587     }
588     num_rv = probDescDB.get_sizet("variables.histogram_uncertain.point_real");
589     if (num_rv) {
590       assign_value(rv_types, Pecos::HISTOGRAM_PT_REAL, start_rv, num_rv);
591       if (dauv) assign_value(active_vars, true, start_rv, num_rv);
592       start_rv += num_rv;
593     }
594   }
595 
596   // Continuous epistemic
597 
598   if (!active_only || ceuv) {
599     num_rv = probDescDB.get_sizet("variables.continuous_interval_uncertain");
600     if (num_rv) {
601       assign_value(rv_types, Pecos::CONTINUOUS_INTERVAL_UNCERTAIN,
602 		   start_rv, num_rv);
603       if (ceuv) assign_value(active_vars, true, start_rv, num_rv);
604       start_rv += num_rv;
605     }
606   }
607 
608   // Discrete epistemic
609 
610   if (!active_only || deuv) {
611     num_rv = probDescDB.get_sizet("variables.discrete_interval_uncertain");
612     if (num_rv) {
613       assign_value(rv_types,Pecos::DISCRETE_INTERVAL_UNCERTAIN,start_rv,num_rv);
614       if (deuv) assign_value(active_vars, true, start_rv, num_rv);
615       start_rv += num_rv;
616     }
617     num_rv = probDescDB.get_sizet("variables.discrete_uncertain_set_int");
618     if (num_rv) {
619       assign_value(rv_types, Pecos::DISCRETE_UNCERTAIN_SET_INT,start_rv,num_rv);
620       if (deuv) assign_value(active_vars, true, start_rv, num_rv);
621       start_rv += num_rv;
622     }
623     num_rv = probDescDB.get_sizet("variables.discrete_uncertain_set_string");
624     if (num_rv) {
625       assign_value(rv_types, Pecos::DISCRETE_UNCERTAIN_SET_STRING,
626 		   start_rv, num_rv);
627       if (deuv) assign_value(active_vars, true, start_rv, num_rv);
628       start_rv += num_rv;
629     }
630     num_rv = probDescDB.get_sizet("variables.discrete_uncertain_set_real");
631     if (num_rv) {
632       assign_value(rv_types,Pecos::DISCRETE_UNCERTAIN_SET_REAL,start_rv,num_rv);
633       if (deuv) assign_value(active_vars, true, start_rv, num_rv);
634       start_rv += num_rv;
635     }
636   }
637 
638   // Continuous state
639 
640   if (!active_only || csv) {
641     num_rv = probDescDB.get_sizet("variables.continuous_state");
642     if (num_rv) {
643       assign_value(rv_types, Pecos::CONTINUOUS_RANGE, start_rv, num_rv);
644       if (csv) assign_value(active_vars, true, start_rv, num_rv);
645       start_rv += num_rv;
646     }
647 
648     // Discrete state
649 
650     if (!active_only || dsv) {
651       num_rv = probDescDB.get_sizet("variables.discrete_state_range");
652       if (num_rv) {
653 	assign_value(rv_types, Pecos::DISCRETE_RANGE, start_rv, num_rv);
654 	if (dsv) assign_value(active_vars, true, start_rv, num_rv);
655 	start_rv += num_rv;
656       }
657       num_rv = probDescDB.get_sizet("variables.discrete_state_set_int");
658       if (num_rv) {
659 	assign_value(rv_types, Pecos::DISCRETE_SET_INT, start_rv, num_rv);
660 	if (dsv) assign_value(active_vars, true, start_rv, num_rv);
661 	start_rv += num_rv;
662       }
663       num_rv = probDescDB.get_sizet("variables.discrete_state_set_string");
664       if (num_rv) {
665 	assign_value(rv_types, Pecos::DISCRETE_SET_STRING, start_rv, num_rv);
666 	if (dsv) assign_value(active_vars, true, start_rv, num_rv);
667 	start_rv += num_rv;
668       }
669       num_rv = probDescDB.get_sizet("variables.discrete_state_set_real");
670       if (num_rv) {
671 	assign_value(rv_types, Pecos::DISCRETE_SET_REAL, start_rv, num_rv);
672 	if (dsv) assign_value(active_vars, true, start_rv, num_rv);
673 	//start_rv += num_rv;
674       }
675     }
676   }
677 
678   mv_dist = Pecos::MultivariateDistribution(Pecos::MARGINALS_CORRELATIONS);
679   std::shared_ptr<Pecos::MarginalsCorrDistribution> mvd_rep =
680     std::static_pointer_cast<Pecos::MarginalsCorrDistribution>
681     (mv_dist.multivar_dist_rep());
682   mvd_rep->initialize_types(rv_types, active_vars);
683 }
684 
685 
/** Pull distribution parameters (bounds, moments, set value/probability
    pairs, ...) for each random variable from the problem DB and push them
    into mv_dist.  The traversal order (design, aleatory uncertain, epistemic
    uncertain, state) and the active_only subset logic must mirror the
    corresponding type-initialization traversal, since start_rv indexes
    positions within the same random-variable sequence.  Aleatory variables
    are flagged in active_corr so the user-specified correlation matrix is
    applied to the proper subset at the end. */
void Model::
initialize_distribution_parameters(Pecos::MultivariateDistribution& mv_dist,
				   bool active_only)
{
  // Implied by call to this function ... ?
  //switch (mv_dist.type()) {
  //case Pecos::MARGINALS_CORRELATIONS: {

    // Down-cast to the marginals+correlations representation to access the
    // push_parameters() / initialize_correlations() API.
    std::shared_ptr<Pecos::MarginalsCorrDistribution> mvd_rep =
      std::static_pointer_cast<Pecos::MarginalsCorrDistribution>
      (mv_dist.multivar_dist_rep());
    size_t start_rv = 0, num_rv = (active_only) ?
      currentVariables.cv()  + currentVariables.div() +
      currentVariables.dsv() + currentVariables.drv() :
      currentVariables.tv(); // all vars (active subset defined using BitArray)
    BitArray active_corr(num_rv); // init bits to 0; activate c/d auv below

    // Which variable subsets are currently active (design, aleatory,
    // epistemic, state; continuous and discrete)?
    bool cdv, ddv, cauv, dauv, ceuv, deuv, csv, dsv;
    currentVariables.shared_data().active_subsets(cdv,  ddv,  cauv, dauv,
						  ceuv, deuv, csv,  dsv);

    // Continuous design
    // RANGE type could be design or state, so use count-based API

    if (!active_only || cdv) {
      num_rv = probDescDB.get_sizet("variables.continuous_design");
      if (num_rv) {
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::CR_LWR_BND,
	  probDescDB.get_rv("variables.continuous_design.lower_bounds"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::CR_UPR_BND,
	  probDescDB.get_rv("variables.continuous_design.upper_bounds"));
	start_rv += num_rv;
      }
    }

    // Discrete design
    // RANGE and SET types could be design or state, so use count-based API

    if (!active_only || ddv) {
      num_rv = probDescDB.get_sizet("variables.discrete_design_range");
      if (num_rv) {
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::DR_LWR_BND,
          probDescDB.get_iv("variables.discrete_design_range.lower_bounds"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::DR_UPR_BND,
          probDescDB.get_iv("variables.discrete_design_range.upper_bounds"));
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.discrete_design_set_int");
      if (num_rv) {
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::DSI_VALUES,
          probDescDB.get_isa("variables.discrete_design_set_int.values"));
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.discrete_design_set_string");
      if (num_rv) {
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::DSS_VALUES,
          probDescDB.get_ssa("variables.discrete_design_set_string.values"));
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.discrete_design_set_real");
      if (num_rv) {
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::DSR_VALUES,
          probDescDB.get_rsa("variables.discrete_design_set_real.values"));
	start_rv += num_rv;
      }
    }

    // Continuous aleatory
    // (these types participate in correlations: flag bits in active_corr)

    if (!active_only || cauv) {
      // RV type could be {,BOUNDED_}NORMAL, so use count-based API
      num_rv = probDescDB.get_sizet("variables.normal_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::N_MEAN,
          probDescDB.get_rv("variables.normal_uncertain.means"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::N_STD_DEV,
          probDescDB.get_rv("variables.normal_uncertain.std_deviations"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::N_LWR_BND,
          probDescDB.get_rv("variables.normal_uncertain.lower_bounds"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::N_UPR_BND,
          probDescDB.get_rv("variables.normal_uncertain.upper_bounds"));
	//N_LOCATION,N_SCALE not mapped from ProblemDescDB
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      // RV type could be {,BOUNDED_}LOGNORMAL, so use count-based API
      num_rv = probDescDB.get_sizet("variables.lognormal_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::LN_MEAN,
          probDescDB.get_rv("variables.lognormal_uncertain.means"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::LN_STD_DEV,
          probDescDB.get_rv("variables.lognormal_uncertain.std_deviations"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::LN_LAMBDA,
          probDescDB.get_rv("variables.lognormal_uncertain.lambdas"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::LN_ZETA,
          probDescDB.get_rv("variables.lognormal_uncertain.zetas"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::LN_ERR_FACT,
          probDescDB.get_rv("variables.lognormal_uncertain.error_factors"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::LN_LWR_BND,
          probDescDB.get_rv("variables.lognormal_uncertain.lower_bounds"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::LN_UPR_BND,
          probDescDB.get_rv("variables.lognormal_uncertain.upper_bounds"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      // Remaining aleatory types are unambiguous: the type-based
      // push_parameters overload targets all RVs of that type.
      num_rv = probDescDB.get_sizet("variables.uniform_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::UNIFORM, Pecos::U_LWR_BND,
          probDescDB.get_rv("variables.uniform_uncertain.lower_bounds"));
	mvd_rep->push_parameters(Pecos::UNIFORM, Pecos::U_UPR_BND,
          probDescDB.get_rv("variables.uniform_uncertain.upper_bounds"));
	//U_LOCATION,U_SCALE not mapped from ProblemDescDB
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.loguniform_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::LOGUNIFORM, Pecos::LU_LWR_BND,
          probDescDB.get_rv("variables.loguniform_uncertain.lower_bounds"));
	mvd_rep->push_parameters(Pecos::LOGUNIFORM, Pecos::LU_UPR_BND,
          probDescDB.get_rv("variables.loguniform_uncertain.upper_bounds"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.triangular_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::TRIANGULAR, Pecos::T_MODE,
          probDescDB.get_rv("variables.triangular_uncertain.modes"));
	mvd_rep->push_parameters(Pecos::TRIANGULAR, Pecos::T_LWR_BND,
          probDescDB.get_rv("variables.triangular_uncertain.lower_bounds"));
	mvd_rep->push_parameters(Pecos::TRIANGULAR, Pecos::T_UPR_BND,
          probDescDB.get_rv("variables.triangular_uncertain.upper_bounds"));
	//T_LOCATION,T_SCALE not mapped from ProblemDescDB
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.exponential_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::EXPONENTIAL, Pecos::E_BETA,
          probDescDB.get_rv("variables.exponential_uncertain.betas"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.beta_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::BETA, Pecos::BE_ALPHA,
          probDescDB.get_rv("variables.beta_uncertain.alphas"));
	mvd_rep->push_parameters(Pecos::BETA, Pecos::BE_BETA,
          probDescDB.get_rv("variables.beta_uncertain.betas"));
	mvd_rep->push_parameters(Pecos::BETA, Pecos::BE_LWR_BND,
          probDescDB.get_rv("variables.beta_uncertain.lower_bounds"));
	mvd_rep->push_parameters(Pecos::BETA, Pecos::BE_UPR_BND,
          probDescDB.get_rv("variables.beta_uncertain.upper_bounds"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.gamma_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::GAMMA, Pecos::GA_ALPHA,
          probDescDB.get_rv("variables.gamma_uncertain.alphas"));
	mvd_rep->push_parameters(Pecos::GAMMA, Pecos::GA_BETA,
          probDescDB.get_rv("variables.gamma_uncertain.betas"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }

      // Inverse gamma is not part of variable spec (calibration hyperparameter)

      num_rv = probDescDB.get_sizet("variables.gumbel_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::GUMBEL, Pecos::GU_ALPHA,
          probDescDB.get_rv("variables.gumbel_uncertain.alphas"));
	mvd_rep->push_parameters(Pecos::GUMBEL, Pecos::GU_BETA,
          probDescDB.get_rv("variables.gumbel_uncertain.betas"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.frechet_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::FRECHET, Pecos::F_ALPHA,
          probDescDB.get_rv("variables.frechet_uncertain.alphas"));
	mvd_rep->push_parameters(Pecos::FRECHET, Pecos::F_BETA,
          probDescDB.get_rv("variables.frechet_uncertain.betas"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.weibull_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::WEIBULL, Pecos::W_ALPHA,
          probDescDB.get_rv("variables.weibull_uncertain.alphas"));
	mvd_rep->push_parameters(Pecos::WEIBULL, Pecos::W_BETA,
          probDescDB.get_rv("variables.weibull_uncertain.betas"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.histogram_uncertain.bin");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::HISTOGRAM_BIN, Pecos::H_BIN_PAIRS,
          probDescDB.get_rrma("variables.histogram_uncertain.bin_pairs"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
    }

    // Discrete aleatory
    // (these types also participate in correlations)

    if (!active_only || dauv) {
      num_rv = probDescDB.get_sizet("variables.poisson_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::POISSON, Pecos::P_LAMBDA,
          probDescDB.get_rv("variables.poisson_uncertain.lambdas"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.binomial_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::BINOMIAL, Pecos::BI_P_PER_TRIAL,
          probDescDB.get_rv("variables.binomial_uncertain.prob_per_trial"));
	// DB stores trial counts as IntVector; Pecos expects UIntArray
	UIntArray num_tr;
	copy_data(probDescDB.get_iv(
	  "variables.binomial_uncertain.num_trials"), num_tr);
	mvd_rep->push_parameters(Pecos::BINOMIAL, Pecos::BI_TRIALS, num_tr);
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.negative_binomial_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::NEGATIVE_BINOMIAL,
	  Pecos::NBI_P_PER_TRIAL, probDescDB.get_rv(
          "variables.negative_binomial_uncertain.prob_per_trial"));
	UIntArray num_tr;
	copy_data(probDescDB.get_iv(
	  "variables.negative_binomial_uncertain.num_trials"), num_tr);
	mvd_rep->
	  push_parameters(Pecos::NEGATIVE_BINOMIAL, Pecos::NBI_TRIALS, num_tr);
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.geometric_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::GEOMETRIC, Pecos::GE_P_PER_TRIAL,
          probDescDB.get_rv("variables.geometric_uncertain.prob_per_trial"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.hypergeometric_uncertain");
      if (num_rv) {
	UIntArray tot_pop, sel_pop, num_drawn;
	copy_data(probDescDB.get_iv(
	  "variables.hypergeometric_uncertain.total_population"), tot_pop);
	copy_data(probDescDB.get_iv(
          "variables.hypergeometric_uncertain.selected_population"), sel_pop);
	copy_data(probDescDB.get_iv(
	  "variables.hypergeometric_uncertain.num_drawn"), num_drawn);
	mvd_rep->
	  push_parameters(Pecos::HYPERGEOMETRIC, Pecos::HGE_TOT_POP, tot_pop);
	mvd_rep->
	  push_parameters(Pecos::HYPERGEOMETRIC, Pecos::HGE_SEL_POP, sel_pop);
	mvd_rep->
	  push_parameters(Pecos::HYPERGEOMETRIC, Pecos::HGE_DRAWN, num_drawn);
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.histogram_uncertain.point_int");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::HISTOGRAM_PT_INT, Pecos::H_PT_INT_PAIRS,
          probDescDB.get_irma("variables.histogram_uncertain.point_int_pairs"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet(
	"variables.histogram_uncertain.point_string");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::HISTOGRAM_PT_STRING,
	  Pecos::H_PT_STR_PAIRS, probDescDB.get_srma(
	  "variables.histogram_uncertain.point_string_pairs"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.histogram_uncertain.point_real");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::HISTOGRAM_PT_REAL,
	  Pecos::H_PT_REAL_PAIRS, probDescDB.get_rrma(
	  "variables.histogram_uncertain.point_real_pairs"));
	assign_value(active_corr, true, start_rv, num_rv);
	start_rv += num_rv;
      }
    }

    // Continuous epistemic
    // (epistemic types do not contribute to active_corr)

    if (!active_only || ceuv) {
      num_rv = probDescDB.get_sizet("variables.continuous_interval_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::CONTINUOUS_INTERVAL_UNCERTAIN,
          Pecos::CIU_BPA, probDescDB.get_rrrma(
          "variables.continuous_interval_uncertain.basic_probs"));
	start_rv += num_rv;
      }
    }

    // Discrete epistemic

    if (!active_only || deuv) {
      num_rv = probDescDB.get_sizet("variables.discrete_interval_uncertain");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::DISCRETE_INTERVAL_UNCERTAIN,
          Pecos::DIU_BPA, probDescDB.get_iirma(
          "variables.discrete_interval_uncertain.basic_probs"));
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.discrete_uncertain_set_int");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::DISCRETE_UNCERTAIN_SET_INT,
          Pecos::DUSI_VALUES_PROBS, probDescDB.get_irma(
          "variables.discrete_uncertain_set_int.values_probs"));
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.discrete_uncertain_set_string");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::DISCRETE_UNCERTAIN_SET_STRING,
          Pecos::DUSS_VALUES_PROBS, probDescDB.get_srma(
          "variables.discrete_uncertain_set_string.values_probs"));
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.discrete_uncertain_set_real");
      if (num_rv) {
	mvd_rep->push_parameters(Pecos::DISCRETE_UNCERTAIN_SET_REAL,
          Pecos::DUSR_VALUES_PROBS, probDescDB.get_rrma(
          "variables.discrete_uncertain_set_real.values_probs"));
	start_rv += num_rv;
      }
    }

    // Continuous state
    // RANGE type could be design or state, so use count-based API

    if (!active_only || csv) {
      num_rv = probDescDB.get_sizet("variables.continuous_state");
      if (num_rv) {
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::CR_LWR_BND,
          probDescDB.get_rv("variables.continuous_state.lower_bounds"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::CR_UPR_BND,
          probDescDB.get_rv("variables.continuous_state.upper_bounds"));
	start_rv += num_rv;
      }
    }

    // Discrete state
    // RANGE and SET types could be design or state, so use count-based API

    if (!active_only || dsv) {
      num_rv = probDescDB.get_sizet("variables.discrete_state_range");
      if (num_rv) {
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::DR_LWR_BND,
          probDescDB.get_iv("variables.discrete_state_range.lower_bounds"));
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::DR_UPR_BND,
          probDescDB.get_iv("variables.discrete_state_range.upper_bounds"));
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.discrete_state_set_int");
      if (num_rv) {
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::DSI_VALUES,
          probDescDB.get_isa("variables.discrete_state_set_int.values"));
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.discrete_state_set_string");
      if (num_rv) {
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::DSS_VALUES,
          probDescDB.get_ssa("variables.discrete_state_set_string.values"));
	start_rv += num_rv;
      }
      num_rv = probDescDB.get_sizet("variables.discrete_state_set_real");
      if (num_rv) {
	mvd_rep->push_parameters(start_rv, num_rv, Pecos::DSR_VALUES,
          probDescDB.get_rsa("variables.discrete_state_set_real.values"));
	//start_rv += num_rv; // last variable group: increment not needed
      }
    }

    // Install the user correlation matrix over the aleatory subset flagged
    // in active_corr above.
    mvd_rep->initialize_correlations(
      probDescDB.get_rsm("variables.uncertain.correlation_matrix"),
      active_corr);

  //  break;
  //}
}
1073 
1074 
/** Initialize the current point x0 and per-variable finite-difference
    bounds fd_lb/fd_ub for the derivative variables listed in original_dvv.
    Sets active_derivs / inactive_derivs to indicate whether original_dvv
    matches the active or inactive continuous variable ids (otherwise the
    all-variables view is used), and returns the matching continuous
    variable ids view for use by the caller.  Unless ignoreBounds is set,
    FD bounds for uncertain variable types honor the support of the
    underlying distribution rather than the global/inferred bounds. */
SizetMultiArrayConstView
Model::initialize_x0_bounds(const SizetArray& original_dvv,
			    bool& active_derivs, bool& inactive_derivs,
			    RealVector& x0,
			    RealVector& fd_lb, RealVector& fd_ub) const
{
  // Are derivatives w.r.t. active or inactive variables?
  active_derivs = inactive_derivs = false;
  if (original_dvv == currentVariables.continuous_variable_ids()) {
    active_derivs = true;
    copy_data(currentVariables.continuous_variables(), x0);        // view->copy
  }
  else if (original_dvv ==
	   currentVariables.inactive_continuous_variable_ids()) {
    inactive_derivs = true;
    copy_data(currentVariables.inactive_continuous_variables(), x0);//view->copy
  }
  else // general derivatives
    copy_data(currentVariables.all_continuous_variables(), x0);    // view->copy

  // define c_l_bnds, c_u_bnds, cv_ids, cv_types
  // (each bound/ids/types view is selected to be consistent with the
  // variable view detected above)
  const RealVector& c_l_bnds = (active_derivs) ? continuous_lower_bounds() :
    ( (inactive_derivs) ? inactive_continuous_lower_bounds() :
      all_continuous_lower_bounds() );
  const RealVector& c_u_bnds = (active_derivs) ? continuous_upper_bounds() :
    ( (inactive_derivs) ? inactive_continuous_upper_bounds() :
      all_continuous_upper_bounds() );
  SizetMultiArrayConstView cv_ids = (active_derivs) ?
    continuous_variable_ids() :
    ( (inactive_derivs) ? inactive_continuous_variable_ids() :
      all_continuous_variable_ids() );
  UShortMultiArrayConstView cv_types = (active_derivs) ?
    continuous_variable_types() :
    ( (inactive_derivs) ? inactive_continuous_variable_types() :
      all_continuous_variable_types() );

  // if not respecting bounds, leave at +/- infinity
  size_t num_deriv_vars = original_dvv.size();
  fd_lb.resize(num_deriv_vars);  fd_ub.resize(num_deriv_vars);
  Real dbl_inf = std::numeric_limits<Real>::infinity();
  if (ignoreBounds)
    { fd_lb = -dbl_inf;  fd_ub = dbl_inf; }
  else { // manage global/inferred vs. distribution bounds
    std::shared_ptr<Pecos::MarginalsCorrDistribution> mvd_rep =
      std::static_pointer_cast<Pecos::MarginalsCorrDistribution>
      (mvDist.multivar_dist_rep());
    for (size_t j=0; j<num_deriv_vars; j++) {
      size_t cv_index = find_index(cv_ids, original_dvv[j]);
      switch (cv_types[cv_index]) {
      case NORMAL_UNCERTAIN: {    // +/-infinity or user-specified
	size_t rv_index = original_dvv[j] - 1;// id to index (full variable set)
	fd_lb[j] = mvd_rep->pull_parameter<Real>(rv_index, Pecos::N_LWR_BND);
	fd_ub[j] = mvd_rep->pull_parameter<Real>(rv_index, Pecos::N_UPR_BND);
	break;
      }
      case LOGNORMAL_UNCERTAIN: { // 0/inf or user-specified
	size_t rv_index = original_dvv[j] - 1;// id to index (full variable set)
	fd_lb[j] = mvd_rep->pull_parameter<Real>(rv_index, Pecos::LN_LWR_BND);
	fd_ub[j] = mvd_rep->pull_parameter<Real>(rv_index, Pecos::LN_UPR_BND);
	break;
      }
      case EXPONENTIAL_UNCERTAIN: case GAMMA_UNCERTAIN:
      case FRECHET_UNCERTAIN:     case WEIBULL_UNCERTAIN:
	// semi-infinite support: lower bound only
	fd_lb[j] = c_l_bnds[cv_index]; fd_ub[j] = dbl_inf;            break;
      case GUMBEL_UNCERTAIN:      // unbounded support
	fd_lb[j] = -dbl_inf;           fd_ub[j] = dbl_inf;            break;
      default:                    // bounded: use global/inferred bounds
	fd_lb[j] = c_l_bnds[cv_index]; fd_ub[j] = c_u_bnds[cv_index]; break;
      }
    }
  }

  return cv_ids;
}
1149 
1150 
1151 // compute a forward step for fd gradients; can't be const
forward_grad_step(size_t num_deriv_vars,size_t xj_index,Real x0_j,Real lb_j,Real ub_j)1152 Real Model::forward_grad_step(size_t num_deriv_vars, size_t xj_index,
1153 			      Real x0_j, Real lb_j, Real ub_j)
1154 {
1155   // Compute the offset for the ith gradient variable.
1156   // Enforce a minimum delta of fdgss*.01
1157   Real fdgss = (fdGradStepSize.length() == num_deriv_vars)
1158     ? fdGradStepSize[xj_index] : fdGradStepSize[0];
1159   //Real h = FDstep1(x0_j, lb_j, ub_j, fdgss*std::max(std::fabs(x0_j),.01));
1160   Real h = FDstep1(x0_j, lb_j, ub_j,
1161 		   initialize_h(x0_j, lb_j, ub_j, fdgss, fdGradStepType));
1162   return h;
1163 }
1164 
1165 
1166 
evaluate()1167 void Model::evaluate()
1168 {
1169   if (modelRep) // envelope fwd to letter
1170     modelRep->evaluate();
1171   else { // letter
1172     ++modelEvalCntr;
1173     if(modelEvaluationsDBState == EvaluationsDBState::UNINITIALIZED) {
1174      modelEvaluationsDBState = evaluationsDB.model_allocate(modelId, modelType,
1175           currentVariables, mvDist, currentResponse, default_active_set());
1176       if(modelEvaluationsDBState == EvaluationsDBState::ACTIVE)
1177         declare_sources();
1178     }
1179 
1180     // Define default ActiveSet for iterators which don't pass one
1181     ActiveSet temp_set = currentResponse.active_set(); // copy
1182     temp_set.request_values(1); // function values only
1183 
1184     if(modelEvaluationsDBState == EvaluationsDBState::ACTIVE)
1185       evaluationsDB.store_model_variables(modelId, modelType, modelEvalCntr,
1186           temp_set, currentVariables);
1187 
1188     if (derived_master_overload()) {
1189       // prevents error of trying to run a multiproc. direct job on the master
1190       derived_evaluate_nowait(temp_set);
1191       currentResponse = derived_synchronize().begin()->second;
1192     }
1193     else // perform a normal synchronous map
1194       derived_evaluate(temp_set);
1195 
1196     if (modelAutoGraphicsFlag) {
1197       OutputManager& output_mgr = parallelLib.output_manager();
1198       output_mgr.add_datapoint(currentVariables, interface_id(),
1199 			       currentResponse);
1200     }
1201     if(modelEvaluationsDBState == EvaluationsDBState::ACTIVE)
1202       evaluationsDB.store_model_response(modelId, modelType, modelEvalCntr, currentResponse);
1203   }
1204 }
1205 
1206 
evaluate(const ActiveSet & set)1207 void Model::evaluate(const ActiveSet& set)
1208 {
1209   if (modelRep) // envelope fwd to letter
1210     modelRep->evaluate(set);
1211   else { // letter
1212     ++modelEvalCntr;
1213 
1214     if(modelEvaluationsDBState == EvaluationsDBState::UNINITIALIZED) {
1215       modelEvaluationsDBState = evaluationsDB.model_allocate(modelId, modelType,
1216           currentVariables, mvDist, currentResponse, default_active_set());
1217       if(modelEvaluationsDBState == EvaluationsDBState::ACTIVE)
1218         declare_sources();
1219     }
1220 
1221     if(modelEvaluationsDBState == EvaluationsDBState::ACTIVE)
1222       evaluationsDB.store_model_variables(modelId, modelType, modelEvalCntr,
1223           set, currentVariables);
1224 
1225     // Derivative estimation support goes here and is not replicated in the
1226     // default asv version of evaluate -> a good reason for using an
1227     // overloaded function design rather than a default parameter design.
1228     ShortArray map_asv(numFns, 0), fd_grad_asv(numFns, 0),
1229       fd_hess_asv(numFns, 0), quasi_hess_asv(numFns, 0);
1230     // Manage map/estimate_derivs for a particular asv based on responses spec.
1231     bool use_est_deriv = manage_asv(set, map_asv, fd_grad_asv,
1232 				    fd_hess_asv, quasi_hess_asv);
1233 
1234     if (use_est_deriv) {
1235       // Compute requested derivatives not available from the simulation (also
1236       // perform initial map for parallel load balance).  estimate_derivatives()
1237       // may involve asynch evals depending on asynchEvalFlag.
1238       estimate_derivatives(map_asv, fd_grad_asv, fd_hess_asv, quasi_hess_asv,
1239 			   set, asynchEvalFlag);
1240       if (asynchEvalFlag) { // concatenate asynch map calls into 1 response
1241         const IntResponseMap& fd_responses = derived_synchronize();
1242         synchronize_derivatives(currentVariables, fd_responses, currentResponse,
1243 				fd_grad_asv, fd_hess_asv, quasi_hess_asv, set);
1244       }
1245     }
1246     else if (derived_master_overload()) {
1247       // This map must be asynchronous since it prevents the error of trying
1248       // to run a multiprocessor direct job on the master.
1249       derived_evaluate_nowait(set);
1250       currentResponse = derived_synchronize().begin()->second;
1251     }
1252     else
1253       // Perform synchronous eval
1254       derived_evaluate(set);
1255 
1256     if (modelAutoGraphicsFlag) {
1257       OutputManager& output_mgr = parallelLib.output_manager();
1258       output_mgr.add_datapoint(currentVariables, interface_id(),
1259 			       currentResponse);
1260     }
1261     if(modelEvaluationsDBState == EvaluationsDBState::ACTIVE)
1262       evaluationsDB.store_model_response(modelId, modelType, modelEvalCntr, currentResponse);
1263 
1264   }
1265 }
1266 
1267 
evaluate_nowait()1268 void Model::evaluate_nowait()
1269 {
1270   if (modelRep) // envelope fwd to letter
1271     modelRep->evaluate_nowait();
1272   else { // letter
1273     ++modelEvalCntr;
1274     if(modelEvaluationsDBState == EvaluationsDBState::UNINITIALIZED) {
1275       modelEvaluationsDBState = evaluationsDB.model_allocate(modelId, modelType,
1276           currentVariables, mvDist, currentResponse, default_active_set());
1277       if(modelEvaluationsDBState == EvaluationsDBState::ACTIVE)
1278         declare_sources();
1279     }
1280 
1281     // Define default ActiveSet for iterators which don't pass one
1282     ActiveSet temp_set = currentResponse.active_set(); // copy
1283     temp_set.request_values(1); // function values only
1284 
1285     if(modelEvaluationsDBState == EvaluationsDBState::ACTIVE)
1286       evaluationsDB.store_model_variables(modelId, modelType, modelEvalCntr,
1287           temp_set, currentVariables);
1288     // perform an asynchronous parameter-to-response mapping
1289     derived_evaluate_nowait(temp_set);
1290 
1291     rawEvalIdMap[derived_evaluation_id()] = modelEvalCntr;
1292     numFDEvalsMap[modelEvalCntr] = -1;//no deriv est; distinguish from QN update
1293 
1294     // history of vars must be catalogued for use in synchronize()
1295     if (modelAutoGraphicsFlag)
1296       varsMap[modelEvalCntr] = currentVariables.copy();
1297   }
1298 }
1299 
1300 
evaluate_nowait(const ActiveSet & set)1301 void Model::evaluate_nowait(const ActiveSet& set)
1302 {
1303   if (modelRep) // envelope fwd to letter
1304     modelRep->evaluate_nowait(set);
1305   else { // letter
1306     ++modelEvalCntr;
1307 
1308     if(modelEvaluationsDBState == EvaluationsDBState::UNINITIALIZED) {
1309       modelEvaluationsDBState = evaluationsDB.model_allocate(modelId, modelType,
1310           currentVariables, mvDist, currentResponse, default_active_set());
1311       if(modelEvaluationsDBState == EvaluationsDBState::ACTIVE)
1312         declare_sources();
1313     }
1314 
1315     if(modelEvaluationsDBState == EvaluationsDBState::ACTIVE)
1316       evaluationsDB.store_model_variables(modelId, modelType, modelEvalCntr,
1317           set, currentVariables);
1318 
1319     // derived evaluation_id() not yet incremented (for first of several if est
1320     // derivs); want the key for id map to be the first raw eval of the set
1321     rawEvalIdMap[derived_evaluation_id() + 1] = modelEvalCntr;
1322 
1323     // Manage use of estimate_derivatives() for a particular asv based on
1324     // the user's gradients/Hessians spec.
1325     ShortArray map_asv(numFns, 0),    fd_grad_asv(numFns, 0),
1326            fd_hess_asv(numFns, 0), quasi_hess_asv(numFns, 0);
1327     bool use_est_deriv = manage_asv(set, map_asv, fd_grad_asv,
1328 				    fd_hess_asv, quasi_hess_asv);
1329     int num_fd_evals;
1330     if (use_est_deriv) {
1331       // Compute requested derivatives not available from the simulation.
1332       // Since we expect multiple evaluate_nowait()/estimate_derivatives()
1333       // calls prior to synchronize()/synchronize_derivatives(), we must perform
1334       // some additional bookkeeping so that the response arrays can be properly
1335       // recombined into estimated gradients/Hessians.
1336       estDerivsFlag = true; // flipped once per set of asynch evals
1337       asvList.push_back(fd_grad_asv);     asvList.push_back(fd_hess_asv);
1338       asvList.push_back(quasi_hess_asv);  setList.push_back(set);
1339       num_fd_evals
1340 	= estimate_derivatives(map_asv, fd_grad_asv, fd_hess_asv,
1341 			       quasi_hess_asv, set, true); // always asynch
1342     }
1343     else {
1344       derived_evaluate_nowait(set);
1345       num_fd_evals = -1; // no deriv est; distinguish from QN update
1346     }
1347     numFDEvalsMap[modelEvalCntr] = num_fd_evals;
1348 
1349     // history of vars must be catalogued for use in synchronize
1350     if (modelAutoGraphicsFlag || num_fd_evals >= 0)
1351       varsMap[modelEvalCntr] = currentVariables.copy();
1352   }
1353 }
1354 
1355 
/** Blocking synchronization of all outstanding asynchronous evaluations
    queued by evaluate_nowait().  Gathers the raw responses from the derived
    class, merges finite-difference partials into estimated derivatives when
    estimate_derivatives() was used, rekeys raw (lower level) evaluation ids
    to model evaluation ids, updates graphics, appends locally cached
    responses, and records results in the evaluations DB when active.
    Returns responseMap keyed by model evaluation id. */
const IntResponseMap& Model::synchronize()
{
  if (modelRep) // envelope fwd to letter
    return modelRep->synchronize();
  else { // letter
    responseMap.clear();

    // gather the completed raw evaluations from the derived model
    const IntResponseMap& raw_resp_map = derived_synchronize();
    IntVarsMIter v_it; IntRespMCIter r_cit; IntIntMIter id_it;

    if (estDerivsFlag) { // merge several responses into response gradients
      if (outputLevel > QUIET_OUTPUT)
        Cout <<"-----------------------------------------\n"
             << "Raw asynchronous response data captured.\n"
             << "Merging data to estimate derivatives:\n"
             << "----------------------------------------\n\n";
      // Walk the two parallel bookkeeping maps populated during
      // evaluate_nowait():
      //   rawEvalIdMap:  raw (lower level) eval id -> model eval id
      //   numFDEvalsMap: model eval id -> # of FD evals (-1 = no deriv est)
      id_it = rawEvalIdMap.begin(); IntIntMIter fd_it = numFDEvalsMap.begin();
      while (id_it != rawEvalIdMap.end() && fd_it != numFDEvalsMap.end()) {
	int raw_id = id_it->first;
	r_cit = raw_resp_map.find(raw_id);
	if (r_cit != raw_resp_map.end()) {
	  int model_id = fd_it->first, num_fd_evals = fd_it->second;
	  if (num_fd_evals >= 0) {
	    // estimate_derivatives() was used: merge raw FD responses into 1
	    // response or augment response with quasi-Hessian updating
	    if (outputLevel > QUIET_OUTPUT) {
	      //if (num_fd_evals > 1) // inconclusive due to initial_map lookup
		Cout << "Merging asynchronous responses " << raw_id
		     << " through " << raw_id + num_fd_evals - 1 << '\n';
	      //else
	      //  Cout << "Augmenting asynchronous response " << raw_id
	      //       << " with quasi-Hessian updating\n";
	    }
	    v_it = varsMap.find(model_id);
	    // this batch occupies num_fd_evals consecutive entries of
	    // raw_resp_map starting at raw_id; slice them into a temp map
	    IntRespMCIter re = r_cit; std::advance(re, num_fd_evals);
	    IntResponseMap tmp_response_map(r_cit, re);
	    // Recover fd_grad/fd_hess/quasi_hess asv's from asvList and
	    // orig_set from setList (FIFO order matches evaluate_nowait())
	    ShortArray fd_grad_asv    = asvList.front(); asvList.pop_front();
	    ShortArray fd_hess_asv    = asvList.front(); asvList.pop_front();
	    ShortArray quasi_hess_asv = asvList.front(); asvList.pop_front();
	    ActiveSet  orig_set       = setList.front(); setList.pop_front();
	    synchronize_derivatives(v_it->second, tmp_response_map,
				    responseMap[model_id], fd_grad_asv,
				    fd_hess_asv, quasi_hess_asv, orig_set);
	    // cleanup; vars retained if graphics still needs them below
	    if (!modelAutoGraphicsFlag) varsMap.erase(v_it);
	  }
	  else { // number of maps==1, derivs not estimated
	    if (outputLevel > QUIET_OUTPUT)
	      Cout << "Asynchronous response " << raw_id
		   << " does not require merging.\n";
	    responseMap[model_id] = r_cit->second;
	  }
	  // cleanup: postfix increment manages iterator invalidation
	  numFDEvalsMap.erase(fd_it++); rawEvalIdMap.erase(id_it++);
	}
	else // preserve bookkeeping for a subsequent synchronization pass
	  { ++fd_it; ++id_it; }
      }
      // reset flags
      estDerivsFlag = false;
    }
    else // no calls to estimate_derivatives()
      // rekey the raw response map (lower level evaluation ids may be offset
      // from modelEvalCntr if previous finite differencing occurred)
      //rekey_response_map(raw_resp_map, rawEvalIdMap, responseMap);
      for (r_cit = raw_resp_map.begin(); r_cit != raw_resp_map.end(); ++r_cit) {
	id_it = rawEvalIdMap.find(r_cit->first);
	if (id_it != rawEvalIdMap.end()) {
	  int model_id = id_it->second; // raw id -> model eval id
	  responseMap[model_id] = r_cit->second;
	  rawEvalIdMap.erase(id_it);    // each raw id is consumed once
	  numFDEvalsMap.erase(model_id);
	}
      }

    // update graphics using the variables catalogued in evaluate_nowait()
    if (modelAutoGraphicsFlag) {
      OutputManager& output_mgr = parallelLib.output_manager();
      for (r_cit = responseMap.begin(); r_cit != responseMap.end(); ++r_cit) {
	v_it = varsMap.find(r_cit->first);
	output_mgr.add_datapoint(v_it->second, interface_id(), r_cit->second);
	varsMap.erase(v_it);
      }
    }

    // Now augment rekeyed response map with locally cached evals.  If
    // these are not matched in a higher-level rekey process used by the
    // calling context, then they are returned to cachedResponseMap
    // using Model::cache_unmatched_response().
    responseMap.insert(cachedResponseMap.begin(), cachedResponseMap.end());
    cachedResponseMap.clear();

    // persist all completed responses when the evaluations DB is active
    if(modelEvaluationsDBState == EvaluationsDBState::ACTIVE) {
      for(const auto  &id_r : responseMap)
        evaluationsDB.store_model_response(modelId, modelType, id_r.first, id_r.second);
    }
    // return final map
    return responseMap;
  }
}
1458 
1459 
/** Nonblocking synchronization: rekey and return whichever asynchronous
    evaluations have completed so far, without waiting for the remainder.
    Aborts if derivative estimation is pending (estDerivsFlag), since
    merging of FD partials is not supported in the nonblocking path. */
const IntResponseMap& Model::synchronize_nowait()
{
  if (modelRep) // envelope fwd to letter
    return modelRep->synchronize_nowait();
  else { // letter
    responseMap.clear();

    if (estDerivsFlag) {
      // This will require caching of evals until a merging set is complete.
      // While this would be straightforward to implement (using similar code
      // to the graphics caching below), there are no current use cases since
      // all gradient-based methods use blocking synchronization (you'd need
      // something like a parallel greedy gradient-based line search).
      Cerr << "Error: finite differencing within asynch evaluations not "
	   << "currently supported by Model::synchronize_nowait()" << std::endl;
      abort_handler(MODEL_ERROR);
    }

    // retrieve any completed evaluations (possibly a partial set)
    const IntResponseMap& raw_resp_map = derived_synchronize_nowait();

    // rekey and cleanup.
    // Note 1: rekeying is needed for the case of mixed usage of synchronize()
    // and synchronize_nowait(), since the former can introduce offsets.
    // Note 2: if estimate_derivatives() support is added, then rawEvalIdMap
    // data input must be expanded to include FD evals.
    IntRespMCIter r_cit; IntIntMIter id_it;
    for (r_cit = raw_resp_map.begin(); r_cit != raw_resp_map.end(); ++r_cit) {
      id_it = rawEvalIdMap.find(r_cit->first);
      if (id_it != rawEvalIdMap.end()) {
	int model_id = id_it->second; // raw (lower level) id -> model eval id
	responseMap[model_id] = r_cit->second;
	rawEvalIdMap.erase(id_it);    // each raw id is consumed exactly once
	numFDEvalsMap.erase(model_id);
      }
    }

    // Update graphics.  There are two possible ways to do this:
    // (1) propagate separate eval id bookkeeping to Model level and extend
    //     Dakota::Graphics to allow nonsequential input with id's (where data
    //     is reordered such that lines connect properly).
    // (2) cache data here and input sequentially when enough data has been
    //     returned to allow an unbroken sequence.
    // Since SciPlot will not directly support (1), approach (2) is taken.
    if (modelAutoGraphicsFlag) {
      // add new completions to graphics cache.
      graphicsRespMap.insert(responseMap.begin(), responseMap.end());
      // search for next response set(s) in sequence; drain the cache as long
      // as the eval id expected by the graphics counter is available
      bool found = true;
      while (found) {
	OutputManager& output_mgr = parallelLib.output_manager();
	int graphics_cntr = output_mgr.graphics_counter();
	// find() is not really necessary due to Map ordering
	//g_it = graphicsRespMap.begin();
	//if (g_it == graphicsRespMap.end() || g_it->first != graphics_cntr)
	IntRespMIter g_it = graphicsRespMap.find(graphics_cntr);
	if (g_it == graphicsRespMap.end())
	  found = false;
	else {
	  IntVarsMIter v_it = varsMap.find(graphics_cntr);
	  output_mgr.add_datapoint(v_it->second, interface_id(), g_it->second);
	  varsMap.erase(v_it); graphicsRespMap.erase(g_it);
	}
      }
    }

    // Now augment rekeyed response map with locally cached evals.  If
    // these are not matched in a higher-level rekey process used by the
    // calling context, then they are returned to cachedResponseMap
    // using Model::cache_unmatched_response().
    responseMap.insert(cachedResponseMap.begin(), cachedResponseMap.end());
    cachedResponseMap.clear();
    // persist completed responses when the evaluations DB is active
    if(modelEvaluationsDBState == EvaluationsDBState::ACTIVE) {
      for(const auto  &id_r : responseMap)
        evaluationsDB.store_model_response(modelId, modelType, id_r.first, id_r.second);
    }
    return responseMap;
  }
}
1538 
1539 
1540 /** Auxiliary function to determine initial finite difference h
1541     (before step length adjustment) based on type of step desired. */
initialize_h(Real x_j,Real lb_j,Real ub_j,Real step_size,String step_type) const1542 Real Model::initialize_h(Real x_j, Real lb_j, Real ub_j, Real step_size,
1543 			 String step_type) const
1544 {
1545   Real h;
1546   if (step_type == "absolute")
1547     h = std::max(step_size, std::sqrt(DBL_MIN));
1548   else if (step_type == "bounds")
1549     h = step_size*std::max((ub_j-lb_j), std::sqrt(DBL_MIN));
1550   else     // relative
1551     h = step_size*std::max(std::fabs(x_j),.01);
1552 
1553   return h;
1554 }
1555 
1556 
1557 /** Auxiliary function to compute forward or first central-difference
1558     step size, honoring bounds.  The first step is away from zero,
1559     when possible.  Flips the direction or updates shortStep if can't
1560     take the full requested step h_mag. */
FDstep1(Real x0_j,Real lb_j,Real ub_j,Real h_mag)1561 Real Model::FDstep1(Real x0_j, Real lb_j, Real ub_j, Real h_mag)
1562 {
1563   Real h;
1564   shortStep = false;
1565   if (x0_j < 0.) {
1566     h = -h_mag;
1567     if (!ignoreBounds && x0_j + h < lb_j) {
1568       // step would exceed lower bound; try flipping, else shorten
1569       if (x0_j + h_mag <= ub_j)
1570         h = h_mag;
1571       else
1572         shortStep = true;
1573     }
1574   }
1575   else {
1576     h = h_mag;
1577     if (!ignoreBounds && x0_j + h > ub_j) {
1578       // step would exceed upper bound; try flipping, else shorten
1579       if (x0_j - h_mag >= lb_j)
1580         h = -h_mag;
1581       else {
1582         shortStep = true;
1583       }
1584     }
1585   }
1586 
1587   if (shortStep) {
1588     // take the step to the furthest boundary
1589     Real h1 = x0_j - lb_j;
1590     Real h2 = ub_j - x0_j;
1591     if (h1 < h2)
1592       h = h2;
1593     else
1594       h = -h1;
1595   }
1596 
1597   return h;
1598 }
1599 
1600 
1601 /** Auxiliary function to compute the second central-difference step size,
1602     honoring bounds. */
FDstep2(Real x0_j,Real lb_j,Real ub_j,Real h)1603 Real Model::FDstep2(Real x0_j, Real lb_j, Real ub_j, Real h)
1604 {
1605   Real h2 = -h;  // default is to take same size step in opposite direction
1606 
1607   // if taking a shorter step to a boundary, the second step is half of it
1608   if (shortStep)
1609     h2 = 0.5*h;
1610   else if (!ignoreBounds) {
1611     Real h1;
1612     if (h2 < 0.) {
1613       if (x0_j + h2 < lb_j) {
1614         // step would exceed lower bound; try full step in opposite
1615         // direction; contracting until in-bounds
1616         shortStep = true;
1617         h1 = h + h;
1618         if (x0_j + h1 <= ub_j)
1619           h2 = h1;
1620         else {
1621           h1 = 1.5*h;
1622           if (x0_j + h1 <= ub_j)
1623             h2 = h1;
1624           else
1625             h2 = 0.5*h;
1626         }
1627       }
1628     }
1629     else {
1630       if (x0_j + h2 > ub_j) {
1631         // step would exceed upper bound; try full step in opposite
1632         // direction; contracting until in-bounds
1633         shortStep = true;
1634         h1 = h + h;
1635         if (x0_j + h1 >= lb_j)
1636           h2 = h1;
1637         else {
1638           h1 = 1.5*h;
1639           if (x0_j + h1 >= lb_j)
1640             h2 = h1;
1641           else
1642             h2 = 0.5*h;
1643         }
1644       }
1645     }
1646   }
1647   return h2;
1648 }
1649 
1650 
1651 /** Estimate derivatives by computing finite difference gradients, finite
1652     difference Hessians, and/or quasi-Newton Hessians.  The total number of
1653     finite difference evaluations is returned for use by synchronize() to
1654     track response arrays, and it could be used to improve management of
1655     max_function_evaluations within the iterators. */
1656 int Model::
estimate_derivatives(const ShortArray & map_asv,const ShortArray & fd_grad_asv,const ShortArray & fd_hess_asv,const ShortArray & quasi_hess_asv,const ActiveSet & original_set,const bool asynch_flag)1657 estimate_derivatives(const ShortArray& map_asv, const ShortArray& fd_grad_asv,
1658 		     const ShortArray& fd_hess_asv,
1659 		     const ShortArray& quasi_hess_asv,
1660 		     const ActiveSet& original_set, const bool asynch_flag)
1661 {
1662   if (outputLevel > SILENT_OUTPUT)
1663     Cout << "\n------------------------------------------\n"
1664          <<   "Begin Dakota derivative estimation routine\n"
1665          <<   "------------------------------------------\n\n";
1666 
1667   // -----------------------------------------------
1668   // Retrieve/evaluate fn_vals_x0 and/or fn_grads_x0
1669   // -----------------------------------------------
1670 
1671   // Perform initial map at x0 to calculate (1) non-finite-differenced portions
1672   // of the response, (2) fn_vals_x0 for forward difference gradients or
1673   // second-order function-difference Hessians, and/or (3) fn_grads_x0 for
1674   // first-order gradient-difference Hessians.  The fn_vals_x0/fn_grads_x0 data
1675   // may already be available from preceding function evaluations (e.g., an
1676   // iterator such as OPT++ requests fn. values in one evaluate call
1677   // followed by a gradient request, followed by a Hessian request), so perform
1678   // a database search when appropriate to retrieve the data instead of relying
1679   // solely on duplication detection.
1680   bool initial_map = false, augmented_data_flag = false, db_capture = false,
1681     fd_grad_flag = false, fd_hess_flag = false, fd_hess_by_fn_flag = false,
1682     fd_hess_by_grad_flag = false;
1683   const ShortArray& orig_asv = original_set.request_vector();
1684   const SizetArray& orig_dvv = original_set.derivative_vector();
1685   size_t i, j, k, map_counter = 0, num_deriv_vars = orig_dvv.size();
1686   size_t ifg, nfg = 0;
1687 
1688   for (i=0; i<numFns; i++) {
1689     if (map_asv[i]) {
1690       initial_map = true;
1691       if ( ( (map_asv[i] & 1) && !(orig_asv[i] & 1) ) ||
1692            ( (map_asv[i] & 2) && !(orig_asv[i] & 2) ) )
1693         augmented_data_flag = true; // orig_asv val/grad requests augmented
1694     }
1695     if (fd_grad_asv[i])
1696       fd_grad_flag = true;           // gradient finite differencing needed
1697     if (fd_hess_asv[i]) {
1698       fd_hess_flag = true;           // Hessian finite differencing needed ...
1699       if (fd_hess_asv[i] & 1)
1700         fd_hess_by_fn_flag = true;   // ... by 2nd-order function differences
1701       if (fd_hess_asv[i] & 2)
1702         ++nfg;                       // ... by 1st-order gradient differences
1703     }
1704   }
1705   if (nfg)
1706     fd_hess_by_grad_flag = true;
1707 
1708   ActiveSet new_set(map_asv, orig_dvv);
1709   Response initial_map_response(currentResponse.shared_data(), new_set);
1710 
1711   // The logic for incurring an additional data_pairs search (beyond the
1712   // existing duplicate detection) is that a data request contained in
1713   // orig_asv is most likely not a duplicate, but there is a good chance
1714   // that an augmented data reqmt (appears in map_asv but not in orig_asv)
1715   // has been evaluated previously.  The additional search allows us to trap
1716   // this common case more gracefully (special header, no evaluation echo).
1717   if (augmented_data_flag) {
1718     if (db_lookup(currentVariables, new_set, initial_map_response)) {
1719       if (outputLevel > SILENT_OUTPUT)
1720         Cout << ">>>>> map at X performed previously and results retrieved\n\n";
1721       initial_map = false; // reset
1722       if (asynch_flag) {
1723         db_capture = true;
1724         dbResponseList.push_back(initial_map_response);
1725       }
1726     }
1727   }
1728   if (asynch_flag) { // communicate settings to synchronize_derivatives()
1729     initialMapList.push_back(initial_map);
1730     dbCaptureList.push_back(db_capture);
1731   }
1732 
1733   if (initial_map) {
1734     if (outputLevel > SILENT_OUTPUT) {
1735       Cout << ">>>>> Initial map for analytic portion of response";
1736       if (augmented_data_flag)
1737 	Cout << "\n      augmented with data requirements for differencing";
1738       Cout << ":\n";
1739     }
1740     if (asynch_flag) {
1741       derived_evaluate_nowait(new_set);
1742       if (outputLevel > SILENT_OUTPUT)
1743 	Cout << "\n\n";
1744     }
1745     else {
1746       derived_evaluate(new_set);
1747       initial_map_response.update(currentResponse);
1748     }
1749     ++map_counter;
1750   }
1751 
1752   // ------------------------------
1753   // Estimate numerical derivatives
1754   // ------------------------------
1755   RealVector dx;
1756   RealVectorArray fg, fx;
1757   RealMatrix new_fn_grads;
1758   if (fd_grad_flag && !asynch_flag) {
1759     new_fn_grads.shapeUninitialized(num_deriv_vars, numFns);
1760     new_fn_grads = 0.;
1761   }
1762   RealSymMatrixArray new_fn_hessians;
1763   if (fd_hess_flag) {
1764     if (fd_hess_by_fn_flag && !centralHess)
1765       dx.resize(num_deriv_vars);
1766     if (!asynch_flag) {
1767       new_fn_hessians.resize(numFns);
1768       for (i=0; i<numFns; i++) {
1769         new_fn_hessians[i].reshape(num_deriv_vars);
1770         new_fn_hessians[i] = 0.;
1771       }
1772       if (fd_hess_by_fn_flag && !centralHess)
1773         fx.resize(num_deriv_vars);
1774       if (fd_hess_by_grad_flag) {
1775         fg.resize(ifg = nfg*num_deriv_vars);
1776         while(ifg > 0)
1777           fg[--ifg].resize(num_deriv_vars);
1778       }
1779     }
1780   }
1781   if (fd_grad_flag || fd_hess_flag) {
1782 
1783     // define lower/upper bounds for finite differencing and cv_ids
1784     RealVector x0, fd_lb, fd_ub;
1785     bool active_derivs, inactive_derivs; // derivs w.r.t. {active,inactive} vars
1786     SizetMultiArrayConstView cv_ids =
1787       initialize_x0_bounds(orig_dvv, active_derivs, inactive_derivs, x0,
1788 			   fd_lb, fd_ub);
1789 
1790     const RealVector& fn_vals_x0  = initial_map_response.function_values();
1791     const RealMatrix& fn_grads_x0 = initial_map_response.function_gradients();
1792 
1793     // ------------------------
1794     // Loop over num_deriv_vars
1795     // ------------------------
1796     RealVector x = x0;
1797     for (j=0; j<num_deriv_vars; j++) { // difference the 1st num_deriv_vars vars
1798 
1799       size_t xj_index = find_index(cv_ids, orig_dvv[j]);
1800       Real x0_j = x0[xj_index], lb_j = fd_lb[j], ub_j = fd_ub[j];
1801 
1802       if (fd_grad_flag) {
1803         if (!ignoreBounds && lb_j >= ub_j) {
1804           if (asynch_flag)
1805             deltaList.push_back(0.);
1806           else
1807             for (i=0; i<numFns; i++)
1808               if (fd_grad_asv[i])
1809                 new_fn_grads(j,i) = 0.;
1810           continue;
1811         }
1812         new_set.request_vector(fd_grad_asv);
1813 
1814         // Compute the offset for the ith gradient variable.
1815         Real h = forward_grad_step(num_deriv_vars, xj_index, x0_j, lb_j, ub_j);
1816 
1817         if (asynch_flag) // communicate settings to synchronize_derivatives()
1818           deltaList.push_back(h);
1819 
1820         // -------------------------
1821         // Evaluate fn_vals_x_plus_h
1822         // -------------------------
1823         RealVector fn_vals_x_plus_h;
1824         x[xj_index] = x0_j + h;
1825         if (outputLevel > SILENT_OUTPUT)
1826           Cout << ">>>>> Dakota finite difference gradient evaluation for x["
1827                << j+1 << "] + h:\n";
1828         if (active_derivs)
1829           currentVariables.continuous_variables(x);
1830         else if (inactive_derivs)
1831           currentVariables.inactive_continuous_variables(x);
1832         else
1833           currentVariables.all_continuous_variables(x);
1834         if (asynch_flag) {
1835           derived_evaluate_nowait(new_set);
1836           if (outputLevel > SILENT_OUTPUT)
1837             Cout << "\n\n";
1838         }
1839         else {
1840           derived_evaluate(new_set);
1841           fn_vals_x_plus_h = currentResponse.function_values();
1842           if (intervalType == "forward") {
1843             for (i=0; i<numFns; i++)
1844               // prevent erroneous difference of vals present in fn_vals_x0 but
1845               // not in fn_vals_x_plus_h because of map/fd_grad asv differences
1846               if (fd_grad_asv[i])
1847                 new_fn_grads(j,i) = (fn_vals_x_plus_h[i] - fn_vals_x0[i])/h;
1848           }
1849         }
1850         ++map_counter;
1851 
1852         // --------------------------
1853         // Evaluate fn_vals_x_minus_h
1854         // --------------------------
1855         if (intervalType == "central") {
1856           Real h1, h2 = FDstep2(x0_j, lb_j, ub_j, h);
1857           x[xj_index] = x0_j + h2;
1858           if (outputLevel > SILENT_OUTPUT)
1859             Cout << ">>>>> Dakota finite difference gradient evaluation for x["
1860                  << j+1 << "] - h:\n";
1861           if (active_derivs)
1862             currentVariables.continuous_variables(x);
1863           else if (inactive_derivs)
1864             currentVariables.inactive_continuous_variables(x);
1865           else
1866             currentVariables.all_continuous_variables(x);
1867           if (asynch_flag) {
1868             deltaList.push_back(h2);
1869             derived_evaluate_nowait(new_set);
1870             if (outputLevel > SILENT_OUTPUT)
1871               Cout << "\n\n";
1872           }
1873           else {
1874             derived_evaluate(new_set);
1875             const RealVector& fn_vals_x_minus_h
1876               = currentResponse.function_values();
1877             // no need to check fd_grad_asv since it was used for both evals
1878             if (shortStep) {
1879               Real h12 = h*h, h22 = h2*h2;
1880               h1 = h*h2*(h2-h);
1881               for(i = 0; i < numFns; ++i)
1882                 new_fn_grads(j,i)
1883                   = ( h22*(fn_vals_x_plus_h[i]  - fn_vals_x0[i]) -
1884 		      h12*(fn_vals_x_minus_h[i] - fn_vals_x0[i]) ) / h1;
1885             }
1886             else {
1887               h1 = h - h2;
1888               for (i=0; i<numFns; i++)
1889                 new_fn_grads(j,i)
1890                   = (fn_vals_x_plus_h[i] - fn_vals_x_minus_h[i]) / h1;
1891             }
1892           }
1893           ++map_counter;
1894         }
1895       }
1896 
1897       if (fd_hess_flag) {
1898         new_set.request_vector(fd_hess_asv);
1899 
1900         // If analytic grads, then 1st-order gradient differences
1901         // > no interval type control (uses only forward diff of analytic
1902         //   grads), separate finite diff step size.
1903         // If numerical grads, then 2nd-order function differences
1904         // > no interval type control (uses only central diffs of numerical
1905         //   grads from central fn diffs), separate finite diff step size.
1906         //   Could get some eval reuse for diagonal Hessian terms by setting
1907         //   fdHessStepSize to half of fdGradStepSize, but this is not
1908         //   hard-wired since generally want fdHessStepSize > fdGradStepSize
1909         //   (if desired, the user can set fdHessStepSize to fdGradStepSize/2
1910         //   to get reuse).
1911         // If mixed grads, then mixed 1st/2nd-order diffs for numerical Hessians
1912 
1913         if (fd_hess_by_fn_flag) {
1914           if (centralHess) {
1915             RealVector fn_vals_x_plus_2h, fn_vals_x_minus_2h;
1916 
1917             // Compute the 2nd-order Hessian offset for the ith variable.
1918             // Enforce a minimum delta of fdhss*.01
1919             Real fdhbfss = (fdHessByFnStepSize.length() == num_deriv_vars)
1920               ? fdHessByFnStepSize[xj_index] : fdHessByFnStepSize[0];
1921             Real h_mag = fdhbfss * std::max(std::fabs(x0_j), .01);
1922             Real h = (x0_j < 0.) ? -h_mag : h_mag; // h has same sign as x0_j
1923             if (asynch_flag)// communicate settings to synchronize_derivatives()
1924               deltaList.push_back(h);
1925 
1926             // evaluate diagonal term
1927 
1928             // --------------------------
1929             // Evaluate fn_vals_x_plus_2h
1930             // --------------------------
1931             x[xj_index] = x0[xj_index] + 2.*h;
1932             if (outputLevel > SILENT_OUTPUT)
1933               Cout << ">>>>> Dakota finite difference Hessian evaluation for x["
1934                    << j+1 << "] + 2h:\n";
1935             if (active_derivs)
1936               currentVariables.continuous_variables(x);
1937             else if (inactive_derivs)
1938               currentVariables.inactive_continuous_variables(x);
1939             else
1940               currentVariables.all_continuous_variables(x);
1941             if (asynch_flag) {
1942               derived_evaluate_nowait(new_set);
1943               if (outputLevel > SILENT_OUTPUT)
1944                 Cout << "\n\n";
1945             }
1946             else {
1947               derived_evaluate(new_set);
1948               fn_vals_x_plus_2h = currentResponse.function_values();
1949             }
1950 
1951             // ---------------------------
1952             // Evaluate fn_vals_x_minus_2h
1953             // ---------------------------
1954             x[xj_index] = x0[xj_index] - 2.*h;
1955             if (outputLevel > SILENT_OUTPUT)
1956               Cout << ">>>>> Dakota finite difference Hessian evaluation for x["
1957                    << j+1 << "] - 2h:\n";
1958             if (active_derivs)
1959               currentVariables.continuous_variables(x);
1960             else if (inactive_derivs)
1961               currentVariables.inactive_continuous_variables(x);
1962             else
1963               currentVariables.all_continuous_variables(x);
1964             if (asynch_flag) {
1965               derived_evaluate_nowait(new_set);
1966               if (outputLevel > SILENT_OUTPUT)
1967                 Cout << "\n\n";
1968             }
1969             else {
1970               derived_evaluate(new_set);
1971               fn_vals_x_minus_2h = currentResponse.function_values();
1972             }
1973 
1974             map_counter += 2;
1975             if (!asynch_flag) {
1976               for (i=0; i<numFns; i++)
1977                 // prevent error in differencing vals present in fn_vals_x0 but
1978                 // not in fn_vals_x_(plus/minus)_2h due to map/fd_hess asv diffs
1979                 if (fd_hess_asv[i] & 1)
1980                   new_fn_hessians[i](j,j) = (fn_vals_x_plus_2h[i]
1981                      - 2.*fn_vals_x0[i] +  fn_vals_x_minus_2h[i])/(4.*h*h);
1982             }
1983 
1984             // evaluate off-diagonal terms
1985 
1986             for (k=j+1; k<num_deriv_vars; k++) {
1987               size_t xk_index = find_index(cv_ids, orig_dvv[k]);
1988               RealVector fn_vals_x_plus_h_plus_h,  fn_vals_x_plus_h_minus_h,
1989                 fn_vals_x_minus_h_plus_h, fn_vals_x_minus_h_minus_h;
1990 
1991               // --------------------------------
1992               // Evaluate fn_vals_x_plus_h_plus_h
1993               // --------------------------------
1994               x[xj_index] = x0[xj_index] + h;
1995               x[xk_index] = x0[xk_index] + h;
1996               if (outputLevel > SILENT_OUTPUT)
1997                 Cout << ">>>>> Dakota finite difference Hessian evaluation for "
1998                      << "x[" << j+1 << "] + h, x[" << k+1 << "] + h:\n";
1999               if (active_derivs)
2000                 currentVariables.continuous_variables(x);
2001               else if (inactive_derivs)
2002                 currentVariables.inactive_continuous_variables(x);
2003               else
2004                 currentVariables.all_continuous_variables(x);
2005               if (asynch_flag) {
2006                 derived_evaluate_nowait(new_set);
2007                 if (outputLevel > SILENT_OUTPUT)
2008                   Cout << "\n\n";
2009               }
2010               else {
2011                 derived_evaluate(new_set);
2012                 fn_vals_x_plus_h_plus_h = currentResponse.function_values();
2013               }
2014               // ---------------------------------
2015               // Evaluate fn_vals_x_plus_h_minus_h
2016               // ---------------------------------
2017               //x[xj_index] = x0[xj_index] + h;
2018               x[xk_index] = x0[xk_index] - h;
2019               if (outputLevel > SILENT_OUTPUT)
2020                 Cout << ">>>>> Dakota finite difference Hessian evaluation for "
2021                      << "x[" << j+1 << "] + h, x[" << k+1 << "] - h:\n";
2022               if (active_derivs)
2023                 currentVariables.continuous_variables(x);
2024               else if (inactive_derivs)
2025                 currentVariables.inactive_continuous_variables(x);
2026               else
2027                 currentVariables.all_continuous_variables(x);
2028               if (asynch_flag) {
2029                 derived_evaluate_nowait(new_set);
2030                 if (outputLevel > SILENT_OUTPUT)
2031                   Cout << "\n\n";
2032               }
2033               else {
2034                 derived_evaluate(new_set);
2035                 fn_vals_x_plus_h_minus_h = currentResponse.function_values();
2036               }
2037               // ---------------------------------
2038               // Evaluate fn_vals_x_minus_h_plus_h
2039               // ---------------------------------
2040               x[xj_index] = x0[xj_index] - h;
2041               x[xk_index] = x0[xk_index] + h;
2042               if (outputLevel > SILENT_OUTPUT)
2043                 Cout << ">>>>> Dakota finite difference Hessian evaluation for "
2044                      << "x[" << j+1 << "] - h, x[" << k+1 << "] + h:\n";
2045               if (active_derivs)
2046                 currentVariables.continuous_variables(x);
2047               else if (inactive_derivs)
2048                 currentVariables.inactive_continuous_variables(x);
2049               else
2050                 currentVariables.all_continuous_variables(x);
2051               if (asynch_flag) {
2052                 derived_evaluate_nowait(new_set);
2053                 if (outputLevel > SILENT_OUTPUT)
2054                   Cout << "\n\n";
2055               }
2056               else {
2057                 derived_evaluate(new_set);
2058                 fn_vals_x_minus_h_plus_h = currentResponse.function_values();
2059               }
2060               // ----------------------------------
2061               // Evaluate fn_vals_x_minus_h_minus_h
2062               // ----------------------------------
2063               //x[xj_index] = x0[xj_index] - h;
2064               x[xk_index] = x0[xk_index] - h;
2065               if (outputLevel > SILENT_OUTPUT)
2066                 Cout << ">>>>> Dakota finite difference Hessian evaluation for "
2067                      << "x[" << j+1 << "] - h, x[" << k+1 << "] - h:\n";
2068               if (active_derivs)
2069                 currentVariables.continuous_variables(x);
2070               else if (inactive_derivs)
2071                 currentVariables.inactive_continuous_variables(x);
2072               else
2073                 currentVariables.all_continuous_variables(x);
2074               if (asynch_flag) {
2075                 derived_evaluate_nowait(new_set);
2076                 if (outputLevel > SILENT_OUTPUT)
2077                   Cout << "\n\n";
2078               }
2079               else {
2080                 derived_evaluate(new_set);
2081                 fn_vals_x_minus_h_minus_h = currentResponse.function_values();
2082               }
2083 
2084               map_counter += 4;
2085               if (!asynch_flag)
2086                 for (i=0; i<numFns; i++)
2087                   // no need to check fd_hess_asv since used for each eval
2088                   // NOTE: symmetry is naturally satisfied.  Teuchos maps
2089                   // new_fn_hessians[i](j,k) and new_fn_hessians[i](k,j)
2090                   // to the same memory cell.
2091                   new_fn_hessians[i](j,k)
2092                     = (fn_vals_x_plus_h_plus_h[i] - fn_vals_x_plus_h_minus_h[i]
2093                        -  fn_vals_x_minus_h_plus_h[i]
2094                        +  fn_vals_x_minus_h_minus_h[i] ) / (4.*h*h);
2095 
2096               x[xk_index] = x0[xk_index];
2097             }
2098           }
2099           else { //!! new logic
2100             RealVector fn_vals_x1, fn_vals_x12, fn_vals_x2;
2101 
2102             // Compute the 2nd-order Hessian offset for the ith variable.
2103             // Enforce a minimum delta of fdhss*.01
2104             Real fdhbfss = (fdHessByFnStepSize.length() == num_deriv_vars)
2105               ? fdHessByFnStepSize[xj_index] : fdHessByFnStepSize[0];
2106             //	    Real h1 = FDstep1(x0_j, lb_j, ub_j, 2. * fdhbfss *
2107             //			      std::max(std::fabs(x0_j), .01));
2108             Real h1 = FDstep1(x0_j, lb_j, ub_j,
2109 	      initialize_h(x0_j, lb_j, ub_j, 2.*fdhbfss, fdHessStepType));
2110             Real h2 = FDstep2(x0_j, lb_j, ub_j, h1);
2111             Real denom, hdiff;
2112             if (asynch_flag) { // transfer settings to synchronize_derivatives()
2113               deltaList.push_back(h1);
2114               deltaList.push_back(h2);
2115             }
2116             dx[j] = h1;
2117 
2118             // evaluate diagonal term
2119 
2120             // --------------------------
2121             // Evaluate fn_vals_x1
2122             // --------------------------
2123             x[xj_index] = x0[xj_index] + h1;
2124             if (outputLevel > SILENT_OUTPUT)
2125               Cout << ">>>>> Dakota finite difference Hessian evaluation for x["
2126                    << j+1 << "] + 2h:\n";
2127             if (active_derivs)
2128               currentVariables.continuous_variables(x);
2129             else if (inactive_derivs)
2130               currentVariables.inactive_continuous_variables(x);
2131             else
2132               currentVariables.all_continuous_variables(x);
2133             if (asynch_flag) {
2134               derived_evaluate_nowait(new_set);
2135               if (outputLevel > SILENT_OUTPUT)
2136                 Cout << "\n\n";
2137             }
2138             else {
2139               derived_evaluate(new_set);
2140               fx[j] = fn_vals_x1 = currentResponse.function_values();
2141             }
2142 
2143             // ---------------------------
2144             // Evaluate fn_vals_x2
2145             // ---------------------------
2146             x[xj_index] = x0[xj_index] + h2;
2147             if (outputLevel > SILENT_OUTPUT)
2148               Cout << ">>>>> Dakota finite difference Hessian evaluation for x["
2149                    << j+1 << "] - 2h:\n";
2150             if (active_derivs)
2151               currentVariables.continuous_variables(x);
2152             else if (inactive_derivs)
2153               currentVariables.inactive_continuous_variables(x);
2154             else
2155               currentVariables.all_continuous_variables(x);
2156             if (asynch_flag) {
2157               derived_evaluate_nowait(new_set);
2158               if (outputLevel > SILENT_OUTPUT)
2159                 Cout << "\n\n";
2160             }
2161             else {
2162               derived_evaluate(new_set);
2163               fn_vals_x2 = currentResponse.function_values();
2164             }
2165 
2166             map_counter += 2;
2167             if (!asynch_flag) {
2168               if (h1 + h2 == 0.) {
2169                 denom = h1*h1;
2170                 for (i = 0; i < numFns; i++)
2171                   // prevent erroneous difference of vals in fn_vals_x0 but not
2172                   // in fn_vals_x_(plus/minus)_2h due to map/fd_hess asv diffs
2173                   if (fd_hess_asv[i] & 1)
2174                     new_fn_hessians[i](j,j)
2175                       = (fn_vals_x1[i] - 2.*fn_vals_x0[i] + fn_vals_x2[i])
2176                       / denom;
2177               }
2178               else {
2179                 hdiff = h1 - h2;
2180                 denom = 0.5*h1*h2*hdiff;
2181                 for (i = 0; i < numFns; i++)
2182                   if (fd_hess_asv[i] & 1)
2183                     new_fn_hessians[i](j,j)
2184                       = (h2*fn_vals_x1[i] + hdiff*fn_vals_x0[i] -
2185                          h1*fn_vals_x2[i])/denom;
2186               }
2187             }
2188 
2189             // evaluate off-diagonal terms
2190 
2191             for (k = 0; k < j; k++) {
2192               size_t xk_index = find_index(cv_ids, orig_dvv[k]);
2193 
2194               // --------------------------------
2195               // Evaluate fn_vals_x12
2196               // --------------------------------
2197               h2 = dx[k];
2198               x[xj_index] = x0[xj_index] + h1;
2199               x[xk_index] = x0[xk_index] + h2;
2200               if (outputLevel > SILENT_OUTPUT)
2201                 Cout << ">>>>> Dakota finite difference Hessian evaluation for "
2202                      << "x[" << j+1 << "] + h, x[" << k+1 << "] + h:\n";
2203               if (active_derivs)
2204                 currentVariables.continuous_variables(x);
2205               else if (inactive_derivs)
2206                 currentVariables.inactive_continuous_variables(x);
2207               else
2208                 currentVariables.all_continuous_variables(x);
2209               if (asynch_flag) {
2210                 derived_evaluate_nowait(new_set);
2211                 if (outputLevel > SILENT_OUTPUT)
2212                   Cout << "\n\n";
2213               }
2214               else {
2215                 derived_evaluate(new_set);
2216                 fn_vals_x12 = currentResponse.function_values();
2217                 denom = h1*h2;
2218                 const RealVector& f2 = fx[k];
2219                 for (i=0; i<numFns; ++i)
2220                   new_fn_hessians[i](j,k) = (fn_vals_x12[i] - fn_vals_x1[i] -
2221                                              f2[i] + fn_vals_x0[i]) / denom;
2222               }
2223 
2224               ++map_counter;
2225               x[xk_index] = x0[xk_index];
2226             }
2227           }
2228         }
2229 
2230         if (fd_hess_by_grad_flag) {
2231 
2232           // Compute the 1st-order Hessian offset for the ith variable.
2233           // Enforce a minimum delta of fdhss*.01
2234           Real fdhbgss = (fdHessByGradStepSize.length() == num_deriv_vars)
2235             ? fdHessByGradStepSize[xj_index] : fdHessByGradStepSize[0];
2236           //	  Real h = FDstep1(x0_j, lb_j, ub_j, fdhbgss *
2237           //			   std::max(std::fabs(x0_j), .01));
2238           Real h = FDstep1(x0_j, lb_j, ub_j,
2239             initialize_h(x0_j, lb_j, ub_j, fdhbgss, fdHessStepType));
2240           if (asynch_flag) // communicate settings to synchronize_derivatives()
2241             deltaList.push_back(h);
2242 
2243           // --------------------------
2244           // Evaluate fn_grads_x_plus_h
2245           // --------------------------
2246           x[xj_index] = x0[xj_index] + h;
2247           if (outputLevel > SILENT_OUTPUT)
2248             Cout << ">>>>> Dakota finite difference Hessian evaluation for x["
2249                  << j+1 << "] + h:\n";
2250           if (active_derivs)
2251             currentVariables.continuous_variables(x);
2252           else if (inactive_derivs)
2253             currentVariables.inactive_continuous_variables(x);
2254           else
2255             currentVariables.all_continuous_variables(x);
2256           if (asynch_flag) {
2257             derived_evaluate_nowait(new_set);
2258             if (outputLevel > SILENT_OUTPUT)
2259               Cout << "\n\n";
2260           }
2261           else {
2262             derived_evaluate(new_set);
2263             const RealMatrix& fn_grads_x_plus_h
2264               = currentResponse.function_gradients();
2265             ifg = j;
2266             for (i=0; i<numFns; i++)
2267               // prevent erroneous difference of grads present in fn_grads_x0
2268               // but not in fn_grads_x_plus_h due to map_asv & fd_hess_asv diffs
2269               // NOTE: symmetry NOT enforced [could replace with 1/2 (H + H^T)]
2270 
2271               if (fd_hess_asv[i] & 2) {
2272                 //fg[ifg] = (fn_grads_x_plus_h[i] - fn_grads_x0[i]) / h;
2273                 for(k = 0; k < num_deriv_vars; ++k)
2274                   fg[ifg][k] = (fn_grads_x_plus_h[i][k] - fn_grads_x0[i][k])/ h;
2275                 ifg += num_deriv_vars;
2276               }
2277           }
2278           ++map_counter;
2279         }
2280       }
2281       x[xj_index] = x0[xj_index];
2282     }
2283 
2284     // Reset currentVariables to x0 (for graphics, etc.)
2285     if (active_derivs)
2286       currentVariables.continuous_variables(x0);
2287     else if (inactive_derivs)
2288       currentVariables.inactive_continuous_variables(x0);
2289     else
2290       currentVariables.all_continuous_variables(x0);
2291   }
2292 
2293   // If asynchronous, then synchronize_derivatives() will postprocess and
2294   // update the response.  If synchronous, then update the response now.
2295   if (!asynch_flag) {
2296     // Enforce symmetry in the case of FD Hessians from 1st-order gradient
2297     // differences by averaging off-diagonal terms: H' = 1/2 (H + H^T)
2298     if (fd_hess_by_grad_flag)
2299       for (i = ifg = 0; i < numFns; i++)
2300         if (fd_hess_asv[i] & 2) {
2301           for (j=0; j<num_deriv_vars; j++) {
2302             for (k = 0; k < j; k++)
2303               new_fn_hessians[i](j,k) = 0.5 * (fg[ifg+j][k] + fg[ifg+k][j]);
2304             new_fn_hessians[i](j,j) = fg[ifg+j][j];
2305           }
2306           ifg += num_deriv_vars;
2307         }
2308 
2309     update_response(currentVariables, currentResponse, fd_grad_asv, fd_hess_asv,
2310                     quasi_hess_asv, original_set, initial_map_response,
2311                     new_fn_grads, new_fn_hessians);
2312   }
2313 
2314   return map_counter;
2315 }
2316 
2317 
2318 /** Merge an array of fd_responses into a single new_response.  This
2319     function is used both by synchronous evaluate() for the
2320     case of asynchronous estimate_derivatives() and by synchronize()
2321     for the case where one or more evaluate_nowait() calls has
2322     employed asynchronous estimate_derivatives(). */
2323 void Model::
synchronize_derivatives(const Variables & vars,const IntResponseMap & fd_responses,Response & new_response,const ShortArray & fd_grad_asv,const ShortArray & fd_hess_asv,const ShortArray & quasi_hess_asv,const ActiveSet & original_set)2324 synchronize_derivatives(const Variables& vars,
2325 			const IntResponseMap& fd_responses,
2326 			Response& new_response, const ShortArray& fd_grad_asv,
2327 			const ShortArray& fd_hess_asv,
2328 			const ShortArray& quasi_hess_asv,
2329 			const ActiveSet& original_set)
2330 {
2331   const SizetArray& orig_dvv = original_set.derivative_vector();
2332   size_t i, j, k, num_deriv_vars = orig_dvv.size();
2333   bool fd_grad_flag = false, fd_hess_flag = false, fd_hess_by_fn_flag = false,
2334     fd_hess_by_grad_flag = false;
2335   RealVector dx;
2336   std::vector<const RealVector*> fx;
2337   size_t ifg, nfg = 0;
2338 
2339   for (i=0; i<numFns; i++) {
2340     if (fd_grad_asv[i])
2341       fd_grad_flag = true;           // gradient finite differencing used
2342     if (fd_hess_asv[i]) {
2343       fd_hess_flag = true;           // Hessian finite differencing used ...
2344       if (fd_hess_asv[i] & 1)
2345         fd_hess_by_fn_flag = true;   // ... with 2nd-order function differences
2346       if (fd_hess_asv[i] & 2)
2347         ++nfg;                       // ... with 1st-order gradient differences
2348     }
2349   }
2350   if (nfg)
2351     fd_hess_by_grad_flag = true;
2352 
2353   RealMatrix new_fn_grads;
2354   if (fd_grad_flag) {
2355     new_fn_grads.shapeUninitialized(num_deriv_vars, numFns);
2356     new_fn_grads = 0.;
2357   }
2358   RealSymMatrixArray new_fn_hessians;
2359   RealVectorArray fg;
2360   if (fd_hess_flag) {
2361     if (fd_hess_by_grad_flag) {
2362       fg.resize(ifg = nfg*num_deriv_vars);
2363       while(ifg > 0)
2364         fg[--ifg].resize(num_deriv_vars);
2365     }
2366     new_fn_hessians.resize(numFns);
2367     for (i=0; i<numFns; i++) {
2368       new_fn_hessians[i].reshape(num_deriv_vars);
2369       new_fn_hessians[i] = 0.;
2370     }
2371   }
2372 
2373   // Get non-finite-diff. portion of the response from 1st fd_responses
2374   // or from a DB capture in estimate_derivatives()
2375   bool initial_map = initialMapList.front(); initialMapList.pop_front();
2376   bool db_capture  = dbCaptureList.front();  dbCaptureList.pop_front();
2377   Response initial_map_response;
2378   IntRespMCIter fd_resp_cit = fd_responses.begin();
2379   if (initial_map) {
2380     initial_map_response = fd_resp_cit->second;
2381     ++fd_resp_cit;
2382   }
2383   else if (db_capture) {
2384     initial_map_response = dbResponseList.front();
2385     dbResponseList.pop_front();
2386   }
2387   else { // construct an empty initial_map_response
2388     ShortArray asv(numFns, 0);
2389     ActiveSet initial_map_set(asv, orig_dvv);
2390     initial_map_response
2391       = Response(currentResponse.shared_data(), initial_map_set);
2392   }
2393 
2394   if (fd_hess_flag && fd_hess_by_fn_flag && !centralHess) {
2395     dx.resize(num_deriv_vars);
2396     fx.resize(num_deriv_vars);
2397   }
2398 
2399   // Postprocess the finite difference responses
2400   if (fd_grad_flag || fd_hess_flag) {
2401     SizetMultiArray cv_ids;
2402     if (orig_dvv == currentVariables.continuous_variable_ids()) {
2403       cv_ids.resize(boost::extents[cv()]);
2404       cv_ids = currentVariables.continuous_variable_ids();
2405     }
2406     else if (orig_dvv == currentVariables.inactive_continuous_variable_ids()) {
2407       cv_ids.resize(boost::extents[icv()]);
2408       cv_ids = currentVariables.inactive_continuous_variable_ids();
2409     }
2410     else { // general derivatives
2411       cv_ids.resize(boost::extents[acv()]);
2412       cv_ids = currentVariables.all_continuous_variable_ids();
2413     }
2414     const RealVector& fn_vals_x0  = initial_map_response.function_values();
2415     const RealMatrix& fn_grads_x0 = initial_map_response.function_gradients();
2416     for (j=0; j<num_deriv_vars; j++) {
2417       size_t xj_index = find_index(cv_ids, orig_dvv[j]);
2418 
2419       if (fd_grad_flag) { // numerical gradients
2420         Real h = deltaList.front(); deltaList.pop_front();// first in, first out
2421 
2422         if (h == 0.) // lower bound == upper bound; report 0 gradient
2423           for (i=0; i<numFns; i++)
2424             new_fn_grads(j,i) = 0.;
2425         else {
2426           const RealVector& fn_vals_x_plus_h
2427             = fd_resp_cit->second.function_values();
2428           ++fd_resp_cit;
2429           if (intervalType == "central") {
2430             Real h1, h12, h2, h22;
2431             const RealVector& fn_vals_x_minus_h
2432               = fd_resp_cit->second.function_values();
2433             ++fd_resp_cit;
2434             h2 = deltaList.front(); deltaList.pop_front();
2435             // no need to check fd_grad_asv since it was used for both map calls
2436             if (h + h2 == 0.) {
2437               h1 = h - h2;
2438               for (i=0; i<numFns; ++i)
2439                 new_fn_grads(j,i)
2440                   = (fn_vals_x_plus_h[i] - fn_vals_x_minus_h[i])/h1;
2441             }
2442             else {
2443               h12 = h*h;
2444               h22 = h2*h2;
2445               h1  = h*h2*(h2-h);
2446               for (i=0; i<numFns; ++i)
2447                 new_fn_grads(j,i)
2448                   = ( h22*(fn_vals_x_plus_h[i]  - fn_vals_x0[i]) -
2449                       h12*(fn_vals_x_minus_h[i] - fn_vals_x0[i]) ) / h1;
2450             }
2451           }
2452           else {
2453             for (i=0; i<numFns; i++)
2454               // prevent erroneous difference of vals present in fn_vals_x0 but
2455               // not in fn_vals_x_plus_h due to map_asv & fd_grad_asv diffs
2456               if (fd_grad_asv[i])
2457                 new_fn_grads(j,i) = (fn_vals_x_plus_h[i] - fn_vals_x0[i])/h;
2458           }
2459         }
2460       }
2461 
2462       if (fd_hess_flag) { // numerical Hessians
2463 
2464         if (fd_hess_by_fn_flag) { // 2nd-order function differences
2465           if (centralHess) {
2466             Real h = deltaList.front(); deltaList.pop_front();// 1st in, 1st out
2467 
2468             // diagonal term
2469 
2470             const RealVector& fn_vals_x_plus_2h
2471               = fd_resp_cit->second.function_values();
2472             ++fd_resp_cit;
2473             const RealVector& fn_vals_x_minus_2h
2474               = fd_resp_cit->second.function_values();
2475             ++fd_resp_cit;
2476             for (i=0; i<numFns; i++)
2477               // prevent erroneous difference of vals present in fn_vals_x0 but
2478               // not in fn_vals_x_(plus/minus)_2h due to map/fd_hess asv diffs
2479               if (fd_hess_asv[i] & 1)
2480                 new_fn_hessians[i](j,j) =
2481                   (fn_vals_x_plus_2h[i] - 2.*fn_vals_x0[i] +
2482                    fn_vals_x_minus_2h[i])/(4.*h*h);
2483 
2484             // off-diagonal terms
2485 
2486             for (k=j+1; k<num_deriv_vars; k++) {
2487               size_t xk_index = find_index(cv_ids, orig_dvv[k]);
2488               const RealVector& fn_vals_x_plus_h_plus_h
2489                 = fd_resp_cit->second.function_values();
2490               ++fd_resp_cit;
2491               const RealVector& fn_vals_x_plus_h_minus_h
2492                 = fd_resp_cit->second.function_values();
2493               ++fd_resp_cit;
2494               const RealVector& fn_vals_x_minus_h_plus_h
2495                 = fd_resp_cit->second.function_values();
2496               ++fd_resp_cit;
2497               const RealVector& fn_vals_x_minus_h_minus_h
2498                 = fd_resp_cit->second.function_values();
2499               ++fd_resp_cit;
2500               for (i=0; i<numFns; i++)
2501                 // no need to check fd_hess_asv since it was used for each eval
2502                 // NOTE: symmetry is naturally satisfied.
2503                 new_fn_hessians[i](j,k)
2504                   = new_fn_hessians[i](k,j)
2505                   = (fn_vals_x_plus_h_plus_h[i]  - fn_vals_x_plus_h_minus_h[i]
2506                   -  fn_vals_x_minus_h_plus_h[i] + fn_vals_x_minus_h_minus_h[i])
2507                   / (4.*h*h);
2508             }
2509           }
2510           else { //!!
2511             Real denom, h1, h2, hdiff;
2512             const RealVector *fx1, *fx12, *fx2;
2513 
2514             dx[j] = h1 = deltaList.front(); deltaList.pop_front();
2515             h2 = deltaList.front(); deltaList.pop_front();
2516             fx[j] = fx1 = &fd_resp_cit->second.function_values();
2517             ++fd_resp_cit;
2518             fx2 = &fd_resp_cit->second.function_values();
2519             ++fd_resp_cit;
2520             if (h1 + h2 == 0.) {
2521               denom = h1*h1;
2522               for(i = 0; i < numFns; ++i)
2523                 if (fd_hess_asv[i] & 1)
2524                   new_fn_hessians[i](j,j)
2525                     = ((*fx1)[i] - 2.*fn_vals_x0[i] + (*fx2)[i]) / denom;
2526             }
2527             else {
2528               hdiff = h1 - h2;
2529               denom = 0.5*h1*h2*hdiff;
2530               for (i = 0; i < numFns; i++)
2531                 if (fd_hess_asv[i] & 1)
2532                   new_fn_hessians[i](j,j)
2533                     = (h2*(*fx1)[i] + hdiff*fn_vals_x0[i] - h1*(*fx2)[i])/denom;
2534             }
2535 
2536             // off-diagonal terms
2537 
2538             for(k = 0; k < j; ++k) {
2539               size_t xk_index = find_index(cv_ids, orig_dvv[k]);
2540               h2 = dx[k];
2541               denom = h1*h2;
2542               fx2 = fx[k];
2543               fx12 = &fd_resp_cit->second.function_values();
2544               ++fd_resp_cit;
2545               for (i = 0; i < numFns; i++)
2546                 new_fn_hessians[i](j,k) =
2547                   ((*fx12)[i] - (*fx1)[i] - (*fx2)[i] + fn_vals_x0[i]) / denom;
2548             }
2549           }
2550         }
2551         if (fd_hess_by_grad_flag) { // 1st-order gradient differences
2552           Real h = deltaList.front(); deltaList.pop_front(); // 1st in, 1st out
2553 
2554           const RealMatrix& fn_grads_x_plus_h
2555             = fd_resp_cit->second.function_gradients();
2556           ++fd_resp_cit;
2557           ifg = j;
2558           for (i=0; i<numFns; i++)
2559             // prevent erroneous difference of grads present in fn_grads_x0 but
2560             // not in fn_grads_x_plus_h due to map_asv & fd_hess_asv diffs
2561             // NOTE: symmetry must be enforced below.
2562             if (fd_hess_asv[i] & 2) {
2563               //fg[ifg] = (fn_grads_x_plus_h[i] - fn_grads_x0[i]) / h;
2564               for(k = 0; k < num_deriv_vars; ++k)
2565                 fg[ifg][k] = (fn_grads_x_plus_h[i][k] - fn_grads_x0[i][k]) / h;
2566               ifg += num_deriv_vars;
2567             }
2568         }
2569       }
2570     }
2571   }
2572 
2573   // Enforce symmetry in the case of FD Hessians from 1st-order gradient
2574   // differences by averaging off-diagonal terms: H' = 1/2 (H + H^T)
2575   if (fd_hess_by_grad_flag)
2576     for (i=0; i<numFns; i++)
2577       if (fd_hess_asv[i] & 2)
2578         for (i = ifg = 0; i < numFns; i++)
2579           if (fd_hess_asv[i] & 2) {
2580             for (j=0; j<num_deriv_vars; j++) {
2581               for (k = 0; k < j; k++)
2582                 new_fn_hessians[i](j,k) = 0.5 * (fg[ifg+j][k] + fg[ifg+k][j]);
2583               new_fn_hessians[i](j,j) = fg[ifg+j][j];
2584             }
2585             ifg += num_deriv_vars;
2586           }
2587 
2588   update_response(vars, new_response, fd_grad_asv, fd_hess_asv, quasi_hess_asv,
2589                   original_set, initial_map_response, new_fn_grads,
2590                   new_fn_hessians);
2591 }
2592 
2593 
2594 /** Overlay the initial_map_response with numerically estimated new_fn_grads
2595     and new_fn_hessians to populate new_response as governed by asv vectors.
2596     Quasi-Newton secant Hessian updates are also performed here, since this
2597     is where the gradient data needed for the updates is first consolidated.
2598     Convenience function used by estimate_derivatives() for the synchronous
2599     case and by synchronize_derivatives() for the asynchronous case. */
2600 void Model::
update_response(const Variables & vars,Response & new_response,const ShortArray & fd_grad_asv,const ShortArray & fd_hess_asv,const ShortArray & quasi_hess_asv,const ActiveSet & original_set,Response & initial_map_response,const RealMatrix & new_fn_grads,const RealSymMatrixArray & new_fn_hessians)2601 update_response(const Variables& vars, Response& new_response,
2602 		const ShortArray& fd_grad_asv, const ShortArray& fd_hess_asv,
2603 		const ShortArray& quasi_hess_asv, const ActiveSet& original_set,
2604 		Response& initial_map_response, const RealMatrix& new_fn_grads,
2605 		const RealSymMatrixArray& new_fn_hessians)
2606 {
2607   // ----------
2608   // Initialize
2609   // ----------
2610 
2611   // If estDerivsFlag is set, then new_response was initially built with its
2612   // default constructor and has a NULL rep.  In this case, allocate a new
2613   // response by copying currentResponse.  This is a convenient way to carry
2614   // over fnTags and interfaceId to the new response.
2615   if (new_response.is_null())
2616     new_response = currentResponse.copy();
2617 
2618   size_t i;
2619   bool initial_map = false, initial_map_fn_flag = false,
2620        initial_map_grad_flag = false, initial_map_hess_flag = false,
2621        fd_grad_flag = false, fd_hess_flag = false, quasi_hess_flag = false;
2622   const ShortArray& initial_map_asv
2623     = initial_map_response.active_set_request_vector();
2624   for (i=0; i<numFns; i++) {
2625     if (initial_map_asv[i]) {
2626       initial_map = true;
2627       if (initial_map_asv[i] & 1)
2628 	initial_map_fn_flag = true;
2629       if (initial_map_asv[i] & 2)
2630 	initial_map_grad_flag = true;
2631       if (initial_map_asv[i] & 4)
2632 	initial_map_hess_flag = true;
2633     }
2634     if (fd_grad_asv[i])
2635       fd_grad_flag = true; // gradient finite differencing used
2636     if (fd_hess_asv[i])
2637       fd_hess_flag = true; // Hessian finite differencing used
2638     if (quasi_hess_asv[i])
2639       quasi_hess_flag = true; // quasi-Hessians needed in new_response
2640   }
2641 
2642   // ----------------------
2643   // Update function values
2644   // ----------------------
2645 
2646   if (initial_map_fn_flag)
2647     new_response.function_values(initial_map_response.function_values());
2648   // else reset_inactive() zero's out all of the function values
2649 
2650   // -------------------------
2651   // Update function gradients
2652   // -------------------------
2653 
2654   if (initial_map) {
2655     if (fd_grad_flag) { // merge new_fn_grads with initial map grads
2656       RealMatrix partial_fn_grads;
2657       if (initial_map_grad_flag)
2658 	partial_fn_grads = initial_map_response.function_gradients();
2659       else
2660 	partial_fn_grads.shape(new_fn_grads.numRows(), new_fn_grads.numCols());
2661       for (i=0; i<numFns; ++i) {
2662 	// overwrite partial_fn_grads with new_fn_grads based on fd_grad_asv
2663 	// (needed for case of mixed gradients)
2664 	if (fd_grad_asv[i]) {
2665 	  RealVector new_fn_gradi_col_vector = Teuchos::getCol(Teuchos::View,
2666 	    const_cast<RealMatrix&>(new_fn_grads), (int)i);
2667 	  Teuchos::setCol(new_fn_gradi_col_vector, (int)i, partial_fn_grads);
2668         }
2669       }
2670       new_response.function_gradients(partial_fn_grads);
2671     }
2672     else if (initial_map_grad_flag) // no merge: initial map grads are complete
2673       new_response.function_gradients(
2674         initial_map_response.function_gradients());
2675   }
2676   else if (fd_grad_flag) // no merge: new_fn_grads are complete
2677     new_response.function_gradients(new_fn_grads);
2678 
2679   // ------------------------
2680   // Update function Hessians
2681   // ------------------------
2682 
2683   // perform quasi-Newton updates if quasi_hessians have been specified by the
2684   // user, the response data is uncorrected, and the DVV is set to the active
2685   // continuous variables (the default).
2686   if ( supportsEstimDerivs &&
2687        surrogate_response_mode() != AUTO_CORRECTED_SURROGATE &&
2688        original_set.derivative_vector() ==
2689        currentVariables.continuous_variable_ids() &&
2690        ( hessianType == "quasi" ||
2691 	 ( hessianType == "mixed" && !hessIdQuasi.empty() ) ) )
2692     update_quasi_hessians(vars, new_response, original_set);
2693 
2694   // overlay Hessian data as needed
2695   if (initial_map || hessianType == "mixed") {
2696     // merge initial map Hessians, new_fn_hessians, and quasiHessians
2697     if (fd_hess_flag || quasi_hess_flag) {
2698       RealSymMatrixArray partial_fn_hessians;
2699       if (initial_map_hess_flag)
2700 	partial_fn_hessians = initial_map_response.function_hessians();
2701       else
2702 	partial_fn_hessians.resize(numFns);
2703       for (i=0; i<numFns; i++) {
2704 	if (fd_hess_asv[i])    // overwrite with FD Hessians
2705 	  partial_fn_hessians[i] = new_fn_hessians[i]; // full matrix
2706 	if (quasi_hess_asv[i]) // overwrite with quasi-Hessians
2707 	  partial_fn_hessians[i] = quasiHessians[i];   // full matrix
2708       }
2709       new_response.function_hessians(partial_fn_hessians);
2710     }
2711     else if (initial_map_hess_flag)
2712       new_response.function_hessians(initial_map_response.function_hessians());
2713   }
2714   else if (fd_hess_flag)    // no merge necessary
2715     new_response.function_hessians(new_fn_hessians);
2716   else if (quasi_hess_flag) // no merge necessary
2717     new_response.function_hessians(quasiHessians);
2718 
2719   // --------
2720   // Finalize
2721   // --------
2722 
2723   // update newly rebuilt new_response with the original asv request
2724   new_response.active_set_request_vector(original_set.request_vector());
2725   new_response.reset_inactive(); // clear out any left overs
2726 
2727   // Output header for the rebuilt new_response
2728   if (outputLevel > QUIET_OUTPUT) {
2729     if (initial_map)
2730       Cout << ">>>>> Total response returned to iterator:\n\n";
2731     else
2732       Cout << ">>>>> Gradients returned to iterator:\n\n";
2733     Cout << new_response << std::endl;
2734   }
2735 }
2736 
2737 
2738 /** quasi-Newton updates are performed for approximating response
2739     function Hessians using BFGS or SR1 formulations.  These Hessians
2740     are supported only for the active continuous variables, and a
2741     check is performed on the DVV prior to invoking the function. */
2742 void Model::
update_quasi_hessians(const Variables & vars,Response & new_response,const ActiveSet & original_set)2743 update_quasi_hessians(const Variables& vars, Response& new_response,
2744 		      const ActiveSet& original_set)
2745 {
2746   size_t i, j, k;
2747   const RealVector& x        = vars.continuous_variables(); // view
2748   const ShortArray& orig_asv = original_set.request_vector();
2749   const RealMatrix& fn_grads = new_response.function_gradients();
2750 
2751   // if necessary, initialize quasi-Hessians before populating
2752   if ( numQuasiUpdates.empty() ) {
2753     if (outputLevel == DEBUG_OUTPUT)
2754       Cout << "Reshaping quasiHessians in modelType = " << modelType
2755 	   << std::endl;
2756     xPrev.resize(numFns);
2757     fnGradsPrev.reshape(numDerivVars, numFns);
2758     quasiHessians.resize(numFns);
2759     for (size_t i=0; i<numFns; i++) {
2760       if ( hessianType == "quasi" ||
2761 	   ( hessianType == "mixed" && contains(hessIdQuasi, i+1) ) ) {
2762 	quasiHessians[i].reshape(numDerivVars);
2763 	quasiHessians[i] = 0.;
2764 	// leave as zero matrix so that any early use of quasi-Hessian has
2765 	// no effect.  The initial scaling updates to nonzero values at the
2766 	// appropriate time.
2767 	//for (size_t j=0; j<numDerivVars; j++)
2768 	//  quasiHessians[i][j][j] = 1.; // initialize to identity
2769       }
2770     }
2771     numQuasiUpdates.assign(numFns, 0);
2772   }
2773 
2774   for (i=0; i<numFns; ++i) {
2775 
2776     // perform i-th quasi-Hessian update if a new grad vector is present,
2777     // regardless of whether the quasi-Hessian is active on this eval.
2778     if ( !quasiHessians[i].empty() && (orig_asv[i] & 2) ) {
2779 
2780       // quasi-Hessian updates require a history of at least 2 gradient evals
2781       Real norm_s = 0.;
2782       if (numQuasiUpdates[i]) {
2783 
2784 	RealVector s(numDerivVars), y(numDerivVars);
2785 	Real sj, yj, norm_s_sq = 0., norm_y_sq = 0.;
2786 	for (j=0; j<numDerivVars; ++j) {
2787 	  s[j] = sj = x[j]           - xPrev[i][j];       // s = step
2788 	  y[j] = yj = fn_grads[i][j] - fnGradsPrev[i][j]; // y = yield
2789 	  norm_s_sq += sj*sj; // avoid repeated indexing
2790 	  norm_y_sq += yj*yj; // avoid repeated indexing
2791 	}
2792 	norm_s = std::sqrt(norm_s_sq);
2793 	Real norm_y = std::sqrt(norm_y_sq);
2794 	if (outputLevel == DEBUG_OUTPUT)
2795 	  Cout << "Quasi-Hessian step/yield:\ns =\n" << s << "y =\n" << y
2796 	       << "norm_s = " << norm_s << " norm_y = " << norm_y << '\n';
2797 
2798 	// Verify that there's a non-zero step (zero yield is acceptable).
2799 	// Don't update anything (including history) if step is zero.
2800 	if (norm_s > Pecos::SMALL_NUMBER) {
2801 
2802 	  // -------------------------------------------
2803 	  // Apply initial scaling prior to first update
2804 	  // -------------------------------------------
2805 	  //   Gay:             y's/s's I  (scaling1/norm_s_sq)
2806 	  //   Dennis/Schnabel: y's/s'Hs I (same as Gay if initial H = I)
2807 	  //   Shanno/Phua:     y'y/y's I  (current selection)
2808 	  if (numQuasiUpdates[i] == 1) {
2809 	    Real scaling1 = 0.;
2810 	    for (j=0; j<numDerivVars; j++)
2811 	      scaling1 += y[j]*s[j];
2812 	    // step is nonzero but yield may reasonably be zero, so safeguard
2813 	    // numerics.  In the case of no yield in gradients (no observed
2814 	    // curvature), 0 is assigned as the initial scaling.
2815 	    Real scaling2 = 0.;
2816 	    if (norm_y > Pecos::SMALL_NUMBER)
2817 	      scaling2 = (std::sqrt(std::fabs(scaling1)) > Pecos::SMALL_NUMBER)
2818                        ? norm_y_sq/scaling1 : 1.;
2819 	    for (j=0; j<numDerivVars; j++)
2820 	      quasiHessians[i](j,j) = scaling2;
2821 	    if (outputLevel == DEBUG_OUTPUT)
2822 	      Cout << "initial scaling =\n" << quasiHessians[i] << '\n';
2823 	  }
2824 
2825 	  // ----------------------
2826 	  // Symmetric Rank 1 (SR1)
2827 	  // ----------------------
2828 	  if (quasiHessType == "sr1") {
2829 	    RealVector ymBs(numDerivVars);
2830 	    Real scaling = 0.;
2831 	    for (j=0; j<numDerivVars; j++) {
2832 	      Real Bs = 0.;
2833 	      for (k=0; k<numDerivVars; k++)
2834 		Bs += quasiHessians[i](j,k)*s[k];
2835 	      ymBs[j] = y[j] - Bs;
2836 	      scaling += ymBs[j]*s[j];
2837 	    }
2838 	    // use standard safeguard against orthogonality in denominator.
2839 	    // skip update if denominator magnitude is insufficient.  Guanghui
2840 	    // Liu thesis uses 1.e-4 (p. 57) and Nocedal and Wright uses 1.e-8
2841 	    // (pp. 203-4).  Here we split the difference with 1.e-6.
2842 	    Real norm_ymBs = 0.;
2843 	    for (j=0; j<numDerivVars; j++)
2844 	      norm_ymBs += ymBs[j]*ymBs[j];
2845 	    norm_ymBs = std::sqrt(norm_ymBs);
2846 	    // perform update
2847 	    if (std::fabs(scaling) > 1.e-6*norm_s*norm_ymBs) {
2848 	      for (j=0; j<numDerivVars; j++) {
2849 		// exploit hereditary symmetry: compute upper half of matrix
2850 		quasiHessians[i](j,j) += ymBs[j]*ymBs[j]/scaling;
2851 		for (k=j+1; k<numDerivVars; k++)
2852 		  quasiHessians[i](k,j) = quasiHessians[i](j,k) +=
2853 		    ymBs[j]*ymBs[k]/scaling;
2854 	      }
2855 	      if (outputLevel == DEBUG_OUTPUT)
2856 		Cout << "SR1: B = " << quasiHessians[i] << '\n';
2857 	    }
2858 	    else if (outputLevel == DEBUG_OUTPUT)
2859 	      Cout << "SR1: update skipped\n";
2860 	  }
2861 	  // ---------------------------------------
2862 	  // Broyden-Fletcher-Goldfarb-Shanno (BFGS)
2863 	  // ---------------------------------------
2864 	  else {
2865 	    bool damped_bfgs = (quasiHessType == "damped_bfgs") ? true : false;
2866 	    RealVector Bs(numDerivVars, true);
2867 	    Real scaling1 = 0., scaling2 = 0.;
2868 	    for (j=0; j<numDerivVars; j++) {
2869 	      scaling1 += y[j]*s[j];
2870 	      for (k=0; k<numDerivVars; k++)
2871 		Bs[j] += quasiHessians[i](j,k)*s[k];
2872 	      scaling2 += s[j]*Bs[j];
2873 	    }
2874 	    // These two approaches appear in the literature for enforcing the
2875 	    // curvature condition when the steps are generated by a Newton
2876 	    // optimizer:
2877 	    //   damp: if (scaling1 >=  0.2*scaling2) { update } else { damp }
2878 	    //   skip: if (scaling1 >= 1e-6*scaling2) { update }
2879 	    // These approaches appear to be inappropriate when the steps are
2880 	    // not directly Newton-generated (e.g., from trust-region SBO) since
2881 	    // the curvature condition is violated too frequently.  In this
2882 	    // case, a minimal approach can be used which only protects the
2883 	    // denominator magnitude.  The BFGS update can lose positive
2884 	    // definiteness in this case -> use fabs on scaling2 triple product.
2885 	    using std::fabs;
2886 	    if ( ( !damped_bfgs && fabs(scaling1) > 1.e-6*fabs(scaling2) ) ||
2887 		 (  damped_bfgs && scaling1 >= 0.2*scaling2 ) ) {
2888 	      // standard BFGS (in damped formulation: theta=1 and r=y)
2889 	      for (j=0; j<numDerivVars; j++) {
2890 		// exploit hereditary symmetry: compute upper half of matrix
2891 		quasiHessians[i](j,j) +=  y[j]* y[j]/scaling1
2892                                        -  Bs[j]*Bs[j]/scaling2;
2893 		for (k=j+1; k<numDerivVars; k++)
2894 		  quasiHessians[i](k,j) = quasiHessians[i](j,k) +=
2895 		    y[j]*y[k]/scaling1 - Bs[j]*Bs[k]/scaling2;
2896 	      }
2897 	      if (outputLevel == DEBUG_OUTPUT)
2898 		Cout << "BFGS: y's = " << scaling1 << " s'Bs = " << scaling2
2899 		     << "\nB = " << quasiHessians[i] << '\n';
2900 	    }
2901 	    else if (damped_bfgs) {
2902 	      // damped BFGS is a standard safeguard against violation of the
2903 	      // curvature condition (y's should be > 0).  See Nocedal & Wright
2904 	      // (pp. 540-1) or Guanghui Liu thesis (p. 57), among others.
2905 	      RealVector r(numDerivVars);
2906 	      Real theta = 0.8*scaling2/(scaling2 - scaling1);
2907 	      //Cout << "Damped BFGS:   y's = " << scaling1 << " s'Bs = "
2908               //     << scaling2 << " theta = " << theta;
2909 	      scaling1 = 0.;
2910 	      for (j=0; j<numDerivVars; j++) {
2911 		r[j] = theta*y[j] + (1.-theta)*Bs[j];
2912 		scaling1 += r[j]*s[j];
2913 	      }
2914 	      for (j=0; j<numDerivVars; j++) {
2915 		// exploit hereditary symmetry: compute upper half of matrix
2916 		quasiHessians[i](j,j) +=  r[j]* r[j]/scaling1
2917                                        -  Bs[j]*Bs[j]/scaling2;
2918 		for (k=j+1; k<numDerivVars; k++)
2919 		  quasiHessians[i](k,j) = quasiHessians[i](j,k) +=
2920 		    r[j]*r[k]/scaling1 - Bs[j]*Bs[k]/scaling2;
2921 	      }
2922 	      if (outputLevel == DEBUG_OUTPUT)
2923 		Cout << "Damped BFGS: r's = " << scaling1 << "\nB = "
2924 		     << quasiHessians[i] << '\n';
2925 	    }
2926 	    else if (outputLevel == DEBUG_OUTPUT)
2927 	      Cout << "Undamped BFGS: update skipped\n";
2928 	  }
2929 	}
2930       }
2931 
2932       // history updates for next iteration
2933       if ( numQuasiUpdates[i] == 0 || norm_s > Pecos::SMALL_NUMBER ) {
2934 	// store previous data independently for each response fn.  So long
2935 	// as at least one previous data pt has been stored, we do not need
2936 	// to track the presence of active grads in particular responses.
2937 	copy_data(x, xPrev[i]); // view->copy
2938 	RealVector tmp_col_vector = Teuchos::getCol(Teuchos::View,
2939 	  const_cast<RealMatrix&>(fn_grads), (int)i);
2940 	Teuchos::setCol(tmp_col_vector, (int)i, fnGradsPrev);
2941 	++numQuasiUpdates[i];
2942       }
2943     }
2944   }
2945 }
2946 
2947 
2948 /** Splits asv_in total request into map_asv_out, fd_grad_asv_out,
2949     fd_hess_asv_out, and quasi_hess_asv_out as governed by the
2950     responses specification.  If the returned use_est_deriv is true,
2951     then these asv outputs are used by estimate_derivatives() for the
2952     initial map, finite difference gradient evals, finite difference
2953     Hessian evals, and quasi-Hessian updates, respectively.  If the
2954     returned use_est_deriv is false, then only map_asv_out is used. */
manage_asv(const ActiveSet & original_set,ShortArray & map_asv_out,ShortArray & fd_grad_asv_out,ShortArray & fd_hess_asv_out,ShortArray & quasi_hess_asv_out)2955 bool Model::manage_asv(const ActiveSet& original_set, ShortArray& map_asv_out,
2956 		       ShortArray& fd_grad_asv_out, ShortArray& fd_hess_asv_out,
2957 		       ShortArray& quasi_hess_asv_out)
2958 {
2959   const ShortArray& asv_in   = original_set.request_vector();
2960   const SizetArray& orig_dvv = original_set.derivative_vector();
2961 
2962   // *_asv_out[i] have all been initialized to zero
2963 
2964   // For HierarchSurr and Recast models with no scaling (which contain no
2965   // interface object, only subordinate models), pass the ActiveSet through to
2966   // the sub-models in one piece and do not break it apart here. This preserves
2967   // sub-model parallelism.
2968   if (!supportsEstimDerivs)
2969     return false;
2970 
2971   bool use_est_deriv = false, fd_grad_flag = false;
2972   size_t i, asv_len = asv_in.size();
2973   for (i=0; i<asv_len; ++i) {
2974 
2975     // Function value requests
2976     if (asv_in[i] & 1)
2977       map_asv_out[i] = 1;
2978 
2979     // Function gradient requests
2980     if (asv_in[i] & 2) {
2981       if ( gradientType == "analytic" ||
2982            ( gradientType == "mixed" && contains(gradIdAnalytic, i+1) ) )
2983         map_asv_out[i] |= 2; // activate 2nd bit
2984       else if ( methodSource == "dakota" && ( gradientType == "numerical" ||
2985 		( gradientType == "mixed" && contains(gradIdNumerical, i+1)))) {
2986         fd_grad_asv_out[i] = 1;
2987         fd_grad_flag = true;
2988         if (intervalType == "forward")
2989           map_asv_out[i] |= 1; // activate 1st bit
2990         use_est_deriv = true;
2991       }
2992       else { // could happen if an iterator requiring gradients is selected
2993         // with no_gradients or unsupported vendor numerical gradients
2994         // and lacks a separate error check.
2995         Cerr << "Error: Model '" << model_id()
2996              << "' received unsupported gradient request from ASV in "
2997              << "Model::manage_asv." << std::endl;
2998         abort_handler(MODEL_ERROR);
2999       }
3000       if ( surrogate_response_mode() != AUTO_CORRECTED_SURROGATE &&
3001            ( hessianType == "quasi" ||
3002              ( hessianType == "mixed" && contains(hessIdQuasi, i+1) ) ) )
3003         use_est_deriv = true;
3004     }
3005 
3006     // Function Hessian requests
3007     if (asv_in[i] & 4) {
3008       if ( hessianType == "analytic" ||
3009            ( hessianType == "mixed" && contains(hessIdAnalytic, i+1) ) )
3010         map_asv_out[i] |= 4; // activate 3rd bit
3011       else if ( hessianType == "numerical" ||
3012                 ( hessianType == "mixed" && contains(hessIdNumerical, i+1) ) ) {
3013         if ( gradientType == "analytic" ||
3014              ( gradientType == "mixed" && contains(gradIdAnalytic, i+1) ) ) {
3015           // numerical Hessians from 1st-order gradient differences
3016           fd_hess_asv_out[i] = 2;
3017           map_asv_out[i] |= 2; // activate 2nd bit
3018         }
3019         else { // numerical Hessians from 2nd-order function differences
3020           fd_hess_asv_out[i] = 1;
3021           map_asv_out[i] |= 1; // activate 1st bit
3022         }
3023         use_est_deriv = true;
3024       }
3025       else if ( hessianType == "quasi" ||
3026                 (hessianType == "mixed" && contains(hessIdQuasi, i+1))) {
3027         quasi_hess_asv_out[i] = 2; // value not currently used
3028         use_est_deriv = true; // update_response needed even if no secant update
3029       }
3030       else { // could happen if an iterator requiring Hessians is selected
3031         // with no_hessians and it lacks a separate error check.
3032         Cerr << "Error: Model '" << model_id()
3033              << "' received unsupported Hessian request from ASV in "
3034              << "Model::manage_asv." << std::endl;
3035         abort_handler(MODEL_ERROR);
3036       }
3037     }
3038   }
3039 
3040   // Depending on bounds-respecting differencing, finite difference gradients
3041   // may require f(x0).  The following computes the step and updates shortStep.
3042   if (fd_grad_flag && !ignoreBounds) { // protect call to forward_grad_step
3043     size_t num_deriv_vars = orig_dvv.size();
3044 
3045     // define lower/upper bounds for finite differencing and cv_ids
3046     RealVector x0, fd_lb, fd_ub;
3047     bool active_derivs, inactive_derivs; // derivs w.r.t. {active,inactive} vars
3048     SizetMultiArrayConstView cv_ids =
3049       initialize_x0_bounds(orig_dvv, active_derivs, inactive_derivs, x0,
3050 			   fd_lb, fd_ub);
3051 
3052     // Accumulate short step over all derivative variables
3053     bool short_step = false;
3054     for (size_t j=0; j<num_deriv_vars; j++) {
3055       size_t xj_index = find_index(cv_ids, orig_dvv[j]);
3056       Real x0_j = x0[xj_index], lb_j = fd_lb[j], ub_j = fd_ub[j];
3057 
3058       // NOTE: resets shortStep to false for each variable
3059       Real h = forward_grad_step(num_deriv_vars, xj_index, x0_j, lb_j, ub_j);
3060       if (intervalType == "central")
3061         Real h2 = FDstep2(x0_j, lb_j, ub_j, h);
3062 
3063       if (shortStep)
3064         short_step = true;
3065     }
3066 
3067     // update ASV with f(x0) requests needed for shortStep
3068     for (i=0; i<asv_len; ++i)
3069       if ( (fd_grad_asv_out[i] & 1) && short_step)
3070         map_asv_out[i] |= 1; // activate 1st bit
3071   }
3072 
3073   return use_est_deriv;
3074 }
3075 
3076 
3077 /** Constructor helper to manage model recastings for data import/export. */
manage_data_recastings()3078 bool Model::manage_data_recastings()
3079 {
3080   if (modelRep) // should not occur: protected fn only used by the letter
3081     return modelRep->manage_data_recastings(); // fwd to letter
3082   else { // letter lacking redefinition of virtual fn.
3083     // Test for any recasting or nesting within actualModel: we assume that
3084     // user data import is post-nesting, but pre-recast.
3085     // (1) data is imported at the user-space level but then must be applied
3086     //     within the transformed space.  Transform imported data at run time
3087     //     in order to capture latest initialize() calls to RecastModels.
3088     // (2) stop the recursion if a nested model is encountered: we will apply
3089     //     any recastings that occur following the last nesting.
3090     // (3) Additional surrogates in this recursion hierarchy are ignored.
3091     ModelList& sub_models = subordinate_models(); // populates/returns modelList
3092     ModelLIter ml_it; size_t i, num_models = sub_models.size();
3093     bool manage_recasting = false;
3094     recastFlags.assign(num_models, false);
3095     // detect recasting needs top down
3096     for (ml_it=sub_models.begin(), i=0; ml_it!=sub_models.end(); ++ml_it, ++i) {
3097       const String& m_type = ml_it->model_type();
3098       if (m_type == "recast" ||
3099 	  m_type == "probability_transform") // + other Recast types...
3100 	manage_recasting = recastFlags[i] = true;
3101       else if (m_type == "nested")
3102 	break;
3103     }
3104 
3105     if (!manage_recasting) recastFlags.clear();
3106     return manage_recasting;
3107   }
3108 }
3109 
3110 
/** Transform (vars, resp) data from user (import) space into the
    iterator's working space by applying, bottom up, each recast layer
    flagged in recastFlags.
    NOTE(review): assumes recastFlags/modelList were populated by a prior
    manage_data_recastings() call -- sized identically there. */
void Model::
user_space_to_iterator_space(const Variables& user_vars,
			     const Response&  user_resp,
			     Variables& iter_vars, Response& iter_resp)
{
  if (modelRep) // fwd to letter
    return modelRep->user_space_to_iterator_space(user_vars, user_resp,
						  iter_vars, iter_resp);
  else { // letter lacking redefinition of virtual fn.

    iter_vars = user_vars.copy(); // deep copy to preserve inactive in source
    iter_resp = user_resp;//.copy(); // shallow copy currently sufficient

    // apply recastings bottom up (e.g., for data import)
    // modelList assigned in manage_data_recastings() -> subordinate_models()
    // (don't want to incur this overhead for every import/export)
    // (size()-1 wraps when modelList is empty, but the loop body is then
    // never entered since rbegin()==rend())
    ModelLRevIter ml_rit; size_t i;
    for (ml_rit =modelList.rbegin(), i=modelList.size()-1;
	 ml_rit!=modelList.rend(); ++ml_rit, --i)
      if (recastFlags[i]) {
	// utilize RecastModel::current{Variables,Response} to xform data
	Variables recast_vars = ml_rit->current_variables(); // shallow copy
	Response  recast_resp = ml_rit->current_response();  // shallow copy
	ActiveSet recast_set  = recast_resp.active_set();    // copy
	// to propagate vars bottom up, inverse of std transform is reqd
	RecastModel& recast_model_rep =
	  *std::static_pointer_cast<RecastModel>(ml_rit->model_rep());
	recast_model_rep.inverse_transform_variables(iter_vars, recast_vars);
	recast_model_rep.
	  inverse_transform_set(iter_vars, iter_resp.active_set(), recast_set);
	// to propagate response bottom up, std transform is used
	recast_resp.active_set(recast_set);
	recast_model_rep.
	  transform_response(recast_vars, iter_vars, iter_resp, recast_resp);
	// update active in iter_vars
	iter_vars.active_variables(recast_vars);
	// reassign resp rep pointer (no actual data copying)
	iter_resp = recast_resp; // sufficient for now...
	//iter_resp.active_set(recast_set);
	//iter_resp.update(recast_resp);
      }
  }
}
3154 
3155 
/** Transform (vars, resp) data from the iterator's working space back to
    user (export) space by applying, top down, each recast layer flagged in
    recastFlags.
    NOTE(review): assumes recastFlags/modelList were populated by a prior
    manage_data_recastings() call -- sized identically there. */
void Model::
iterator_space_to_user_space(const Variables& iter_vars,
			     const Response&  iter_resp,
			     Variables& user_vars, Response& user_resp)
{
  if (modelRep) // fwd to letter
    return modelRep->iterator_space_to_user_space(iter_vars, iter_resp,
						  user_vars, user_resp);
  else { // letter lacking redefinition of virtual fn.

    user_vars = iter_vars.copy(); // deep copy to preserve inactive in source
    user_resp = iter_resp;//.copy(); // shallow copy currently sufficient

    // apply recastings top down (e.g., for data export)
    // modelList assigned in manage_data_recastings() -> subordinate_models()
    // (don't want to incur this overhead for every import/export)
    ModelLIter ml_it; size_t i;
    for (ml_it=modelList.begin(), i=0; ml_it!=modelList.end(); ++ml_it, ++i)
      if (recastFlags[i]) {
	// utilize RecastModel::current{Variables,Response} to xform data
	Variables recast_vars = ml_it->current_variables(); // shallow copy
	Response  recast_resp = ml_it->current_response();  // shallow copy
	ActiveSet recast_set  = recast_resp.active_set();   // copy
	// to propagate vars top down, forward transform is reqd
	RecastModel& recast_model_rep =
	  *std::static_pointer_cast<RecastModel>(ml_it->model_rep());
	recast_model_rep.transform_variables(user_vars, recast_vars);
	recast_model_rep.
	  transform_set(user_vars, user_resp.active_set(), recast_set);
	// to propagate response top down, inverse transform is used.  Note:
	// derivatives are not currently exported --> a no-op for Nataf.
	recast_resp.active_set(recast_set);
	recast_model_rep.inverse_transform_response(recast_vars, user_vars,
						     user_resp, recast_resp);
	// update active in iter_vars
	user_vars.active_variables(recast_vars);
	// reassign resp rep pointer (no actual data copying)
	user_resp = recast_resp; // sufficient for now...
	//user_resp.active_set(recast_set);
	//user_resp.update(recast_resp);
      }
  }
}
3199 
3200 
derived_evaluate(const ActiveSet & set)3201 void Model::derived_evaluate(const ActiveSet& set)
3202 {
3203   if (modelRep) // should not occur: protected fn only used by the letter
3204     modelRep->derived_evaluate(set); // envelope fwd to letter
3205   else { // letter lacking redefinition of virtual fn.
3206     Cerr << "Error: Letter lacking redefinition of virtual derived_compute_"
3207          << "response() function.\nNo default defined at base class."
3208 	 << std::endl;
3209     abort_handler(MODEL_ERROR);
3210   }
3211 }
3212 
3213 
derived_evaluate_nowait(const ActiveSet & set)3214 void Model::derived_evaluate_nowait(const ActiveSet& set)
3215 {
3216   if (modelRep) // should not occur: protected fn only used by the letter
3217     modelRep->derived_evaluate_nowait(set); // envelope fwd to letter
3218   else { // letter lacking redefinition of virtual fn.
3219     Cerr << "Error: Letter lacking redefinition of virtual derived_asynch_"
3220          << "evaluate() function.\nNo default defined at base class."
3221          << std::endl;
3222     abort_handler(MODEL_ERROR);
3223   }
3224 }
3225 
3226 
derived_synchronize()3227 const IntResponseMap& Model::derived_synchronize()
3228 {
3229   if (!modelRep) { // letter lacking redefinition of virtual fn.
3230     Cerr << "Error: Letter lacking redefinition of virtual derived_synchronize"
3231          << "() function.\n       derived_synchronize is not available for this"
3232 	 << " Model." << std::endl;
3233     abort_handler(MODEL_ERROR);
3234   }
3235 
3236   // should not occur: protected fn only used by the letter
3237   return modelRep->derived_synchronize(); // envelope fwd to letter
3238 }
3239 
3240 
derived_synchronize_nowait()3241 const IntResponseMap& Model::derived_synchronize_nowait()
3242 {
3243   if (!modelRep) { // letter lacking redefinition of virtual fn.
3244     Cerr << "Error: Letter lacking redefinition of virtual derived_synchronize"
3245          << "_nowait() function.\n       derived_synchronize_nowait is not "
3246 	 << "available for this Model." << std::endl;
3247     abort_handler(MODEL_ERROR);
3248   }
3249 
3250   // should not occur: protected fn only used by the letter
3251   return modelRep->derived_synchronize_nowait(); // envelope fwd to letter
3252 }
3253 
3254 
derived_master_overload() const3255 bool Model::derived_master_overload() const
3256 {
3257   if (modelRep) // should not occur: protected fn only used by the letter
3258     return modelRep->derived_master_overload(); // envelope fwd to letter
3259   else // letter lacking redefinition of virtual fn.
3260     return false; // default for Surrogate models
3261 }
3262 
3263 
3264 /** return by reference requires use of dummy objects, but is
3265     important to allow use of assign_rep() since this operation must
3266     be performed on the original envelope object. */
subordinate_iterator()3267 Iterator& Model::subordinate_iterator()
3268 {
3269   if (modelRep)
3270     return modelRep->subordinate_iterator(); // envelope fwd to letter
3271   else // letter lacking redefinition of virtual fn.
3272     return dummy_iterator; // return null/empty envelope
3273 }
3274 
3275 
3276 /** return by reference requires use of dummy objects, but is
3277     important to allow use of assign_rep() since this operation must
3278     be performed on the original envelope object. */
subordinate_model()3279 Model& Model::subordinate_model()
3280 {
3281   if (modelRep) // envelope fwd to letter
3282     return modelRep->subordinate_model();
3283   else // letter lacking redefinition of virtual fn.
3284     return dummy_model; // return null/empty envelope
3285 }
3286 
3287 
active_model_key(const UShortArray & mi_key)3288 void Model::active_model_key(const UShortArray& mi_key)
3289 {
3290   if (modelRep) // envelope fwd to letter
3291     modelRep->active_model_key(mi_key);
3292   else {
3293     Cerr << "Error: Letter lacking redefinition of virtual active_model_key() "
3294 	 << "function.\n       model key activation is not supported by this "
3295 	 << "Model class." << std::endl;
3296     abort_handler(MODEL_ERROR);
3297   }
3298 }
3299 
3300 
clear_model_keys()3301 void Model::clear_model_keys()
3302 {
3303   if (modelRep) // envelope fwd to letter
3304     modelRep->clear_model_keys();
3305   else {
3306     Cerr << "Error: Letter lacking redefinition of virtual clear_model_keys() "
3307 	 << "function.\n       model keys are not supported by this Model "
3308 	 << "class." << std::endl;
3309     abort_handler(MODEL_ERROR);
3310   }
3311 }
3312 
3313 
qoi() const3314 size_t Model::qoi() const
3315 {
3316   if (modelRep) // envelope fwd to letter
3317     return modelRep->qoi();
3318   else // default for models without aggregation
3319     return response_size();
3320 }
3321 
3322 
3323 /** return by reference requires use of dummy objects, but is
3324     important to allow use of assign_rep() since this operation must
3325     be performed on the original envelope object. */
surrogate_model()3326 Model& Model::surrogate_model()
3327 {
3328   if (modelRep) // envelope fwd to letter
3329     return modelRep->surrogate_model();
3330   else // letter lacking redefinition of virtual fn.
3331     return dummy_model; // default is no surrogate -> return empty envelope
3332 }
3333 
3334 
surrogate_model() const3335 const Model& Model::surrogate_model() const
3336 {
3337   if (modelRep) // envelope fwd to letter
3338     return modelRep->surrogate_model();
3339   else // letter lacking redefinition of virtual fn.
3340     return dummy_model; // default is no surrogate -> return empty envelope
3341 }
3342 
3343 
3344 /** return by reference requires use of dummy objects, but is
3345     important to allow use of assign_rep() since this operation must
3346     be performed on the original envelope object. */
truth_model()3347 Model& Model::truth_model()
3348 {
3349   if (modelRep) // envelope fwd to letter
3350     return modelRep->truth_model();
3351   else // letter lacking redefinition of virtual fn.
3352     return *this; // default is no surrogate -> return this model instance
3353 }
3354 
3355 
truth_model() const3356 const Model& Model::truth_model() const
3357 {
3358   if (modelRep) // envelope fwd to letter
3359     return modelRep->truth_model();
3360   else // letter lacking redefinition of virtual fn.
3361     return *this; // default is no surrogate -> return this model instance
3362 }
3363 
3364 
3365 /** since modelList is built with list insertions (using envelope
3366     copies), these models may not be used for model.assign_rep() since
3367     this operation must be performed on the original envelope object.
3368     They may, however, be used for letter-based operations (including
3369     assign_rep() on letter contents such as an interface). */
subordinate_models(bool recurse_flag)3370 ModelList& Model::subordinate_models(bool recurse_flag)
3371 {
3372   if (modelRep) // envelope fwd to letter
3373     return modelRep->subordinate_models(recurse_flag);
3374   else { // letter (not virtual)
3375     modelList.clear();
3376     derived_subordinate_models(modelList, recurse_flag);
3377     return modelList;
3378   }
3379 }
3380 
3381 
/** Append this model's subordinate models to ml (recursing when
    recurse_flag is set); the base letter default contributes nothing. */
void Model::derived_subordinate_models(ModelList& ml, bool recurse_flag)
{
  if (modelRep) // envelope fwd to letter
    modelRep->derived_subordinate_models(ml, recurse_flag);
  // else: default implementation (SimulationModel) is no-op.
}
3388 
3389 
/** Propagate size changes up from a subordinate model to the given
    recursion depth; the base letter default is a no-op. */
void Model::resize_from_subordinate_model(size_t depth)
{
  if (modelRep) // envelope fwd to letter
    modelRep->resize_from_subordinate_model(depth);
  // else default if no redefinition is no-op
}
3396 
3397 
3398 /** used only for instantiate-on-the-fly model recursions (all RecastModel
3399     instantiations and alternate DataFitSurrModel instantiations).  Simulation,
3400     Hierarchical, and Nested Models do not redefine the function since they
3401     do not support instantiate-on-the-fly.  This means that the recursion
3402     will stop as soon as it encounters a Model that was instantiated normally,
3403     which is appropriate since ProblemDescDB-constructed Models use top-down
3404     information flow and do not require bottom-up updating. */
/** Pull updated data up from a subordinate model to the given recursion
    depth; the base letter default is a no-op (see comment block above for
    the instantiate-on-the-fly rationale). */
void Model::update_from_subordinate_model(size_t depth)
{
  if (modelRep) // envelope fwd to letter
    modelRep->update_from_subordinate_model(depth);
  // else default if no redefinition is no-op
}
3411 
3412 
3413 /** return by reference requires use of dummy objects, but is
3414     important to allow use of assign_rep() since this operation must
3415     be performed on the original envelope object. */
derived_interface()3416 Interface& Model::derived_interface()
3417 {
3418   if (modelRep) return modelRep->derived_interface(); // fwd to letter
3419   else          return dummy_interface; // return null/empty envelope
3420 }
3421 
3422 
3423 /** return the number of levels within a solution / discretization hierarchy. */
solution_levels(bool lwr_bnd) const3424 size_t Model::solution_levels(bool lwr_bnd) const
3425 {
3426   if (modelRep) return modelRep->solution_levels(lwr_bnd); // fwd to letter
3427   else          return (lwr_bnd) ? 1 : 0; // default
3428 }
3429 
3430 
3431 /** activate a particular level within a solution / discretization hierarchy. */
solution_level_index(unsigned short index)3432 void Model::solution_level_index(unsigned short index)
3433 {
3434   if (modelRep)
3435     modelRep->solution_level_index(index); // envelope fwd to letter
3436   else if (index != USHRT_MAX) {
3437     // letter lacking redefinition of virtual fn (for case that requires fwd)
3438     Cerr << "Error: Letter lacking redefinition of virtual solution_level_index"
3439          << "() function.\n       solution_level_index is not supported by this"
3440 	 << " Model class." << std::endl;
3441     abort_handler(MODEL_ERROR);
3442   }
3443 }
3444 
3445 
solution_level_index() const3446 unsigned short Model::solution_level_index() const
3447 {
3448   if (modelRep) return modelRep->solution_level_index(); // fwd to letter
3449   else          return USHRT_MAX; // not defined (default)
3450 }
3451 
3452 
solution_level_costs() const3453 RealVector Model::solution_level_costs() const
3454 {
3455   if (!modelRep) { // letter lacking redefinition of virtual fn.
3456     Cerr << "Error: Letter lacking redefinition of virtual solution_level_costs"
3457          << "() function.\n       solution_level_costs is not supported by "
3458 	 << "this Model class." << std::endl;
3459     abort_handler(MODEL_ERROR);
3460   }
3461 
3462   return modelRep->solution_level_costs(); // envelope fwd to letter
3463 }
3464 
3465 
solution_level_cost() const3466 Real Model::solution_level_cost() const
3467 {
3468   if (!modelRep) { // letter lacking redefinition of virtual fn.
3469     Cerr << "Error: Letter lacking redefinition of virtual solution_level_cost"
3470          << "() function.\n       solution_level_cost is not supported by this "
3471 	 << "Model class." << std::endl;
3472     abort_handler(MODEL_ERROR);
3473   }
3474 
3475   return modelRep->solution_level_cost(); // envelope fwd to letter
3476 }
3477 
3478 
3479 /** return by reference requires use of dummy objects, but is
3480     important to allow use of assign_rep() since this operation must
3481     be performed on the original envelope object. */
interface_id() const3482 const String& Model::interface_id() const
3483 {
3484   if (modelRep)
3485     return modelRep->interface_id(); // envelope fwd to letter
3486   else // letter lacking redefinition of virtual fn.
3487     return dummy_interface.interface_id(); // return empty string
3488 }
3489 
3490 
3491 void Model::
primary_response_fn_weights(const RealVector & wts,bool recurse_flag)3492 primary_response_fn_weights(const RealVector& wts, bool recurse_flag)
3493 {
3494   if (modelRep) // envelope fwd to letter
3495     modelRep->primary_response_fn_weights(wts, recurse_flag);
3496   else // default does not support recursion (SimulationModel, NestedModel)
3497     primaryRespFnWts = wts;
3498 }
3499 
3500 
surrogate_function_indices(const IntSet & surr_fn_indices)3501 void Model::surrogate_function_indices(const IntSet& surr_fn_indices)
3502 {
3503   if (modelRep)
3504     modelRep->surrogate_function_indices(surr_fn_indices); // fwd to letter
3505   // else: default implementation is no-op
3506 }
3507 
3508 
3509 /** This function assumes derivatives with respect to the active
3510     continuous variables.  Therefore, concurrency with respect to the
3511     inactive continuous variables is not captured. */
int Model::derivative_concurrency() const
{
  if (modelRep)
    return modelRep->derivative_concurrency(); // envelope fwd to letter
  else { // not a virtual function: base class definition for all letters
    // baseline: one evaluation for the function values themselves
    int deriv_conc = 1;
    // Dakota-managed numerical gradients add evaluations per derivative
    // variable: two for central differences, one for forward
    if ( (gradientType=="numerical" || gradientType=="mixed") &&
         methodSource == "dakota" )
      deriv_conc += (intervalType == "central") ? 2*numDerivVars : numDerivVars;
    // numerical Hessians, or mixed Hessians with a numerical subset
    if ( hessianType == "numerical" ||
         ( hessianType == "mixed" && !hessIdNumerical.empty())) {
      if (gradientType == "analytic")
        // 1st-order differences of analytic gradients
        deriv_conc += numDerivVars;
      else if (gradientType == "numerical")
        // 2nd-order differences of function values
        deriv_conc += 2*numDerivVars*numDerivVars;
      else if (gradientType == "mixed") {
        bool first_order = false, second_order = false;
        if (hessianType == "mixed") { // mixed Hessians with mixed gradients
          // classify each numerical-Hessian id: analytic gradient available
          // -> 1st-order differencing; otherwise -> 2nd-order differencing
          for (ISCIter cit=hessIdNumerical.begin();
               cit!=hessIdNumerical.end(); ++cit) {
            if (contains(gradIdAnalytic, *cit))
              first_order  = true;
            else
              second_order = true;
          }
        }
        else // numerical Hessians with mixed gradients
          first_order = second_order = true;
        // First and second order differences are not mutually exclusive
        if (first_order)  // 1st-order forward gradient differences
          deriv_conc += numDerivVars;
        if (second_order) // 2nd-order central function differences
          deriv_conc += 2*numDerivVars*numDerivVars;
      }
    }
    return deriv_conc;
  }
}
3550 
3551 
probability_transformation()3552 Pecos::ProbabilityTransformation& Model::probability_transformation()
3553 {
3554   if (!modelRep) { // letter lacking redefinition of virtual fn.
3555     Cerr << "Error: Letter lacking redefinition of virtual probability_"
3556 	 << "transformation() function.\n       Probability transformations "
3557 	 << "are not supported by this Model class." << std::endl;
3558     abort_handler(MODEL_ERROR);
3559   }
3560 
3561   return modelRep->probability_transformation(); // envelope fwd to letter
3562 }
3563 
3564 
initialize_mapping(ParLevLIter pl_iter)3565 bool Model::initialize_mapping(ParLevLIter pl_iter)
3566 {
3567   if (modelRep)
3568     return modelRep->initialize_mapping(pl_iter);
3569   else {
3570     // restore initial states for re-entrancy
3571     currentResponse.reset(); // for completeness
3572     if (!warmStartFlag) {
3573       // Dakota::Variables does not support reset() since initial points are not
3574       // cached in Model/Variables -- they are generally (re)set from Iterator.
3575       //currentVariables.reset(); // not supported
3576 
3577       if (!quasiHessians.empty()) {
3578 	for (size_t i=0; i<numFns; ++i)
3579 	  quasiHessians[i] = 0.;
3580 	numQuasiUpdates.assign(numFns, 0);// {x,fnGrads}Prev will be overwritten
3581       }
3582     }
3583 
3584     mappingInitialized = true;
3585 
3586     return false; // size did not change
3587   }
3588 }
3589 
3590 
finalize_mapping()3591 bool Model::finalize_mapping()
3592 {
3593   if (modelRep)
3594     return modelRep->finalize_mapping();
3595   else { // base class behavior
3596     mappingInitialized = false;
3597     return false; // size did not change
3598   }
3599 }
3600 
3601 
resize_pending() const3602 bool Model::resize_pending() const
3603 {
3604   if (modelRep)
3605     return modelRep->resize_pending();
3606   else // Base class default is false
3607     return false;
3608 }
3609 
3610 
build_approximation()3611 void Model::build_approximation()
3612 {
3613   if (modelRep) // envelope fwd to letter
3614     modelRep->build_approximation();
3615   else { // letter lacking redefinition of virtual fn.
3616     Cerr << "Error: Letter lacking redefinition of virtual build_approximation"
3617          << "() function.\nThis model does not support approximation "
3618 	 << "construction." << std::endl;
3619     abort_handler(MODEL_ERROR);
3620   }
3621 }
3622 
3623 
3624 bool Model::
build_approximation(const Variables & vars,const IntResponsePair & response_pr)3625 build_approximation(const Variables& vars, const IntResponsePair& response_pr)
3626 {
3627   if (!modelRep) { // letter lacking redefinition of virtual fn.
3628     Cerr << "Error: Letter lacking redefinition of virtual build_approximation"
3629          << "(Variables, IntResponsePair) function.\nThis model does not "
3630 	 << "support constrained approximation construction." << std::endl;
3631     abort_handler(MODEL_ERROR);
3632   }
3633 
3634   // envelope fwd to letter
3635   return modelRep->build_approximation(vars, response_pr);
3636 }
3637 
3638 
rebuild_approximation()3639 void Model::rebuild_approximation()
3640 {
3641   if (modelRep) // envelope fwd to letter
3642     modelRep->rebuild_approximation();
3643   else
3644     build_approximation(); // default: build from scratch
3645 }
3646 
3647 
update_approximation(bool rebuild_flag)3648 void Model::update_approximation(bool rebuild_flag)
3649 {
3650   if (modelRep) // envelope fwd to letter
3651     modelRep->update_approximation(rebuild_flag);
3652   else { // letter lacking redefinition of virtual fn.
3653     Cerr << "Error: Letter lacking redefinition of virtual update_"
3654 	 << "approximation(bool) function.\nThis model does not support "
3655 	 << "approximation updating." << std::endl;
3656     abort_handler(MODEL_ERROR);
3657   }
3658 }
3659 
3660 
3661 void Model::
update_approximation(const Variables & vars,const IntResponsePair & response_pr,bool rebuild_flag)3662 update_approximation(const Variables& vars, const IntResponsePair& response_pr,
3663 		     bool rebuild_flag)
3664 {
3665   if (modelRep) // envelope fwd to letter
3666     modelRep->update_approximation(vars, response_pr, rebuild_flag);
3667   else { // letter lacking redefinition of virtual fn.
3668     Cerr << "Error: Letter lacking redefinition of virtual update_approximation"
3669          << "(Variables, IntResponsePair) function.\nThis model does not "
3670 	 << "support approximation updating." << std::endl;
3671     abort_handler(MODEL_ERROR);
3672   }
3673 }
3674 
3675 
3676 void Model::
update_approximation(const VariablesArray & vars_array,const IntResponseMap & resp_map,bool rebuild_flag)3677 update_approximation(const VariablesArray& vars_array,
3678 		     const IntResponseMap& resp_map, bool rebuild_flag)
3679 {
3680   if (modelRep) // envelope fwd to letter
3681     modelRep->update_approximation(vars_array, resp_map, rebuild_flag);
3682   else { // letter lacking redefinition of virtual fn.
3683     Cerr << "Error: Letter lacking redefinition of virtual update_approximation"
3684          << "(VariablesArray, IntResponseMap) function.\nThis model does not "
3685          << "support approximation updating." << std::endl;
3686     abort_handler(MODEL_ERROR);
3687   }
3688 }
3689 
3690 
3691 void Model::
update_approximation(const RealMatrix & samples,const IntResponseMap & resp_map,bool rebuild_flag)3692 update_approximation(const RealMatrix& samples, const IntResponseMap& resp_map,
3693 		     bool rebuild_flag)
3694 {
3695   if (modelRep) // envelope fwd to letter
3696     modelRep->update_approximation(samples, resp_map, rebuild_flag);
3697   else { // letter lacking redefinition of virtual fn.
3698     Cerr << "Error: Letter lacking redefinition of virtual update_approximation"
3699          << "(RealMatrix, IntResponseMap) function.\nThis model does not "
3700          << "support approximation updating." << std::endl;
3701     abort_handler(MODEL_ERROR);
3702   }
3703 }
3704 
3705 
append_approximation(bool rebuild_flag)3706 void Model::append_approximation(bool rebuild_flag)
3707 {
3708   if (modelRep) // envelope fwd to letter
3709     modelRep->append_approximation(rebuild_flag);
3710   else { // letter lacking redefinition of virtual fn.
3711     Cerr << "Error: Letter lacking redefinition of virtual append_"
3712 	 << "approximation(bool) function.\nThis model does not support "
3713 	 << "approximation appending." << std::endl;
3714     abort_handler(MODEL_ERROR);
3715   }
3716 }
3717 
3718 
3719 void Model::
append_approximation(const Variables & vars,const IntResponsePair & response_pr,bool rebuild_flag)3720 append_approximation(const Variables& vars, const IntResponsePair& response_pr,
3721 		     bool rebuild_flag)
3722 {
3723   if (modelRep) // envelope fwd to letter
3724     modelRep->append_approximation(vars, response_pr, rebuild_flag);
3725   else { // letter lacking redefinition of virtual fn.
3726     Cerr << "Error: Letter lacking redefinition of virtual append_approximation"
3727          << "(Variables, IntResponsePair) function.\nThis model does not "
3728 	 << "support approximation appending." << std::endl;
3729     abort_handler(MODEL_ERROR);
3730   }
3731 }
3732 
3733 
3734 void Model::
append_approximation(const VariablesArray & vars_array,const IntResponseMap & resp_map,bool rebuild_flag)3735 append_approximation(const VariablesArray& vars_array,
3736 		     const IntResponseMap& resp_map, bool rebuild_flag)
3737 {
3738   if (modelRep) // envelope fwd to letter
3739     modelRep->append_approximation(vars_array, resp_map, rebuild_flag);
3740   else { // letter lacking redefinition of virtual fn.
3741     Cerr << "Error: Letter lacking redefinition of virtual append_approximation"
3742          << "(VariablesArray, IntResponseMap) function.\nThis model does not "
3743          << "support approximation appending." << std::endl;
3744     abort_handler(MODEL_ERROR);
3745   }
3746 }
3747 
3748 
3749 void Model::
append_approximation(const RealMatrix & samples,const IntResponseMap & resp_map,bool rebuild_flag)3750 append_approximation(const RealMatrix& samples, const IntResponseMap& resp_map,
3751 		     bool rebuild_flag)
3752 {
3753   if (modelRep) // envelope fwd to letter
3754     modelRep->append_approximation(samples, resp_map, rebuild_flag);
3755   else { // letter lacking redefinition of virtual fn.
3756     Cerr << "Error: Letter lacking redefinition of virtual append_approximation"
3757          << "(RealMatrix, IntResponseMap) function.\nThis model does not "
3758          << "support approximation appending." << std::endl;
3759     abort_handler(MODEL_ERROR);
3760   }
3761 }
3762 
3763 
pop_approximation(bool save_surr_data,bool rebuild_flag)3764 void Model::pop_approximation(bool save_surr_data, bool rebuild_flag)
3765 {
3766   if (modelRep) // envelope fwd to letter
3767     modelRep->pop_approximation(save_surr_data, rebuild_flag);
3768   else { // letter lacking redefinition of virtual fn.
3769     Cerr << "Error: Letter lacking redefinition of virtual\n       "
3770 	 << "pop_approximation(bool, bool) function.  This model does not\n"
3771 	 << "       support approximation data removal." << std::endl;
3772     abort_handler(MODEL_ERROR);
3773   }
3774 }
3775 
3776 
push_approximation()3777 void Model::push_approximation()
3778 {
3779   if (modelRep) // envelope fwd to letter
3780     modelRep->push_approximation();
3781   else { // letter lacking redefinition of virtual fn.
3782     Cerr << "Error: Letter lacking redefinition of virtual push_approximation()"
3783 	 << " function.\n       This model does not support approximation"
3784 	 << " augmentation." << std::endl;
3785     abort_handler(MODEL_ERROR);
3786   }
3787 }
3788 
3789 
push_available()3790 bool Model::push_available()
3791 { return (modelRep) ? modelRep->push_available() : false; }
3792 
3793 
finalize_approximation()3794 void Model::finalize_approximation()
3795 {
3796   if (modelRep) // envelope fwd to letter
3797     modelRep->finalize_approximation();
3798   else { // letter lacking redefinition of virtual fn.
3799     Cerr << "Error: Letter lacking redefinition of virtual finalize_"
3800 	 << "approximation() function.\n       This model does not support "
3801 	 << "approximation finalization." << std::endl;
3802     abort_handler(MODEL_ERROR);
3803   }
3804 }
3805 
3806 
combine_approximation()3807 void Model::combine_approximation()
3808 {
3809   if (modelRep) // envelope fwd to letter
3810     modelRep->combine_approximation();
3811   else { // letter lacking redefinition of virtual fn.
3812     Cerr << "Error: Letter lacking redefinition of virtual combine_"
3813 	 << "approximation() function.\n       This model does not support "
3814 	 << "approximation combination." << std::endl;
3815     abort_handler(MODEL_ERROR);
3816   }
3817 }
3818 
3819 
combined_to_active(bool clear_combined)3820 void Model::combined_to_active(bool clear_combined)
3821 {
3822   if (modelRep) // envelope fwd to letter
3823     modelRep->combined_to_active(clear_combined);
3824   else { // letter lacking redefinition of virtual fn.
3825     Cerr << "Error: Letter lacking redefinition of virtual combined_to_active()"
3826 	 << " function.\n       This model does not support approximation"
3827 	 << " combination." << std::endl;
3828     abort_handler(MODEL_ERROR);
3829   }
3830 }
3831 
3832 
clear_inactive()3833 void Model::clear_inactive()
3834 {
3835   if (modelRep) // envelope fwd to letter
3836     modelRep->clear_inactive();
3837   //else no op: no inactive data to clear
3838 }
3839 
3840 
advancement_available()3841 bool Model::advancement_available()
3842 {
3843   if (modelRep) return modelRep->advancement_available();
3844   else          return true; // only a few cases throttle advancements
3845 }
3846 
3847 
formulation_updated() const3848 bool Model::formulation_updated() const
3849 {
3850   if (modelRep) return modelRep->formulation_updated();
3851   else          return false;
3852 }
3853 
3854 
formulation_updated(bool update)3855 void Model::formulation_updated(bool update)
3856 {
3857   if (modelRep)
3858     modelRep->formulation_updated(update);
3859   //else no op
3860 }
3861 
3862 
run_dace()3863 void Model::run_dace()
3864 {
3865   if (modelRep) // envelope fwd to letter
3866     modelRep->run_dace();
3867   else { // letter lacking redefinition of virtual fn.
3868     Cerr << "Error: Letter lacking redefinition of virtual run_dace() function."
3869 	 << "\n       This model does not support DACE executions."<< std::endl;
3870     abort_handler(MODEL_ERROR);
3871   }
3872 }
3873 
3874 
3875 /*
3876 const VariablesArray Model::build_variables() const
3877 {
3878   if (modelRep) // envelope fwd to letter
3879     return modelRep->build_variables();
3880   else { // letter lacking redefinition of virtual fn.
3881     Cerr << "Error: Letter lacking redefinition of virtual build_variables()"
3882          << "\n       This model does not support build variables retrieval."
3883 	 << std::endl;
3884     abort_handler(MODEL_ERROR);
3885   }
3886 }
3887 
3888 
3889 const ResponseArray Model::build_responses() const
3890 {
3891   if (modelRep) // envelope fwd to letter
3892     return modelRep->build_responses();
3893   else { // letter lacking redefinition of virtual fn.
3894     Cerr << "Error: Letter lacking redefinition of virtual build_responses()"
3895          << "\n       This model does not support build responses retrieval."
3896 	 << std::endl;
3897     abort_handler(MODEL_ERROR);
3898   }
3899 }
3900 */
3901 
3902 
force_rebuild()3903 bool Model::force_rebuild()
3904 {
3905   if (modelRep) // envelope fwd to letter
3906     return modelRep->force_rebuild();
3907   else // default if no letter redefinition of virtual fn.
3908     return false;
3909 }
3910 
3911 
shared_approximation()3912 SharedApproxData& Model::shared_approximation()
3913 {
3914   if (!modelRep) { // letter lacking redefinition of virtual fn.
3915     Cerr << "Error: Letter lacking redefinition of virtual shared_approximation"
3916          << "() function.\nThis model does not support approximations."
3917 	 << std::endl;
3918     abort_handler(MODEL_ERROR);
3919   }
3920 
3921   // envelope fwd to letter
3922   return modelRep->shared_approximation();
3923 }
3924 
3925 
approximations()3926 std::vector<Approximation>& Model::approximations()
3927 {
3928   if (!modelRep) { // letter lacking redefinition of virtual fn.
3929     Cerr << "Error: Letter lacking redefinition of virtual approximations() "
3930          << "function.\nThis model does not support approximations."
3931 	 << std::endl;
3932     abort_handler(MODEL_ERROR);
3933   }
3934 
3935   // envelope fwd to letter
3936   return modelRep->approximations();
3937 }
3938 
3939 
approximation_coefficients(bool normalized)3940 const RealVectorArray& Model::approximation_coefficients(bool normalized)
3941 {
3942   if (!modelRep) { // letter lacking redefinition of virtual fn.
3943     Cerr << "Error: Letter lacking redefinition of virtual approximation_"
3944          << "coefficients() function.\nThis model does not support "
3945          << "approximations." << std::endl;
3946     abort_handler(MODEL_ERROR);
3947   }
3948 
3949   // envelope fwd to letter
3950   return modelRep->approximation_coefficients(normalized);
3951 }
3952 
3953 
3954 void Model::
approximation_coefficients(const RealVectorArray & approx_coeffs,bool normalized)3955 approximation_coefficients(const RealVectorArray& approx_coeffs,
3956 			   bool normalized)
3957 {
3958   if (modelRep) // envelope fwd to letter
3959     modelRep->approximation_coefficients(approx_coeffs, normalized);
3960   else { // letter lacking redefinition of virtual fn.
3961     Cerr << "Error: Letter lacking redefinition of virtual approximation_"
3962          << "coefficients() function.\n       This model does not support "
3963          << "approximations." << std::endl;
3964     abort_handler(MODEL_ERROR);
3965   }
3966 }
3967 
3968 
approximation_variances(const Variables & vars)3969 const RealVector& Model::approximation_variances(const Variables& vars)
3970 {
3971   if (!modelRep) { // letter lacking redefinition of virtual fn.
3972     Cerr << "Error: Letter lacking redefinition of virtual approximation_"
3973          << "variances() function.\nThis model does not support "
3974          << "approximations." << std::endl;
3975     abort_handler(MODEL_ERROR);
3976   }
3977 
3978   // envelope fwd to letter
3979   return modelRep->approximation_variances(vars);
3980 }
3981 
3982 
error_estimates()3983 const RealVector& Model::error_estimates()
3984 {
3985   if (!modelRep) { // letter lacking redefinition of virtual fn.
3986     Cerr << "Error: Letter lacking redefinition of virtual error_estimates() "
3987 	 << "function.\n       This model does not support error estimation."
3988 	 << std::endl;
3989     abort_handler(MODEL_ERROR);
3990   }
3991 
3992   // envelope fwd to letter
3993   return modelRep->error_estimates();
3994 }
3995 
3996 
approximation_data(size_t fn_index)3997 const Pecos::SurrogateData& Model::approximation_data(size_t fn_index)
3998 {
3999   if (!modelRep) { // letter lacking redefinition of virtual fn.
4000     Cerr << "Error: Letter lacking redefinition of virtual approximation_data()"
4001          << " function.\nThis model does not support approximations."
4002 	 << std::endl;
4003     abort_handler(MODEL_ERROR);
4004   }
4005 
4006   // envelope fwd to letter
4007   return modelRep->approximation_data(fn_index);
4008 }
4009 
4010 
surrogate_response_mode(short mode)4011 void Model::surrogate_response_mode(short mode)
4012 {
4013   if (modelRep) // envelope fwd to letter
4014     modelRep->surrogate_response_mode(mode);
4015   // else: default implementation is no-op
4016 }
4017 
4018 
surrogate_response_mode() const4019 short Model::surrogate_response_mode() const
4020 {
4021   if (modelRep) // envelope fwd to letter
4022     return modelRep->surrogate_response_mode();
4023   else // letter lacking redefinition of virtual fn.
4024     return 0; // default for non-surrogate models
4025 }
4026 
4027 
4028 /*
4029 void Model::link_multilevel_approximation_data()
4030 {
4031   if (modelRep) // envelope fwd to letter
4032     modelRep->link_multilevel_approximation_data();
4033   else { // letter lacking redefinition of virtual fn.
4034     Cerr << "Error: Letter lacking redefinition of virtual link_multilevel_"
4035 	 << "approximation_data() function.\nThis model does not support "
4036 	 << "multilevel data." << std::endl;
4037     abort_handler(MODEL_ERROR);
4038   }
4039 }
4040 */
4041 
4042 
discrepancy_correction()4043 DiscrepancyCorrection& Model::discrepancy_correction()
4044 {
4045   if (!modelRep) { // letter lacking redefinition of virtual fn.
4046     Cerr << "Error: Letter lacking redefinition of virtual discrepancy_"
4047 	 << "correction() function.\nThis model does not support corrections."
4048 	 << std::endl;
4049     abort_handler(MODEL_ERROR);
4050   }
4051 
4052   // envelope fwd to letter
4053   return modelRep->discrepancy_correction();
4054 }
4055 
4056 
correction_type()4057 short Model::correction_type()
4058 {
4059   if (modelRep) // envelope fwd to letter
4060     return modelRep->correction_type();
4061   else
4062     return NO_CORRECTION; // default for non-surrogate models
4063 }
4064 
4065 
correction_type(short corr_type)4066 void Model::correction_type(short corr_type)
4067 {
4068   if (modelRep) // envelope fwd to letter
4069     modelRep->correction_type(corr_type);
4070   //else no-op
4071 }
4072 
4073 
single_apply(const Variables & vars,Response & resp,const UShortArray & paired_key)4074 void Model::single_apply(const Variables& vars, Response& resp,
4075 			 const UShortArray& paired_key)
4076 {
4077   if (modelRep) // envelope fwd to letter
4078     modelRep->single_apply(vars, resp, paired_key);
4079   else { // letter lacking redefinition of virtual fn.
4080     Cerr << "Error: Letter lacking redefinition of virtual single_apply() "
4081 	 << "function.\n." << std::endl;
4082     abort_handler(MODEL_ERROR);
4083   }
4084 }
4085 
4086 
recursive_apply(const Variables & vars,Response & resp)4087 void Model::recursive_apply(const Variables& vars, Response& resp)
4088 {
4089   if (modelRep) // envelope fwd to letter
4090     modelRep->recursive_apply(vars, resp);
4091   else { // letter lacking redefinition of virtual fn.
4092     Cerr << "Error: Letter lacking redefinition of virtual recursive_apply() "
4093 	 << "function.\n." << std::endl;
4094     abort_handler(MODEL_ERROR);
4095   }
4096 }
4097 
4098 
component_parallel_mode(short mode)4099 void Model::component_parallel_mode(short mode)
4100 {
4101   if (modelRep) // envelope fwd to letter
4102     modelRep->component_parallel_mode(mode);
4103   // else: default implementation is no-op
4104 }
4105 
4106 
estimate_partition_bounds(int max_eval_concurrency)4107 IntIntPair Model::estimate_partition_bounds(int max_eval_concurrency)
4108 {
4109   if (!modelRep) { // letter lacking redefinition of virtual fn.
4110     Cerr << "Error: Letter lacking redefinition of virtual "
4111 	 << "estimate_partition_bounds() function.\n." << std::endl;
4112     abort_handler(MODEL_ERROR);
4113   }
4114 
4115   return modelRep->estimate_partition_bounds(max_eval_concurrency);
4116 }
4117 
4118 
mi_parallel_level_index() const4119 size_t Model::mi_parallel_level_index() const
4120 {
4121   return (modelRep) ?
4122     modelRep->mi_parallel_level_index() : // envelope fwd to letter
4123     modelPCIter->mi_parallel_level_last_index(); // default definition
4124                    // (for Models without additional mi_pl recursions)
4125 }
4126 
4127 
cache_unmatched_response(int raw_id)4128 void Model::cache_unmatched_response(int raw_id)
4129 {
4130   if (modelRep)
4131     modelRep->cache_unmatched_response(raw_id);
4132   else {
4133     // due to deriv estimation rekeying and removal of intermediate bookkeeping
4134     // data in Model::synchronize{,_nowait}(), caching needs to occur at the
4135     // base class level using Model::responseMap, rather than derived maps.
4136     IntRespMIter rr_it = responseMap.find(raw_id);
4137     if (rr_it != responseMap.end()) {
4138       // insert unmatched record into cache:
4139       cachedResponseMap.insert(*rr_it);
4140       // not essential due to subsequent clear(), but avoid any redundancy:
4141       responseMap.erase(rr_it);
4142     }
4143   }
4144 }
4145 
4146 
4147 /** SimulationModels and HierarchSurrModels redefine this virtual function.
4148     A default value of "synchronous" prevents asynch local operations for:
4149 \li NestedModels: a subIterator can support message passing parallelism,
4150     but not asynch local.
4151 \li DataFitSurrModels: while asynch evals on approximations will work due
4152     to some added bookkeeping, avoiding them is preferable. */
local_eval_synchronization()4153 short Model::local_eval_synchronization()
4154 {
4155   if (modelRep) // should not occur: protected fn only used by the letter
4156     return modelRep->local_eval_synchronization(); // envelope fwd to letter
4157   else // letter lacking redefinition of virtual fn.
4158     return SYNCHRONOUS_INTERFACE; // default value
4159 }
4160 
4161 
4162 /** SimulationModels and HierarchSurrModels redefine this virtual function. */
local_eval_concurrency()4163 int Model::local_eval_concurrency()
4164 {
4165   if (modelRep) // should not occur: protected fn only used by the letter
4166     return modelRep->local_eval_concurrency(); // envelope fwd to letter
4167   else // letter lacking redefinition of virtual fn.
4168     return 0; // default value
4169 }
4170 
4171 
serve_run(ParLevLIter pl_iter,int max_eval_concurrency)4172 void Model::serve_run(ParLevLIter pl_iter, int max_eval_concurrency)
4173 {
4174   if (modelRep) // envelope fwd to letter
4175     modelRep->serve_run(pl_iter, max_eval_concurrency);
4176   else { // letter lacking redefinition of virtual fn.
4177     Cerr << "Error: Letter lacking redefinition of virtual serve_run() function"
4178 	 << ".\nThis model does not support server operations." << std::endl;
4179     abort_handler(MODEL_ERROR);
4180   }
4181 }
4182 
4183 
stop_servers()4184 void Model::stop_servers()
4185 {
4186   if (modelRep) // envelope fwd to letter
4187     modelRep->stop_servers();
4188   else { // letter lacking redefinition of virtual fn.
4189     Cerr << "Error: Letter lacking redefinition of virtual stop_servers() "
4190          << "function.\nThis model does not support server operations."
4191 	 << std::endl;
4192     abort_handler(MODEL_ERROR);
4193   }
4194 }
4195 
4196 
4197 /** The init_communicators() and derived_init_communicators() functions are
4198     stuctured to avoid performing the messageLengths estimation more than once.
4199     init_communicators() (not virtual) performs the estimation and then
4200     forwards the results to derived_init_communicators (virtual) which uses
4201     the data in different contexts. */
void Model::
init_communicators(ParLevLIter pl_iter, int max_eval_concurrency,
                   bool recurse_flag)
{
  if (modelRep) // envelope fwd to letter
    modelRep->init_communicators(pl_iter, max_eval_concurrency, recurse_flag);
  else { // not a virtual function: base class definition for all letters

    // Undefined mi_pl can happen for IteratorScheduler::configure(), as
    // estimation of concurrency involves instantiation of Iterators
    // prior to IteratorScheduler::partition(), and some Iterators invoke
    // init_communicators() for contained helper iterators.  Abandoning a
    // parallel configuration means that these iterator instances should
    // be discarded and replaced once the mi_pl context is available.
    //if (!parallelLib.mi_parallel_level_defined())
    //  return;
    // Note for updated design: could replace with check miPLIters.size() <= 1,
    // but w_pl has now been expanded to be a sufficient starting pt for ie/ea
    // (meta-iterator partitioning is no longer required) --> leave commented
    // out for now.

    // matches bcast in Model::serve_init() called from
    // IteratorScheduler::init_iterator().  bcastFlag assures that, when Model
    // recursions are present in Iterator instantiations, only the matching
    // Model instance participates in this collective communication.
    if (initCommsBcastFlag && pl_iter->server_communicator_rank() == 0)
      parallelLib.bcast(max_eval_concurrency, *pl_iter);

    // estimate messageLengths (once; skipped on subsequent calls)
    if (messageLengths.empty())
      estimate_message_lengths();

    // since the same Model instance may be reused in multiple contexts
    // (involving multiple calls to init_communicators()), multiple parallel
    // configurations must be supported.  This is managed using a map<> with
    // concurrency level as the lookup key.  Creation of a new parallel
    // configuration is avoided if an equivalent one already exists.
    SizetIntPair key(parallelLib.parallel_level_index(pl_iter),
                     max_eval_concurrency);
    std::map<SizetIntPair, ParConfigLIter>::iterator map_iter
      = modelPCIterMap.find(key);

    // NOTE: modelPCIter update belongs in set_communicators().  However, also
    // updating it here allows passing of analysisComm into a parallel plugin
    // interface constructor (see main.cpp).
    if (map_iter == modelPCIterMap.end()) { // this config does not yet exist

      // increment the PC every time (the first PC instance is wasted), such
      // that each Model points to its corresponding configuration, even if
      // incomplete (for a Model only contained subModels, no increment for
      // incomplete results in a shared complete configuration between top
      // level Model and subModel, and erroneous settings in top level
      // set_communicators()).
      //if ( parallelLib.num_parallel_configurations() > 1 ||
      //     parallelLib.parallel_configuration_is_complete() )
      parallelLib.increment_parallel_configuration(pl_iter);

      // Setting modelPCIter here is insufficient; it must be set at run time
      // (within set_communicators()) according to the iterator context.
      modelPCIterMap[key] = modelPCIter
        = parallelLib.parallel_configuration_iterator();
      derived_init_communicators(pl_iter, max_eval_concurrency, recurse_flag);
    }
    else
      modelPCIter = map_iter->second;
      // Parallel configuration already exists within the Model for this
      // concurrency level.  Configurations must also exist for any sub-models
      // -> no call to derived_init_communicators() needed.
  }
}
4272 
4273 
stop_init_communicators(ParLevLIter pl_iter)4274 void Model::stop_init_communicators(ParLevLIter pl_iter)
4275 {
4276   if (modelRep) // envelope fwd to letter
4277     modelRep->stop_init_communicators(pl_iter);
4278   else { // not a virtual function: base class definition for all letters
4279     int term_code = 0;
4280     parallelLib.bcast(term_code, *pl_iter);
4281   }
4282 }
4283 
4284 
int Model::serve_init_communicators(ParLevLIter pl_iter)
{
  if (modelRep) // envelope fwd to letter
    return modelRep->serve_init_communicators(pl_iter);
  else { // not a virtual function: base class definition for all letters
    int max_eval_concurrency = 1, last_eval_concurrency = 1;
    // server loop: each bcast delivers either a new nonzero concurrency
    // (-> init_communicators()) or the zero sentinel broadcast by
    // stop_init_communicators(), which ends the loop
    while (max_eval_concurrency) {
      parallelLib.bcast(max_eval_concurrency, *pl_iter);
      if (max_eval_concurrency) {
        init_communicators(pl_iter, max_eval_concurrency);
        last_eval_concurrency = max_eval_concurrency;
      }
    }
    // return the last nonzero concurrency that was initialized
    return last_eval_concurrency;
  }
}
4301 
4302 
stop_init_mapping(ParLevLIter pl_iter)4303 void Model::stop_init_mapping(ParLevLIter pl_iter)
4304 {
4305   if (modelRep) // envelope fwd to letter
4306     modelRep->stop_init_mapping(pl_iter);
4307   else {
4308     // Base class is a no-op
4309   }
4310 }
4311 
4312 
serve_init_mapping(ParLevLIter pl_iter)4313 int Model::serve_init_mapping(ParLevLIter pl_iter)
4314 {
4315   if (modelRep) // envelope fwd to letter
4316     return modelRep->serve_init_mapping(pl_iter);
4317   else {
4318     // Base class is a no-op, return 0 since init_communicators() was not called
4319     return 0;
4320   }
4321 }
4322 
4323 
stop_finalize_mapping(ParLevLIter pl_iter)4324 void Model::stop_finalize_mapping(ParLevLIter pl_iter)
4325 {
4326   if (modelRep) // envelope fwd to letter
4327     modelRep->stop_finalize_mapping(pl_iter);
4328   else {
4329     // Base class is a no-op
4330   }
4331 }
4332 
4333 
serve_finalize_mapping(ParLevLIter pl_iter)4334 int Model::serve_finalize_mapping(ParLevLIter pl_iter)
4335 {
4336   if (modelRep) // envelope fwd to letter
4337     return modelRep->serve_finalize_mapping(pl_iter);
4338   else {
4339     // Base class is a no-op, return 0 since init_communicators() was not called
4340     return 0;
4341   }
4342 }
4343 
4344 
warm_start_flag(const bool flag)4345 void Model::warm_start_flag(const bool flag)
4346 {
4347   if (modelRep) modelRep->warm_start_flag(flag);
4348   else          warmStartFlag = flag;
4349 }
4350 
4351 
declare_sources()4352 void Model::declare_sources() {
4353   if(modelRep) modelRep->declare_sources();
4354   else return;
4355 }
4356 
4357 
/** Activate the parallel configuration previously allocated by
    init_communicators() for this (parallel level, eval concurrency)
    pair.  Unlike initialization, activation must be performed at run
    time for every iterator context, and must recurse through sub-models
    (see comment before derived_set_communicators() call below). */
void Model::
set_communicators(ParLevLIter pl_iter, int max_eval_concurrency,
		  bool recurse_flag)
{
  if (modelRep) // envelope fwd to letter
    modelRep->set_communicators(pl_iter, max_eval_concurrency, recurse_flag);
  else { // not a virtual function: base class definition for all letters

    // lookup key matches the one under which init_communicators() stored
    // the configuration in modelPCIterMap
    SizetIntPair key(parallelLib.parallel_level_index(pl_iter),
		     max_eval_concurrency);
    std::map<SizetIntPair, ParConfigLIter>::iterator map_iter
      = modelPCIterMap.find(key);
    if (map_iter == modelPCIterMap.end()) { // this config does not exist
      // a missing entry means init_communicators() was never run for this
      // context -> fatal configuration error
      Cerr << "Error: failure in parallel configuration lookup in "
           << "Model::set_communicators() for key(" << key.first << ", "
           << key.second << ")." << std::endl;
      abort_handler(MODEL_ERROR);
    }
    else
      modelPCIter = map_iter->second;

    // Unlike init_comms, set_comms DOES need to be recursed each time
    // to activate the correct comms at each level of the recursion.
    derived_set_communicators(pl_iter, max_eval_concurrency, recurse_flag);
  }
}
4384 
4385 
/** Configure asynchEvalFlag and evaluationCapacity from the
    interface-evaluation (ie) parallelism level of the active parallel
    configuration.  Called directly from derived classes, so no
    envelope-to-letter forwarding is needed. */
void Model::set_ie_asynchronous_mode(int max_eval_concurrency)
{
  // no rep forward required: called from derived classes

  // Set asynchEvalFlag for either evaluation message passing or asynch local
  // evaluations (or both).  Note that asynch local analysis concurrency by
  // itself does not trigger an asynchronous model, since this concurrency
  // can be handled within a synchronous model evaluation.
  // In the case of Surrogate or Nested models, this sets the asynch flag for
  // the top level iterator & model; the asynch flag for the sub-iterator &
  // sub-model must be set by calling init_communicators on the sub-model
  // within derived_init_communicators.
  if (modelPCIter->ie_parallel_level_defined()) {
    const ParallelLevel& ie_pl = modelPCIter->ie_parallel_level();

    // Note: local_eval_synchronization() handles case of eval concurrency==1
    bool message_passing = ie_pl.message_pass(), asynch_local_eval
      = (local_eval_synchronization() == ASYNCHRONOUS_INTERFACE);
    if ( message_passing || asynch_local_eval )
      asynchEvalFlag = true; // never reset here: only set when applicable

    // Set evaluationCapacity for use by iterators (e.g., COLINY).
    int local_eval_conc = local_eval_concurrency();
    if (message_passing) { // message passing mode
      evaluationCapacity = ie_pl.num_servers();
      if (local_eval_conc) // hybrid mode: capacity augmented
	evaluationCapacity *= local_eval_conc;
    }
    else if (asynch_local_eval) // asynch local mode: capacity limited
      // local_eval_conc == 0 appears to denote an unlimited local
      // concurrency, in which case the iterator's max is used -- TODO confirm
      evaluationCapacity = (local_eval_conc)
	?  local_eval_conc : max_eval_concurrency;
  }
}
4419 
4420 
/** Deallocate the parallel configuration associated with (pl_iter,
    max_eval_concurrency).  No reference counting is used: the first
    call for a given configuration frees it and erases the map entry,
    so any subsequent call for the same key is silently ignored. */
void Model::
free_communicators(ParLevLIter pl_iter, int max_eval_concurrency,
		   bool recurse_flag)
{
  if (modelRep) // envelope fwd to letter
    modelRep->free_communicators(pl_iter, max_eval_concurrency, recurse_flag);
  else { // not a virtual function: base class definition for all letters

    // Note: deallocations do not utilize reference counting -> the _first_
    // call to free a particular configuration deallocates it and all
    // subsequent calls are ignored (to prevent multiple deallocations).
    SizetIntPair key(parallelLib.parallel_level_index(pl_iter),
		     max_eval_concurrency);
    std::map<SizetIntPair, ParConfigLIter>::iterator map_iter
      = modelPCIterMap.find(key);
    if (map_iter != modelPCIterMap.end()) { // this config still exists
      modelPCIter = map_iter->second;
      derived_free_communicators(pl_iter, max_eval_concurrency, recurse_flag);
      modelPCIterMap.erase(key); // later frees of this key become no-ops
    }
  }
}
4443 
4444 
analysis_comm() const4445 MPI_Comm Model::analysis_comm() const
4446 {
4447   if (modelRep) // envelope fwd to letter
4448     return modelRep->analysis_comm();
4449   else
4450     return modelPCIter->ea_parallel_level().server_intra_communicator();
4451 }
4452 
4453 
/** This functionality has been pulled out of init_communicators() and
    defined separately so that it may be used in those cases when
    messageLengths is needed but model.init_communicators() is not
    called, e.g., for the master processor in the self-scheduling of a
    concurrent iterator strategy.  Fills messageLengths with worst-case
    MPI pack sizes: [0] variables, [1] variables + active set,
    [2] response, [3] a ParamResponsePair. */
void Model::estimate_message_lengths()
{
  if (modelRep) // envelope fwd to letter
    modelRep->estimate_message_lengths();
  else { // not a virtual function: base class definition for all letters
    // currently, every processor does this estimation (no Bcast needed)
    messageLengths.assign(4, 0);

    if (parallelLib.mpirun_flag()) {
      MPIPackBuffer buff;

      // A Variables object could later be larger if it has string set
      // elements that are longer than the current value.  Create a
      // new Variables object and set the string variables to the
      // worst case before packing. Variables aren't aware of the set
      // elements, so set them here with helper functions.
      Variables new_vars(currentVariables.copy());
      assign_max_strings(mvDist, new_vars);

      buff << new_vars;
      messageLengths[0] = buff.size(); // length of message containing vars

      // The grad/Hessian arrays in currentResponse get dynamically resized as
      // needed.  Therefore, the buffer estimation must assume the worst case.
      size_t num_deriv_vars = std::max(currentVariables.cv(),
                                       currentVariables.icv());
      Response new_response;
      if (num_deriv_vars >
	  currentResponse.active_set_derivative_vector().size()) {
	new_response = currentResponse.copy(); // deep copy
	ActiveSet new_set(numFns, num_deriv_vars);
	new_response.active_set(new_set); // resizes grad/Hessian arrays
      }
      else
	new_response = currentResponse; // shallow copy (shared representation)

      // note: buff is deliberately NOT reset before this append, so
      // messageLengths[1] is the combined length of vars + active set
      buff << new_response.active_set();
      messageLengths[1] = buff.size(); // length of message containing vars/set
      buff.reset();
      buff << new_response;
      messageLengths[2] = buff.size(); // length of message containing response
      buff.reset();
      ParamResponsePair current_pair(new_vars, interface_id(), new_response);
      buff << current_pair;
      messageLengths[3] = buff.size(); // length of message containing a PRPair
#ifdef MPI_DEBUG
      Cout << "Message Lengths:\n" << messageLengths << std::endl;
#endif // MPI_DEBUG
    }
  }
}
4510 
4511 
/** Helper for estimate_message_lengths(): assign each discrete string
    variable in vars its worst-case (max_string) admissible value so a
    subsequent MPI pack reflects the largest possible buffer.  Walks
    the random-variable sequence in specification order -- design,
    aleatory uncertain, epistemic uncertain, state -- using the per-
    domain (cv, div, dsv, drv) counts to skip to each string-set range. */
void Model::
assign_max_strings(const Pecos::MultivariateDistribution& mv_dist,
		   Variables& vars)
{
  std::shared_ptr<Pecos::MarginalsCorrDistribution> mvd_rep =
    std::static_pointer_cast<Pecos::MarginalsCorrDistribution>
    (mv_dist.multivar_dist_rep());
  const SharedVariablesData& svd = vars.shared_data();
  StringSet ss; StringRealMap srm;
  // rv indexes the full random-variable sequence; adsv_index indexes the
  // all-discrete-string array within vars (it only advances on dsv entries)
  size_t rv, start_rv, end_rv, adsv_index = 0,
    num_cv, num_div, num_dsv, num_drv;

  // discrete design set string: dsv block follows the cv + div blocks
  svd.design_counts(num_cv, num_div, num_dsv, num_drv);
  start_rv = num_cv + num_div;  end_rv = start_rv + num_dsv;
  for (rv=start_rv; rv<end_rv; ++rv, ++adsv_index) {
    mvd_rep->pull_parameter<StringSet>(rv, Pecos::DSS_VALUES, ss);
    SSCIter max_it = max_string(ss); // worst-case element for packing
    vars.all_discrete_string_variable(*max_it, adsv_index);
  }
  start_rv = end_rv + num_drv; // skip trailing drv block

  // histogram pt string (aleatory); map keys are the string values
  svd.aleatory_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
  start_rv += num_cv + num_div;  end_rv = start_rv + num_dsv;
  for (rv=start_rv; rv<end_rv; ++rv, ++adsv_index) {
    mvd_rep->pull_parameter<StringRealMap>(rv, Pecos::H_BIN_PAIRS, srm);
    SRMCIter max_it = max_string(srm);
    vars.all_discrete_string_variable(max_it->first, adsv_index);
  }
  start_rv = end_rv + num_drv;

  // discrete epistemic set string; map keys are the string values
  svd.epistemic_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
  start_rv += num_cv + num_div;  end_rv = start_rv + num_dsv;
  for (rv=start_rv; rv<end_rv; ++rv, ++adsv_index) {
    mvd_rep->pull_parameter<StringRealMap>(rv, Pecos::DUSS_VALUES_PROBS, srm);
    SRMCIter max_it = max_string(srm);
    vars.all_discrete_string_variable(max_it->first, adsv_index);
  }
  start_rv = end_rv + num_drv;

  // discrete state set string
  svd.state_counts(num_cv, num_div, num_dsv, num_drv);
  start_rv += num_cv + num_div;  end_rv = start_rv + num_dsv;
  for (rv=start_rv; rv<end_rv; ++rv, ++adsv_index) {
    mvd_rep->pull_parameter<StringSet>(rv, Pecos::DSS_VALUES, ss);
    SSCIter max_it = max_string(ss);
    vars.all_discrete_string_variable(*max_it, adsv_index);
  }
  //start_rv = end_rv + num_drv;
}
4564 
4565 
4566 /** The init_serial() and derived_init_serial() functions are
4567     stuctured to separate base class (common) operations from derived
4568     class (specialized) operations. */
init_serial()4569 void Model::init_serial()
4570 {
4571   if (modelRep) // envelope fwd to letter
4572     modelRep->init_serial();
4573   else { // not a virtual function: base class definition for all letters
4574 
4575     derived_init_serial();
4576 
4577     // restricted parallelism support: allow local asynchronous
4578     // operations but not message passing parallelism.
4579     if ( local_eval_synchronization() == ASYNCHRONOUS_INTERFACE )
4580       asynchEvalFlag = true;
4581   }
4582 }
4583 
4584 
4585 void Model::
derived_init_communicators(ParLevLIter pl_iter,int max_eval_concurrency,bool recurse_flag)4586 derived_init_communicators(ParLevLIter pl_iter, int max_eval_concurrency,
4587 			   bool recurse_flag)
4588 {
4589   if (modelRep) // envelope fwd to letter
4590     modelRep->
4591       derived_init_communicators(pl_iter, max_eval_concurrency, recurse_flag);
4592   else { // letter lacking redefinition of virtual fn.
4593     Cerr << "Error: Letter lacking redefinition of virtual derived_init_"
4594 	 << "communicators() function.\n       This model does not support "
4595 	 << "communicator operations." << std::endl;
4596     abort_handler(MODEL_ERROR);
4597   }
4598 }
4599 
4600 
derived_init_serial()4601 void Model::derived_init_serial()
4602 {
4603   if (modelRep) // envelope fwd to letter
4604     modelRep->derived_init_serial();
4605   else { // letter lacking redefinition of virtual fn.
4606     Cerr << "Error: Letter lacking redefinition of virtual derived_init_serial"
4607          << "() function.\nNo default defined at base class." << std::endl;
4608     abort_handler(MODEL_ERROR);
4609   }
4610 }
4611 
4612 
4613 void Model::
derived_set_communicators(ParLevLIter pl_iter,int max_eval_concurrency,bool recurse_flag)4614 derived_set_communicators(ParLevLIter pl_iter, int max_eval_concurrency,
4615 			  bool recurse_flag)
4616 {
4617   if (modelRep) // envelope fwd to letter
4618     modelRep->
4619       derived_set_communicators(pl_iter, max_eval_concurrency, recurse_flag);
4620   // else default is nothing additional beyond set_communicators()
4621 }
4622 
4623 
4624 void Model::
derived_free_communicators(ParLevLIter pl_iter,int max_eval_concurrency,bool recurse_flag)4625 derived_free_communicators(ParLevLIter pl_iter, int max_eval_concurrency,
4626 			   bool recurse_flag)
4627 {
4628   if (modelRep) // envelope fwd to letter
4629     modelRep->
4630       derived_free_communicators(pl_iter, max_eval_concurrency, recurse_flag);
4631   // else default is nothing additional beyond free_communicators()
4632 }
4633 
4634 
4635 void Model::
nested_variable_mappings(const SizetArray & c_index1,const SizetArray & di_index1,const SizetArray & ds_index1,const SizetArray & dr_index1,const ShortArray & c_target2,const ShortArray & di_target2,const ShortArray & ds_target2,const ShortArray & dr_target2)4636 nested_variable_mappings(const SizetArray& c_index1,
4637 			 const SizetArray& di_index1,
4638 			 const SizetArray& ds_index1,
4639 			 const SizetArray& dr_index1,
4640 			 const ShortArray& c_target2,
4641 			 const ShortArray& di_target2,
4642 			 const ShortArray& ds_target2,
4643 			 const ShortArray& dr_target2)
4644 {
4645   if (modelRep)
4646     modelRep->nested_variable_mappings(c_index1, di_index1, ds_index1,
4647 				       dr_index1, c_target2, di_target2,
4648 				       ds_target2, dr_target2);
4649   //else no-op
4650 }
4651 
4652 
/** Envelope accessor: forwards to the letter's nested_acv1_indices();
    there is no base-class default, so a missing letter is fatal.
    NOTE(review): the trailing dereference of modelRep is only safe
    because abort_handler() is assumed not to return here -- confirm. */
const SizetArray& Model::nested_acv1_indices() const
{
  if (!modelRep) {
    Cerr << "Error: Letter lacking redefinition of virtual nested_acv1_indices"
         << "() function.\nNo default defined at base class." << std::endl;
    abort_handler(MODEL_ERROR);
  }
  return modelRep->nested_acv1_indices();
}
4662 
4663 
/** Envelope accessor: forwards to the letter's nested_acv2_targets();
    there is no base-class default, so a missing letter is fatal.
    NOTE(review): the trailing dereference of modelRep is only safe
    because abort_handler() is assumed not to return here -- confirm. */
const ShortArray& Model::nested_acv2_targets() const
{
  if (!modelRep) {
    Cerr << "Error: Letter lacking redefinition of virtual nested_acv2_targets"
         << "() function.\nNo default defined at base class." << std::endl;
    abort_handler(MODEL_ERROR);
  }
  return modelRep->nested_acv2_targets();
}
4673 
4674 
query_distribution_parameter_derivatives() const4675 short Model::query_distribution_parameter_derivatives() const
4676 {
4677   if (modelRep)
4678     return modelRep->query_distribution_parameter_derivatives();
4679   else // default implementation
4680     return NO_DERIVS;
4681 }
4682 
4683 
activate_distribution_parameter_derivatives()4684 void Model::activate_distribution_parameter_derivatives()
4685 {
4686   if (modelRep)
4687     return modelRep->activate_distribution_parameter_derivatives();
4688   // else no-op
4689 }
4690 
4691 
deactivate_distribution_parameter_derivatives()4692 void Model::deactivate_distribution_parameter_derivatives()
4693 {
4694   if (modelRep)
4695     return modelRep->deactivate_distribution_parameter_derivatives();
4696   // else no-op
4697 }
4698 
4699 
4700 void Model::
trans_grad_X_to_U(const RealVector & fn_grad_x,RealVector & fn_grad_u,const RealVector & x_vars)4701 trans_grad_X_to_U(const RealVector& fn_grad_x, RealVector& fn_grad_u,
4702 		  const RealVector& x_vars)
4703 {
4704   if (modelRep)
4705     modelRep->trans_grad_X_to_U(fn_grad_x, fn_grad_u, x_vars);
4706   else {
4707     Cerr << "Error: Letter lacking redefinition of virtual trans_grad_X_to_U"
4708          << "() function.\nNo default defined at base class." << std::endl;
4709     abort_handler(MODEL_ERROR);
4710   }
4711 }
4712 
4713 
4714 void Model::
trans_grad_U_to_X(const RealVector & fn_grad_u,RealVector & fn_grad_x,const RealVector & x_vars)4715 trans_grad_U_to_X(const RealVector& fn_grad_u, RealVector& fn_grad_x,
4716 		  const RealVector& x_vars)
4717 {
4718   if (modelRep)
4719     modelRep->trans_grad_U_to_X(fn_grad_u, fn_grad_x, x_vars);
4720   else {
4721     Cerr << "Error: Letter lacking redefinition of virtual trans_grad_U_to_X"
4722          << "() function.\nNo default defined at base class." << std::endl;
4723     abort_handler(MODEL_ERROR);
4724   }
4725 }
4726 
4727 
4728 void Model::
trans_grad_X_to_S(const RealVector & fn_grad_x,RealVector & fn_grad_s,const RealVector & x_vars)4729 trans_grad_X_to_S(const RealVector& fn_grad_x, RealVector& fn_grad_s,
4730 		  const RealVector& x_vars)
4731 {
4732   if (modelRep)
4733     modelRep->trans_grad_X_to_S(fn_grad_x, fn_grad_s, x_vars);
4734   else {
4735     Cerr << "Error: Letter lacking redefinition of virtual trans_grad_X_to_S"
4736          << "() function.\nNo default defined at base class." << std::endl;
4737     abort_handler(MODEL_ERROR);
4738   }
4739 }
4740 
4741 
4742 void Model::
trans_hess_X_to_U(const RealSymMatrix & fn_hess_x,RealSymMatrix & fn_hess_u,const RealVector & x_vars,const RealVector & fn_grad_x)4743 trans_hess_X_to_U(const RealSymMatrix& fn_hess_x, RealSymMatrix& fn_hess_u,
4744 		  const RealVector& x_vars, const RealVector& fn_grad_x)
4745 {
4746   if (modelRep)
4747     modelRep->trans_hess_X_to_U(fn_hess_x, fn_hess_u, x_vars, fn_grad_x);
4748   else {
4749     Cerr << "Error: Letter lacking redefinition of virtual trans_hess_X_to_U"
4750          << "() function.\nNo default defined at base class." << std::endl;
4751     abort_handler(MODEL_ERROR);
4752   }
4753 }
4754 
4755 
default_active_set()4756 ActiveSet Model::default_active_set()
4757 {
4758   if (modelRep)
4759     return modelRep->default_active_set();
4760   else {
4761     // This member fn is called from Model::evaluate(_no_wait) and the
4762     // ActiveSet returned is used to allocate evaluation storage in HDF5
4763 
4764     ActiveSet set;
4765     set.derivative_vector(currentVariables.all_continuous_variable_ids());
4766     ShortArray asv(numFns, 1);
4767     if (!set.derivative_vector().empty()) {
4768       if ( gradientType != "none" &&
4769 	   ( gradientType == "analytic" || supportsEstimDerivs ) )
4770 	for(auto &a : asv)
4771 	  a |=  2;
4772 
4773       if ( hessianType != "none" &&
4774 	   ( hessianType == "analytic" || supportsEstimDerivs ) )
4775 	for(auto &a : asv)
4776 	  a |=  4;
4777     }
4778 
4779     set.request_vector(asv);
4780     return set;
4781   }
4782 }
4783 
4784 
inactive_view(short view,bool recurse_flag)4785 void Model::inactive_view(short view, bool recurse_flag)
4786 {
4787   if (modelRep) // envelope fwd to letter
4788     modelRep->inactive_view(view, recurse_flag);
4789   else { // default does not support recursion (SimulationModel, NestedModel)
4790     currentVariables.inactive_view(view);
4791     userDefinedConstraints.inactive_view(view);
4792   }
4793 }
4794 
4795 
/** Return a bit array flagging, among the active discrete int
    variables, which are set types (bit true) versus range types (bit
    false).  For RELAXED_* views, variables promoted into the continuous
    set are excluded from the result via all_relaxed_discrete_int(). */
const BitArray& Model::discrete_int_sets(short active_view)
{
  if (modelRep)
    return modelRep->discrete_int_sets(active_view);

  // identify discrete integer sets within active discrete int variables
  // (excluding discrete integer ranges)

  bool relax = (active_view == RELAXED_ALL ||
    ( active_view >= RELAXED_DESIGN && active_view <= RELAXED_STATE ) );
  const SharedVariablesData&  svd = currentVariables.shared_data();
  const SizetArray& active_totals = svd.active_components_totals();

  // reset() clears all bits; only set-type positions get flagged below
  discreteIntSets.resize(currentVariables.div()); discreteIntSets.reset();
  size_t i, di_cntr = 0;
  if (relax) {
    // This case is complicated by the promotion of active discrete variables
    // into active continuous variables.  all_relax_di and ardi_cntr operate
    // over all of the discrete variables from the input specification, but
    // discreteIntSets operates only over the non-relaxed/categorical active
    // discrete variables, for which it distinguishes sets from ranges.
    const BitArray& all_relax_di = svd.all_relaxed_discrete_int();
    const SizetArray& all_totals = svd.components_totals();
    size_t ardi_cntr = 0;
    // discrete design: ranges first, then sets (specification order)
    if (active_totals[TOTAL_DDIV]) {
      size_t num_ddriv = svd.vc_lookup(DISCRETE_DESIGN_RANGE),
	     num_ddsiv = svd.vc_lookup(DISCRETE_DESIGN_SET_INT);
      for (i=0; i<num_ddriv; ++i, ++ardi_cntr)
	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
	  ++di_cntr;                  // leave bit as false
      for (i=0; i<num_ddsiv; ++i, ++ardi_cntr)
	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
	  { discreteIntSets.set(di_cntr); ++di_cntr; } // set bit to true
    }
    else ardi_cntr += all_totals[TOTAL_DDIV];
    // discrete aleatory uncertain
    if (active_totals[TOTAL_DAUIV]) {
      size_t num_dausiv = svd.vc_lookup(HISTOGRAM_POINT_UNCERTAIN_INT),
	     num_dauriv = all_totals[TOTAL_DAUIV] - num_dausiv;
      for (i=0; i<num_dauriv; ++i, ++ardi_cntr)
	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
	  ++di_cntr;                  // leave bit as false
      for (i=0; i<num_dausiv; ++i, ++ardi_cntr)
	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
	  { discreteIntSets.set(di_cntr); ++di_cntr; } // set bit to true
    }
    else ardi_cntr += all_totals[TOTAL_DAUIV];
    // discrete epistemic uncertain
    if (active_totals[TOTAL_DEUIV]) {
      size_t num_deuriv = svd.vc_lookup(DISCRETE_INTERVAL_UNCERTAIN),
	     num_deusiv = svd.vc_lookup(DISCRETE_UNCERTAIN_SET_INT);
      for (i=0; i<num_deuriv; ++i, ++ardi_cntr)
	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
	  ++di_cntr;                  // leave bit as false
      for (i=0; i<num_deusiv; ++i, ++ardi_cntr)
	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
	  { discreteIntSets.set(di_cntr); ++di_cntr; } // set bit to true
    }
    else ardi_cntr += all_totals[TOTAL_DEUIV];
    // discrete state
    if (active_totals[TOTAL_DSIV]) {
      size_t num_dsriv = svd.vc_lookup(DISCRETE_STATE_RANGE),
	     num_dssiv = svd.vc_lookup(DISCRETE_STATE_SET_INT);
      for (i=0; i<num_dsriv; ++i, ++ardi_cntr)
	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
	  ++di_cntr;                  // leave bit as false
      for (i=0; i<num_dssiv; ++i, ++ardi_cntr)
	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
	  { discreteIntSets.set(di_cntr); ++di_cntr; } // set bit to true
    }
  }
  else { // MIXED_*: no relaxation; skip range counts, flag set counts
    size_t num_ddiv, num_dauiv, num_deuiv, num_dsiv;
    // note: assignments within conditions below are intentional
    if (num_ddiv = active_totals[TOTAL_DDIV]) {
      size_t set_ddiv = svd.vc_lookup(DISCRETE_DESIGN_SET_INT);
      di_cntr += num_ddiv - set_ddiv;//svd.vc_lookup(DISCRETE_DESIGN_RANGE)
      for (i=0; i<set_ddiv; ++i, ++di_cntr)
	discreteIntSets.set(di_cntr);
    }
    if (num_dauiv = active_totals[TOTAL_DAUIV]) {
      size_t set_dauiv = svd.vc_lookup(HISTOGRAM_POINT_UNCERTAIN_INT);
      di_cntr += num_dauiv - set_dauiv; // range_dauiv
      for (i=0; i<set_dauiv; ++i, ++di_cntr)
	discreteIntSets.set(di_cntr);
    }
    if (num_deuiv = active_totals[TOTAL_DEUIV]) {
      size_t set_deuiv = svd.vc_lookup(DISCRETE_UNCERTAIN_SET_INT);
      di_cntr += num_deuiv - set_deuiv;//vc_lookup(DISCRETE_INTERVAL_UNCERTAIN)
      for (i=0; i<set_deuiv; ++i, ++di_cntr)
	discreteIntSets.set(di_cntr);
    }
    if (num_dsiv = active_totals[TOTAL_DSIV]) {
      size_t set_dsiv = svd.vc_lookup(DISCRETE_STATE_SET_INT);
      di_cntr += num_dsiv - set_dsiv;//svd.vc_lookup(DISCRETE_STATE_RANGE)
      for (i=0; i<set_dsiv; ++i, ++di_cntr)
	discreteIntSets.set(di_cntr);
    }
  }

  return discreteIntSets;
}
4898 
4899 
4900 /*
4901 const BitArray& Model::discrete_string_sets()
4902 {
4903   if (modelRep)
4904     return modelRep->discrete_string_sets();
4905 
4906   discreteStringSets.resize(currentVariables.dsv());
4907   discreteStringSets.set(); // all active discrete string vars are set types
4908   return discreteStringSets;
4909 }
4910 
4911 
4912 const BitArray& Model::discrete_real_sets()
4913 {
4914   if (modelRep)
4915     return modelRep->discrete_real_sets();
4916 
4917   discreteRealSets.resize(currentVariables.drv());
4918   discreteRealSets.set(); // all active discrete real vars are set types
4919   return discreteRealSets;
4920 }
4921 */
4922 
4923 
discrete_set_int_values(short active_view)4924 const IntSetArray& Model::discrete_set_int_values(short active_view)
4925 {
4926   if (modelRep)
4927     return modelRep->discrete_set_int_values(active_view);
4928 
4929   // return previous result for previous invocation with consistent view
4930   // Note: any external update of DSI values should reset prevDSIView to 0
4931   if (active_view == prevDSIView) return activeDiscSetIntValues;
4932 
4933   std::shared_ptr<Pecos::MarginalsCorrDistribution> mvd_rep =
4934     std::static_pointer_cast<Pecos::MarginalsCorrDistribution>
4935     (mvDist.multivar_dist_rep());
4936   const SharedVariablesData& svd = currentVariables.shared_data();
4937   switch (active_view) {
4938   case MIXED_DESIGN: {
4939     size_t num_rv = svd.vc_lookup(DISCRETE_DESIGN_SET_INT),
4940          start_rv = svd.vc_lookup(CONTINUOUS_DESIGN)
4941                   + svd.vc_lookup(DISCRETE_DESIGN_RANGE);
4942     mvd_rep->pull_parameters<IntSet>(start_rv, num_rv, Pecos::DSI_VALUES,
4943 				     activeDiscSetIntValues);
4944     break;
4945   }
4946   case MIXED_ALEATORY_UNCERTAIN: {
4947     IntRealMapArray h_pt_prs;
4948     mvd_rep->pull_parameters<IntRealMap>(Pecos::HISTOGRAM_PT_INT,
4949       Pecos::H_PT_INT_PAIRS, h_pt_prs);
4950     size_t i, num_dausiv = h_pt_prs.size();
4951     activeDiscSetIntValues.resize(num_dausiv);
4952     for (i=0; i<num_dausiv; ++i)
4953       map_keys_to_set(h_pt_prs[i], activeDiscSetIntValues[i]);
4954     break;
4955   }
4956   case MIXED_EPISTEMIC_UNCERTAIN: {
4957     IntRealMapArray deusi_vals_probs;
4958     mvd_rep->pull_parameters<IntRealMap>(Pecos::DISCRETE_UNCERTAIN_SET_INT,
4959       Pecos::DUSI_VALUES_PROBS, deusi_vals_probs);
4960     size_t i, num_deusiv = deusi_vals_probs.size();
4961     activeDiscSetIntValues.resize(num_deusiv);
4962     for (i=0; i<num_deusiv; ++i)
4963       map_keys_to_set(deusi_vals_probs[i], activeDiscSetIntValues[i]);
4964     break;
4965   }
4966   case MIXED_UNCERTAIN: {
4967     IntRealMapArray h_pt_prs, deusi_vals_probs;
4968     mvd_rep->pull_parameters<IntRealMap>(Pecos::HISTOGRAM_PT_INT,
4969       Pecos::H_PT_INT_PAIRS, h_pt_prs);
4970     mvd_rep->pull_parameters<IntRealMap>(Pecos::DISCRETE_UNCERTAIN_SET_INT,
4971       Pecos::DUSI_VALUES_PROBS, deusi_vals_probs);
4972     size_t i, num_dausiv = h_pt_prs.size(),num_deusiv = deusi_vals_probs.size();
4973     activeDiscSetIntValues.resize(num_dausiv+num_deusiv);
4974     for (i=0; i<num_dausiv; ++i)
4975       map_keys_to_set(h_pt_prs[i], activeDiscSetIntValues[i]);
4976     for (i=0; i<num_deusiv; ++i)
4977       map_keys_to_set(deusi_vals_probs[i],activeDiscSetIntValues[i+num_dausiv]);
4978     break;
4979   }
4980   case MIXED_STATE: {
4981     size_t num_cv, num_div, num_dsv, num_drv, start_rv = 0,
4982       num_rv = svd.vc_lookup(DISCRETE_STATE_SET_INT);
4983     svd.design_counts(num_cv, num_div, num_dsv, num_drv);
4984     start_rv += num_cv + num_div + num_dsv + num_drv;
4985     svd.aleatory_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
4986     start_rv += num_cv + num_div + num_dsv + num_drv;
4987     svd.epistemic_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
4988     start_rv += num_cv + num_div + num_dsv + num_drv
4989       + svd.vc_lookup(CONTINUOUS_STATE) + svd.vc_lookup(DISCRETE_STATE_RANGE);
4990     mvd_rep->pull_parameters<IntSet>(start_rv, num_rv, Pecos::DSI_VALUES,
4991 				     activeDiscSetIntValues);
4992     break;
4993   }
4994   case MIXED_ALL: {
4995     IntRealMapArray h_pt_prs, deusi_vals_probs;
4996     mvd_rep->pull_parameters<IntRealMap>(Pecos::HISTOGRAM_PT_INT,
4997       Pecos::H_PT_INT_PAIRS, h_pt_prs);
4998     mvd_rep->pull_parameters<IntRealMap>(Pecos::DISCRETE_UNCERTAIN_SET_INT,
4999       Pecos::DUSI_VALUES_PROBS, deusi_vals_probs);
5000     size_t i, di_cntr = 0, num_ddsi = svd.vc_lookup(DISCRETE_DESIGN_SET_INT),
5001       num_dausi = h_pt_prs.size(), num_deusi = deusi_vals_probs.size(),
5002       num_dssi  = svd.vc_lookup(DISCRETE_STATE_SET_INT);
5003     activeDiscSetIntValues.resize(num_ddsi + num_dausi + num_deusi + num_dssi);
5004     size_t num_cv, num_div, num_dsv, num_drv;
5005     svd.design_counts(num_cv, num_div, num_dsv, num_drv);
5006     size_t rv_cntr = num_cv + num_div - num_ddsi;
5007     for (i=0; i<num_ddsi; ++i, ++rv_cntr, ++di_cntr)
5008       mvd_rep->pull_parameter<IntSet>(rv_cntr, Pecos::DSI_VALUES,
5009 				      activeDiscSetIntValues[di_cntr]);
5010     rv_cntr += num_dsv + num_drv;
5011     svd.aleatory_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
5012     for (i=0; i<num_dausi; ++i, ++di_cntr)
5013       map_keys_to_set(h_pt_prs[i], activeDiscSetIntValues[di_cntr]);
5014     rv_cntr += num_cv + num_div + num_dsv + num_drv;
5015     svd.epistemic_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
5016     for (i=0; i<num_deusi; ++i, ++di_cntr)
5017       map_keys_to_set(deusi_vals_probs[i], activeDiscSetIntValues[di_cntr]);
5018     rv_cntr += num_cv + num_div + num_dsv + num_drv +
5019       svd.vc_lookup(CONTINUOUS_STATE) + svd.vc_lookup(DISCRETE_STATE_RANGE);
5020     for (i=0; i<num_dssi; ++i, ++rv_cntr, ++di_cntr)
5021       mvd_rep->pull_parameter<IntSet>(rv_cntr, Pecos::DSI_VALUES,
5022 				      activeDiscSetIntValues[di_cntr]);
5023     break;
5024   }
5025   default: { // RELAXED_*
5026     const BitArray&    all_relax_di = svd.all_relaxed_discrete_int();
5027     const SizetArray&    all_totals = svd.components_totals();
5028     const SizetArray& active_totals = svd.active_components_totals();
5029     size_t i, num_cv, num_div, num_dsv, num_drv,
5030            di_cntr = 0, ardi_cntr = 0, rv_cntr = 0;
5031     // discrete design
5032     svd.design_counts(num_cv, num_div, num_dsv, num_drv);
5033     if (active_totals[TOTAL_DDIV]) {
5034       size_t num_ddsi = svd.vc_lookup(DISCRETE_DESIGN_SET_INT),
5035 	     num_ddri = num_div - num_ddsi;
5036       rv_cntr = num_cv;
5037       for (i=0; i<num_ddri; ++i, ++ardi_cntr, ++rv_cntr)
5038 	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
5039 	  ++di_cntr;
5040       for (i=0; i<num_ddsi; ++i, ++ardi_cntr, ++rv_cntr)
5041 	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
5042 	  mvd_rep->pull_parameter<IntSet>(rv_cntr, Pecos::DSI_VALUES,
5043 					  activeDiscSetIntValues[di_cntr++]);
5044       rv_cntr += num_dsv + num_drv;
5045     }
5046     else {
5047       ardi_cntr += num_div;
5048       rv_cntr   += num_cv + num_div + num_dsv + num_drv;
5049     }
5050     // discrete aleatory uncertain
5051     svd.aleatory_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
5052     if (active_totals[TOTAL_DAUIV]) {
5053       IntRealMapArray h_pt_prs;
5054       mvd_rep->pull_parameters<IntRealMap>(Pecos::HISTOGRAM_PT_INT,
5055         Pecos::H_PT_INT_PAIRS, h_pt_prs);
5056       size_t num_dausi = h_pt_prs.size(), num_dauri = num_div - num_dausi;
5057       for (i=0; i<num_dauri; ++i, ++ardi_cntr)
5058 	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
5059 	  ++di_cntr;
5060       for (i=0; i<num_dausi; ++i, ++ardi_cntr)
5061 	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
5062 	  map_keys_to_set(h_pt_prs[i], activeDiscSetIntValues[di_cntr++]);
5063     }
5064     else
5065       ardi_cntr += num_div;
5066     rv_cntr += num_cv + num_div + num_dsv + num_drv;
5067     // discrete epistemic uncertain
5068     svd.epistemic_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
5069     if (active_totals[TOTAL_DEUIV]) {
5070       IntRealMapArray deusi_vals_probs;
5071       mvd_rep->pull_parameters<IntRealMap>(Pecos::DISCRETE_UNCERTAIN_SET_INT,
5072         Pecos::DUSI_VALUES_PROBS, deusi_vals_probs);
5073       size_t num_deuri = svd.vc_lookup(DISCRETE_INTERVAL_UNCERTAIN),
5074 	     num_deusi = deusi_vals_probs.size();
5075       for (i=0; i<num_deuri; ++i, ++ardi_cntr)
5076 	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
5077 	  ++di_cntr;
5078       for (i=0; i<num_deusi; ++i, ++ardi_cntr)
5079 	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
5080 	  map_keys_to_set(deusi_vals_probs[i],
5081 			  activeDiscSetIntValues[di_cntr++]);
5082     }
5083     else
5084       ardi_cntr += num_div;
5085     rv_cntr += num_cv + num_div + num_dsv + num_drv;
5086     // discrete state
5087     if (active_totals[TOTAL_DSIV]) {
5088       size_t num_dssi = svd.vc_lookup(DISCRETE_STATE_SET_INT),
5089 	     num_dsri = all_totals[TOTAL_DSIV] - num_dssi;
5090       rv_cntr += all_totals[TOTAL_CSV];
5091       for (i=0; i<num_dsri; ++i, ++ardi_cntr, ++rv_cntr)
5092 	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
5093 	  ++di_cntr;                  // leave bit as false
5094       for (i=0; i<num_dssi; ++i, ++ardi_cntr, ++rv_cntr)
5095 	if (!all_relax_di[ardi_cntr]) // part of active discrete vars
5096 	  mvd_rep->pull_parameter<IntSet>(rv_cntr, Pecos::DSI_VALUES,
5097 					  activeDiscSetIntValues[di_cntr++]);
5098     }
5099     break;
5100   }
5101   }
5102 
5103   prevDSIView = active_view;
5104   return activeDiscSetIntValues;
5105 }
5106 
5107 
const StringSetArray& Model::discrete_set_string_values(short active_view)
{
  // envelope fwd to letter
  if (modelRep)
    return modelRep->discrete_set_string_values(active_view);

  // return previous result for previous invocation with consistent view
  // Note: any external update of DSS values should reset prevDSSView to 0
  if (active_view == prevDSSView) return activeDiscSetStringValues;

  // Rebuild the cached array of admissible discrete set string values for
  // the variables active under active_view, pulling set definitions from
  // the marginals-based multivariate distribution.
  std::shared_ptr<Pecos::MarginalsCorrDistribution> mvd_rep =
    std::static_pointer_cast<Pecos::MarginalsCorrDistribution>
    (mvDist.multivar_dist_rep());
  const SharedVariablesData& svd = currentVariables.shared_data();
  switch (active_view) {
  case MIXED_DESIGN: case RELAXED_DESIGN: {
    // design string sets immediately follow the design continuous and
    // discrete int random variables within the RV sequence
    size_t num_cv, num_div, num_dsv, num_drv;
    svd.design_counts(num_cv, num_div, num_dsv, num_drv);
    mvd_rep->pull_parameters<StringSet>(num_cv + num_div, num_dsv,
      Pecos::DSS_VALUES, activeDiscSetStringValues);
    break;
  }
  case MIXED_ALEATORY_UNCERTAIN: case RELAXED_ALEATORY_UNCERTAIN: {
    // aleatory string sets are the support points of the string histograms
    StringRealMapArray h_pt_prs;
    mvd_rep->pull_parameters<StringRealMap>(Pecos::HISTOGRAM_PT_STRING,
      Pecos::H_PT_STR_PAIRS, h_pt_prs);
    size_t i, num_dauss = h_pt_prs.size();
    activeDiscSetStringValues.resize(num_dauss);
    for (i=0; i<num_dauss; ++i)
      map_keys_to_set(h_pt_prs[i], activeDiscSetStringValues[i]);
    break;
  }
  case MIXED_EPISTEMIC_UNCERTAIN: case RELAXED_EPISTEMIC_UNCERTAIN: {
    // epistemic string sets come from the discrete uncertain set string
    // value/probability pairs
    StringRealMapArray deuss_vals_probs;
    mvd_rep->pull_parameters<StringRealMap>(
      Pecos::DISCRETE_UNCERTAIN_SET_STRING, Pecos::DUSS_VALUES_PROBS,
      deuss_vals_probs);
    size_t i, num_deuss = deuss_vals_probs.size();
    activeDiscSetStringValues.resize(num_deuss);
    for (i=0; i<num_deuss; ++i)
      map_keys_to_set(deuss_vals_probs[i], activeDiscSetStringValues[i]);
    break;
  }
  case MIXED_UNCERTAIN: case RELAXED_UNCERTAIN: {
    // aleatory (histogram) sets first, then epistemic uncertain sets
    StringRealMapArray h_pt_prs, deuss_vals_probs;
    mvd_rep->pull_parameters<StringRealMap>(Pecos::HISTOGRAM_PT_STRING,
      Pecos::H_PT_STR_PAIRS, h_pt_prs);
    mvd_rep->pull_parameters<StringRealMap>(
      Pecos::DISCRETE_UNCERTAIN_SET_STRING,
      Pecos::DUSS_VALUES_PROBS, deuss_vals_probs);
    size_t i, num_dauss = h_pt_prs.size(), num_deuss = deuss_vals_probs.size();
    activeDiscSetStringValues.resize(num_dauss + num_deuss);
    for (i=0; i<num_dauss; ++i)
      map_keys_to_set(h_pt_prs[i], activeDiscSetStringValues[i]);
    for (i=0; i<num_deuss; ++i)
      map_keys_to_set(deuss_vals_probs[i],
		      activeDiscSetStringValues[i+num_dauss]);
    break;
  }
  case MIXED_STATE: case RELAXED_STATE: {
    // skip over the design, aleatory, and epistemic RV blocks to locate
    // the starting index of the state string sets in the RV sequence
    size_t num_cv, num_div, num_dsv, num_drv, start_rv = 0;
    svd.design_counts(num_cv, num_div, num_dsv, num_drv);
    start_rv += num_cv + num_div + num_dsv + num_drv;
    svd.aleatory_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
    start_rv += num_cv + num_div + num_dsv + num_drv;
    svd.epistemic_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
    start_rv += num_cv + num_div + num_dsv + num_drv;
    svd.state_counts(num_cv, num_div, num_dsv, num_drv);
    start_rv += num_cv + num_div;
    mvd_rep->pull_parameters<StringSet>(start_rv, num_dsv, Pecos::DSS_VALUES,
					activeDiscSetStringValues);
    break;
  }
  case MIXED_ALL: case RELAXED_ALL: {
    // concatenate design, aleatory, epistemic, and state string sets;
    // ds_cntr indexes the aggregated output array while rv_cntr tracks
    // the position within the full random variable sequence
    StringRealMapArray h_pt_prs, deuss_vals_probs;
    mvd_rep->pull_parameters<StringRealMap>(Pecos::HISTOGRAM_PT_STRING,
      Pecos::H_PT_STR_PAIRS, h_pt_prs);
    mvd_rep->pull_parameters<StringRealMap>(
      Pecos::DISCRETE_UNCERTAIN_SET_STRING, Pecos::DUSS_VALUES_PROBS,
      deuss_vals_probs);
    size_t i, ds_cntr = 0,
      num_ddss  = svd.vc_lookup(DISCRETE_DESIGN_SET_STRING),
      num_dauss = h_pt_prs.size(), num_deuss= deuss_vals_probs.size(),
      num_dsss  = svd.vc_lookup(DISCRETE_STATE_SET_STRING);
    activeDiscSetStringValues.resize(num_ddss + num_dauss +
				     num_deuss + num_dsss);
    size_t num_cv, num_div, num_dsv, num_drv;
    svd.design_counts(num_cv, num_div, num_dsv, num_drv);
    // design string sets: pull each set by its RV index
    size_t rv_cntr = num_cv + num_div;
    for (i=0; i<num_ddss; ++i, ++rv_cntr, ++ds_cntr)
      mvd_rep->pull_parameter<StringSet>(rv_cntr, Pecos::DSS_VALUES,
					 activeDiscSetStringValues[ds_cntr]);
    rv_cntr += num_drv; // advance past the remaining (real) design RVs
    // aleatory string sets from the histogram support points
    svd.aleatory_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
    for (i=0; i<num_dauss; ++i, ++ds_cntr)
      map_keys_to_set(h_pt_prs[i], activeDiscSetStringValues[ds_cntr]);
    rv_cntr += num_cv + num_div + num_dsv + num_drv; // past aleatory block
    // epistemic string sets from the value/probability pairs
    svd.epistemic_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
    for (i=0; i<num_deuss; ++i, ++ds_cntr)
      map_keys_to_set(deuss_vals_probs[i], activeDiscSetStringValues[ds_cntr]);
    rv_cntr += num_cv + num_div + num_dsv + num_drv; // past epistemic block
    // state string sets: pull each set by its RV index
    svd.state_counts(num_cv, num_div, num_dsv, num_drv);
    rv_cntr += num_cv + num_div;
    for (i=0; i<num_dsss; ++i, ++rv_cntr, ++ds_cntr)
      mvd_rep->pull_parameter<StringSet>(rv_cntr, Pecos::DSS_VALUES,
					 activeDiscSetStringValues[ds_cntr]);
    break;
  }
  }

  // record the view used to build the cache so the next call can reuse it
  prevDSSView = active_view;
  return activeDiscSetStringValues;
}
5220 
5221 
const RealSetArray& Model::discrete_set_real_values(short active_view)
{
  // envelope fwd to letter
  if (modelRep)
    return modelRep->discrete_set_real_values(active_view);

  // return previous result for previous invocation with consistent view
  // Note: any external update of DSR values should reset prevDSRView to 0
  if (active_view == prevDSRView) return activeDiscSetRealValues;

  // Rebuild the cached array of admissible discrete set real values for
  // the variables active under active_view.
  std::shared_ptr<Pecos::MarginalsCorrDistribution> mvd_rep =
    std::static_pointer_cast<Pecos::MarginalsCorrDistribution>
    (mvDist.multivar_dist_rep());
  const SharedVariablesData& svd = currentVariables.shared_data();
  switch (active_view) {
  case MIXED_DESIGN: {
    // design real sets follow the design continuous, discrete int, and
    // discrete string random variables within the RV sequence
    size_t num_cv, num_div, num_dsv, num_drv;
    svd.design_counts(num_cv, num_div, num_dsv, num_drv);
    mvd_rep->pull_parameters<RealSet>(num_cv + num_div + num_dsv, num_drv,
      Pecos::DSR_VALUES, activeDiscSetRealValues);
    break;
  }
  case MIXED_ALEATORY_UNCERTAIN: {
    // aleatory real sets are the support points of the real histograms
    RealRealMapArray h_pt_prs;
    mvd_rep->pull_parameters<RealRealMap>(Pecos::HISTOGRAM_PT_REAL,
      Pecos::H_PT_REAL_PAIRS, h_pt_prs);
    size_t i, num_dausr = h_pt_prs.size();
    activeDiscSetRealValues.resize(num_dausr);
    for (i=0; i<num_dausr; ++i)
      map_keys_to_set(h_pt_prs[i], activeDiscSetRealValues[i]);
    break;
  }
  case MIXED_EPISTEMIC_UNCERTAIN: {
    // epistemic real sets come from the discrete uncertain set real
    // value/probability pairs
    RealRealMapArray deusr_vals_probs;
    mvd_rep->pull_parameters<RealRealMap>(Pecos::DISCRETE_UNCERTAIN_SET_REAL,
      Pecos::DUSR_VALUES_PROBS, deusr_vals_probs);
    size_t i, num_deusr = deusr_vals_probs.size();
    activeDiscSetRealValues.resize(num_deusr);
    for (i=0; i<num_deusr; ++i)
      map_keys_to_set(deusr_vals_probs[i], activeDiscSetRealValues[i]);
    break;
  }
  case MIXED_UNCERTAIN: {
    // aleatory (histogram) sets first, then epistemic uncertain sets
    RealRealMapArray h_pt_prs, deusr_vals_probs;
    mvd_rep->pull_parameters<RealRealMap>(Pecos::HISTOGRAM_PT_REAL,
      Pecos::H_PT_REAL_PAIRS, h_pt_prs);
    mvd_rep->pull_parameters<RealRealMap>(Pecos::DISCRETE_UNCERTAIN_SET_REAL,
      Pecos::DUSR_VALUES_PROBS, deusr_vals_probs);
    size_t i, num_dausr = h_pt_prs.size(), num_deusr = deusr_vals_probs.size();
    activeDiscSetRealValues.resize(num_dausr + num_deusr);
    for (i=0; i<num_dausr; ++i)
      map_keys_to_set(h_pt_prs[i], activeDiscSetRealValues[i]);
    for (i=0; i<num_deusr; ++i)
      map_keys_to_set(deusr_vals_probs[i],activeDiscSetRealValues[i+num_dausr]);
    break;
  }
  case MIXED_STATE: {
    // skip over the design, aleatory, and epistemic RV blocks to locate
    // the starting index of the state real sets in the RV sequence
    size_t num_cv, num_div, num_dsv, num_drv, start_rv = 0;
    svd.design_counts(num_cv, num_div, num_dsv, num_drv);
    start_rv += num_cv + num_div + num_dsv + num_drv;
    svd.aleatory_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
    start_rv += num_cv + num_div + num_dsv + num_drv;
    svd.epistemic_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
    start_rv += num_cv + num_div + num_dsv + num_drv;
    svd.state_counts(num_cv, num_div, num_dsv, num_drv);
    start_rv += num_cv + num_div + num_dsv;
    mvd_rep->pull_parameters<RealSet>(start_rv, num_drv, Pecos::DSR_VALUES,
				      activeDiscSetRealValues);
    break;
  }
  case MIXED_ALL: {
    // concatenate design, aleatory, epistemic, and state real sets;
    // dr_cntr indexes the aggregated output array while rv_cntr tracks
    // the position within the full random variable sequence
    RealRealMapArray h_pt_prs, deusr_vals_probs;
    mvd_rep->pull_parameters<RealRealMap>(Pecos::HISTOGRAM_PT_REAL,
      Pecos::H_PT_REAL_PAIRS, h_pt_prs);
    mvd_rep->pull_parameters<RealRealMap>(Pecos::DISCRETE_UNCERTAIN_SET_REAL,
      Pecos::DUSR_VALUES_PROBS, deusr_vals_probs);
    size_t i, dr_cntr = 0, num_dausr = h_pt_prs.size(),
      num_deusr = deusr_vals_probs.size(),
      num_dssr  = svd.vc_lookup(DISCRETE_STATE_SET_REAL);
    size_t num_cv, num_div, num_dsv, num_drv;
    svd.design_counts(num_cv, num_div, num_dsv, num_drv);
    activeDiscSetRealValues.resize(num_drv + num_dausr + num_deusr + num_dssr);
    // design real sets: pull each set by its RV index
    size_t rv_cntr = num_cv + num_div + num_dsv;
    for (i=0; i<num_drv; ++i, ++rv_cntr, ++dr_cntr)
      mvd_rep->pull_parameter<RealSet>(rv_cntr, Pecos::DSR_VALUES,
				       activeDiscSetRealValues[dr_cntr]);
    // aleatory real sets from the histogram support points; the counts
    // are refreshed first so that rv_cntr can advance past this block
    svd.aleatory_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
    for (i=0; i<num_dausr; ++i, ++dr_cntr)
      map_keys_to_set(h_pt_prs[i], activeDiscSetRealValues[dr_cntr]);
    rv_cntr += num_cv + num_div + num_dsv + num_drv; // past aleatory block
    // epistemic real sets from the value/probability pairs
    svd.epistemic_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
    for (i=0; i<num_deusr; ++i, ++dr_cntr)
      map_keys_to_set(deusr_vals_probs[i], activeDiscSetRealValues[dr_cntr]);
    rv_cntr += num_cv + num_div + num_dsv + num_drv; // past epistemic block
    // state real sets: pull each set by its RV index
    svd.state_counts(num_cv, num_div, num_dsv, num_drv);
    rv_cntr += num_cv + num_div + num_dsv;
    for (i=0; i<num_drv; ++i, ++rv_cntr, ++dr_cntr)
      mvd_rep->pull_parameter<RealSet>(rv_cntr, Pecos::DSR_VALUES,
				       activeDiscSetRealValues[dr_cntr]);
    break;
  }
  default: { // RELAXED_*
    // In relaxed views some discrete RVs are "relaxed" into the continuous
    // variables; all_relax_dr flags these, so only unrelaxed entries
    // contribute to the active discrete set array.  ardr_cntr tracks the
    // position among all discrete real variables, rv_cntr the position in
    // the full RV sequence, and dr_cntr the output index.
    const BitArray&    all_relax_dr = svd.all_relaxed_discrete_real();
    const SizetArray&    all_totals = svd.components_totals();
    const SizetArray& active_totals = svd.active_components_totals();
    size_t i, num_cv, num_div, num_dsv, num_drv,
           dr_cntr = 0, ardr_cntr = 0, rv_cntr = 0;
    // discrete design
    svd.design_counts(num_cv, num_div, num_dsv, num_drv);
    if (active_totals[TOTAL_DDRV]) {
      rv_cntr = num_cv + num_div + num_dsv;
      for (i=0; i<num_drv; ++i, ++ardr_cntr, ++rv_cntr)
	if (!all_relax_dr[ardr_cntr]) // part of active discrete vars
	  mvd_rep->pull_parameter<RealSet>(rv_cntr, Pecos::DSR_VALUES,
					   activeDiscSetRealValues[dr_cntr++]);
    }
    else { // design block inactive: skip its counters entirely
      ardr_cntr += num_drv;
      rv_cntr   += num_cv + num_div + num_dsv + num_drv;
    }
    // discrete aleatory uncertain
    svd.aleatory_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
    if (active_totals[TOTAL_DAURV]) {
      RealRealMapArray h_pt_prs;
      mvd_rep->pull_parameters<RealRealMap>(Pecos::HISTOGRAM_PT_REAL,
        Pecos::H_PT_REAL_PAIRS, h_pt_prs);
      size_t num_dausr = h_pt_prs.size();
      for (i=0; i<num_dausr; ++i, ++ardr_cntr)
	if (!all_relax_dr[ardr_cntr]) // part of active discrete vars
	  map_keys_to_set(h_pt_prs[i], activeDiscSetRealValues[dr_cntr++]);
    }
    else
      ardr_cntr += num_drv;
    rv_cntr += num_cv + num_div + num_dsv + num_drv;
    // discrete epistemic uncertain
    svd.epistemic_uncertain_counts(num_cv, num_div, num_dsv, num_drv);
    if (active_totals[TOTAL_DEURV]) {
      RealRealMapArray deusr_vals_probs;
      mvd_rep->pull_parameters<RealRealMap>(Pecos::DISCRETE_UNCERTAIN_SET_REAL,
        Pecos::DUSR_VALUES_PROBS, deusr_vals_probs);
      size_t num_deusr = deusr_vals_probs.size();
      for (i=0; i<num_deusr; ++i, ++ardr_cntr)
	if (!all_relax_dr[ardr_cntr]) // part of active discrete vars
	  map_keys_to_set(deusr_vals_probs[i],
			  activeDiscSetRealValues[dr_cntr++]);
    }
    else
      ardr_cntr += num_drv;
    rv_cntr += num_cv + num_div + num_dsv + num_drv;
    // discrete state
    if (active_totals[TOTAL_DSRV]) {
      svd.state_counts(num_cv, num_div, num_dsv, num_drv);
      rv_cntr += num_cv + num_div + num_dsv;
      for (i=0; i<num_drv; ++i, ++ardr_cntr, ++rv_cntr)
	if (!all_relax_dr[ardr_cntr]) // part of active discrete vars
	  mvd_rep->pull_parameter<RealSet>(rv_cntr, Pecos::DSR_VALUES,
					   activeDiscSetRealValues[dr_cntr++]);
    }
    break;
  }
  }

  // record the view used to build the cache so the next call can reuse it
  prevDSRView = active_view;
  return activeDiscSetRealValues;
}
5386 
5387 
derived_evaluation_id() const5388 int Model::derived_evaluation_id() const
5389 {
5390   if (!modelRep) {
5391     Cerr << "Error: Letter lacking redefinition of virtual "
5392 	 << "derived_evaluation_id() function.\n" << std::endl;
5393     abort_handler(MODEL_ERROR);
5394   }
5395 
5396   return modelRep->derived_evaluation_id();
5397 }
5398 
5399 
5400 /** Only Models including ApplicationInterfaces support an evaluation cache:
5401     surrogate, nested, and recast mappings are not stored in the cache.
5402     Possible exceptions: HierarchSurrModel, NestedModel::optionalInterface. */
evaluation_cache(bool recurse_flag) const5403 bool Model::evaluation_cache(bool recurse_flag) const
5404 {
5405   if (modelRep) // envelope fwd to letter
5406     return modelRep->evaluation_cache(recurse_flag);
5407   else // letter lacking redefinition of virtual fn.
5408     return false; // default
5409 }
5410 
5411 
5412 /** Only Models including ApplicationInterfaces interact with the restart
5413     file: surrogate, nested, and recast mappings are not stored in restart.
5414     Possible exceptions: DataFitSurrModel::import_points(),
5415     NestedModel::optionalInterface. */
restart_file(bool recurse_flag) const5416 bool Model::restart_file(bool recurse_flag) const
5417 {
5418   if (modelRep) // envelope fwd to letter
5419     return modelRep->restart_file(recurse_flag);
5420   else // letter lacking redefinition of virtual fn.
5421     return false; // default
5422 }
5423 
5424 
set_evaluation_reference()5425 void Model::set_evaluation_reference()
5426 {
5427   if (modelRep) // envelope fwd to letter
5428     modelRep->set_evaluation_reference();
5429   else { // letter lacking redefinition of virtual fn.
5430     Cerr << "Error: Letter lacking redefinition of virtual set_evaluation_"
5431 	 << "reference() function.\n" << std::endl;
5432     abort_handler(MODEL_ERROR);
5433   }
5434 }
5435 
5436 
fine_grained_evaluation_counters()5437 void Model::fine_grained_evaluation_counters()
5438 {
5439   if (modelRep) // envelope fwd to letter
5440     modelRep->fine_grained_evaluation_counters();
5441   else { // letter lacking redefinition of virtual fn.
5442     Cerr << "Error: Letter lacking redefinition of virtual fine_grained_"
5443 	 << "evaluation_counters() function.\n" << std::endl;
5444     abort_handler(MODEL_ERROR);
5445   }
5446 }
5447 
5448 
5449 void Model::
print_evaluation_summary(std::ostream & s,bool minimal_header,bool relative_count) const5450 print_evaluation_summary(std::ostream& s, bool minimal_header,
5451 			 bool relative_count) const
5452 {
5453   if (modelRep) // envelope fwd to letter
5454     modelRep->print_evaluation_summary(s, minimal_header, relative_count);
5455   else { // letter lacking redefinition of virtual fn.
5456     Cerr << "Error: Letter lacking redefinition of virtual print_evaluation_"
5457 	 << "summary() function.\n" << std::endl;
5458     abort_handler(MODEL_ERROR);
5459   }
5460 }
5461 
5462 /// Derived classes containing additional models or interfaces should
5463 /// implement this function to pass along to their sub Models/Interfaces
eval_tag_prefix(const String & eval_id_str)5464 void Model::eval_tag_prefix(const String& eval_id_str)
5465 {
5466   if (modelRep) {
5467     // update the base class cached value
5468     modelRep->evalTagPrefix = eval_id_str;
5469     // then derived classes may further forward this ID
5470     modelRep->eval_tag_prefix(eval_id_str);
5471   }
5472   else
5473     evalTagPrefix = eval_id_str;  // default is to set at base only
5474   // Models are not required to forward this as they may not have an Interface
5475   // else { // letter lacking redefinition of virtual fn.
5476   //   Cerr << "Error: Letter lacking redefinition of virtual eval_tag_prefix()"
5477   // 	 << "function.\n" << std::endl;
5478   //   abort_handler(MODEL_ERROR);
5479   // }
5480 }
5481 
5482 
db_lookup(const Variables & search_vars,const ActiveSet & search_set,Response & found_resp)5483 bool Model::db_lookup(const Variables& search_vars, const ActiveSet& search_set,
5484 		      Response& found_resp)
5485 {
5486   if (modelRep) // envelope fwd to letter
5487     return modelRep->db_lookup(search_vars, search_set, found_resp);
5488   else { // default implementation
5489     // dependence on interface_id() restricts successful find() operation to
5490     // cases where response is generated by a single non-approximate interface
5491     // at this level.  For Nested and Surrogate models, duplication detection
5492     // must occur at a lower level.
5493     PRPCacheHIter cache_it
5494       = lookup_by_val(data_pairs, interface_id(), search_vars, search_set);
5495     if (cache_it != data_pairs.get<hashed>().end()) {
5496       found_resp.active_set(search_set);
5497       found_resp.update(cache_it->response());
5498       return true;
5499     }
5500     return false;
5501   }
5502 }
5503 
5504 
5505 /** config_vars consists of [continuous, integer, string, real]. */
active_variables(const RealVector & config_vars,Model & model)5506 void Model::active_variables(const RealVector& config_vars, Model& model)
5507 {
5508   // TODO: If (as hoped) we convert the configuration reader to read
5509   // values instead of indices for strings (and actually read
5510   // integers), we can avoid the conversions below.
5511 
5512   size_t offset = 0;  // current index into configuration variables
5513 
5514   RealVector ccv(Teuchos::View, config_vars.values() + offset, model.cv());
5515   model.continuous_variables(ccv);
5516   offset += model.cv();
5517 
5518   RealVector dicv(Teuchos::View, config_vars.values() + offset, model.div());
5519   IntVector dicv_as_int(model.div());
5520   iround(dicv, dicv_as_int);
5521   model.discrete_int_variables(dicv_as_int);
5522   offset += model.div();
5523 
5524   RealVector dscv(Teuchos::View, config_vars.values() + offset, model.dsv());
5525   const StringSetArray& discrete_str_vals = model.discrete_set_string_values();
5526   for (size_t i=0; i<model.dsv(); ++i) {
5527     String str_value =
5528       set_index_to_value(boost::math::iround(dscv[i]), discrete_str_vals[i]);
5529     model.current_variables().discrete_string_variable(str_value, i);
5530   }
5531   offset += model.dsv();
5532 
5533   RealVector drcv(Teuchos::View, config_vars.values() + offset, model.drv());
5534   model.discrete_real_variables(drcv);
5535   //offset += model.drv();
5536 }
5537 
5538 
5539 
5540 /** config_vars consists of [continuous, integer, string, real]. */
void Model::inactive_variables(const RealVector& config_vars, Model& model)
{
  // convenience overload: apply the configuration values to the model's
  // own current_variables() object
  inactive_variables(config_vars, model, model.current_variables());
}
5545 
5546 
5547 /** config_vars consists of [continuous, integer, string, real]. */
inactive_variables(const RealVector & config_vars,Model & model,Variables & vars)5548 void Model::inactive_variables(const RealVector& config_vars, Model& model,
5549 			       Variables& vars)
5550 {
5551   // TODO: If (as hoped) we convert the configuration reader to read
5552   // values instead of indices for strings (and actually read
5553   // integers), we can avoid the conversions below.
5554 
5555   size_t offset = 0;  // current index into configuration variables
5556 
5557   RealVector ccv(Teuchos::View, config_vars.values() + offset, model.icv());
5558   vars.inactive_continuous_variables(ccv);
5559   offset += model.icv();
5560 
5561   RealVector dicv(Teuchos::View, config_vars.values() + offset, model.idiv());
5562   IntVector dicv_as_int(model.idiv());
5563   iround(dicv, dicv_as_int);
5564   vars.inactive_discrete_int_variables(dicv_as_int);
5565   offset += model.idiv();
5566 
5567   RealVector dscv(Teuchos::View, config_vars.values() + offset, model.idsv());
5568   // the admissible _inactive_ discrete string values
5569   const StringSetArray& discrete_str_vals =
5570     model.discrete_set_string_values(model.current_variables().view().second);
5571   for (size_t i=0; i<model.idsv(); ++i) {
5572     String str_value =
5573       set_index_to_value(boost::math::iround(dscv[i]), discrete_str_vals[i]);
5574     vars.inactive_discrete_string_variable(str_value, i);
5575   }
5576   offset += model.idsv();
5577 
5578   RealVector drcv(Teuchos::View, config_vars.values() + offset, model.idrv());
5579   vars.inactive_discrete_real_variables(drcv);
5580   //offset += model.idrv();
5581 }
5582 
5583 
evaluate(const RealMatrix & samples_matrix,Model & model,RealMatrix & resp_matrix)5584 void Model::evaluate(const RealMatrix& samples_matrix,
5585 		     Model& model, RealMatrix& resp_matrix)
5586 {
5587   // TODO: option for setting its active or inactive variables
5588 
5589   RealMatrix::ordinalType i, num_evals = samples_matrix.numCols();
5590   resp_matrix.shape(model.response_size(), num_evals);
5591 
5592   for (i=0; i<num_evals; ++i) {
5593 
5594     const RealVector sample_i =
5595       Teuchos::getCol(Teuchos::View, const_cast<RealMatrix&>(samples_matrix), i);
5596     Model::active_variables(sample_i, model);
5597 
5598     if (model.asynch_flag())
5599       model.evaluate_nowait();
5600     else {
5601       model.evaluate();
5602       const RealVector& fn_vals = model.current_response().function_values();
5603       Teuchos::setCol(fn_vals, i, resp_matrix);
5604     }
5605   }
5606 
5607   // synchronize asynchronous evaluations
5608   if (model.asynch_flag()) {
5609     const IntResponseMap& resp_map = model.synchronize();
5610     IntRespMCIter r_cit;
5611     for (i=0, r_cit=resp_map.begin(); r_cit!=resp_map.end(); ++i, ++r_cit)
5612       Teuchos::setCol(r_cit->second.function_values(), i, resp_matrix);
5613   }
5614 }
5615 
// Called from rekey_response_map to allow Models to store their interfaces'
// asynchronous evaluations. When the meta_object is a Model, no action is
// performed.
asynch_eval_store(const Model & model,const int & id,const Response & response)5618 void Model::asynch_eval_store(const Model &model, const int &id, const Response &response) {
5619   return;
5620 }
5621 
// Called from rekey_response_map to allow Models to store their interfaces'
// asynchronous evaluations. I strongly suspect that there's a better design
// for this.
void Model::asynch_eval_store(const Interface &interface, const int &id, const Response &response) {
  // record the interface's asynchronous evaluation in the evaluations
  // database, keyed by this model's ID and the interface's ID
  evaluationsDB.store_interface_response(modelId, interface.interface_id(), id, response);
}
5627 
5628 /// Return the interface flag for the EvaluationsDB state
EvaluationsDBState Model::evaluations_db_state(const Interface &interface) {
  // interface evaluations are governed by the cached interface-level flag
  return interfEvaluationsDBState;
}
/// Return the model flag for the EvaluationsDB state
EvaluationsDBState Model::evaluations_db_state(const Model &model) {
  // always return INACTIVE because models don't store evaluations of their
  // submodels (cf. the no-op Model overload of asynch_eval_store)
  return EvaluationsDBState::INACTIVE;
}
5638 
5639 /** Rationale: The parser allows multiple user-specified models with
5640     empty (unspecified) ID. However, only a single Model with empty
5641     ID can be constructed (if it's the only one present, or the "last
5642     one parsed"). Therefore decided to prefer NO_MODEL_ID over
5643     NO_MODEL_ID_<num> for (some) consistency with interface
5644     NO_ID convention. _MODEL_ was inserted in the middle to distinguish
5645     "anonymous" MODELS from methods and interfaces in the hdf5 output.
5646     Note that this function is not used to name recast models; see their
    constructors for how it's done. */
user_auto_id()5648 String Model::user_auto_id()
5649 {
5650   // // increment and then use the current ID value
5651   // return String("NO_ID_") + std::to_string(++userAutoIdNum);
5652   return String("NO_MODEL_ID");
5653 }
5654 
5655 /** Rationale: For now NOSPEC_MODEL_ID_ is chosen due to historical
5656     id="NO_SPECIFICATION" used for internally-constructed
5657     Models. Longer-term, consider auto-generating an ID that
5658     includes the context from which the method is constructed, e.g.,
5659     the parent method or model's ID, together with its name.
5660     Note that this function is not used to name recast models; see
    their constructors for how it's done.
5662 **/
no_spec_id()5663 String Model::no_spec_id()
5664 {
5665   // increment and then use the current ID value
5666   return String("NOSPEC_MODEL_ID_") + std::to_string(++noSpecIdNum);
5667 }
5668 
5669 // This is overridden by RecastModel so that it and its derived classes return
5670 // the root_model_id() of their subModels. The base Model class version terminates
5671 // the "recursion" for models of other types.
root_model_id()5672 String Model::root_model_id() {
5673   if(modelRep) return modelRep->root_model_id();
5674   else return modelId;
5675 }
5676 
5677 } // namespace Dakota
5678