1 /* _______________________________________________________________________
2
3 DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
4 Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
5 This software is distributed under the GNU Lesser General Public License.
6 For more information, see the README file in the top Dakota directory.
7 _______________________________________________________________________ */
8
9 //- Class: HierarchSurrModel
10 //- Description: Implementation code for the HierarchSurrModel class
11 //- Owner: Mike Eldred
12 //- Checked by:
13
14 #include "HierarchSurrModel.hpp"
15 #include "ProblemDescDB.hpp"
16
17 static const char rcsId[]=
18 "@(#) $Id: HierarchSurrModel.cpp 6656 2010-02-26 05:20:48Z mseldre $";
19
20 namespace Dakota {
21
22 extern Model dummy_model; // defined in DakotaModel.cpp
23
24
/** Problem-DB constructor: instantiates the ordered model hierarchy,
    initializes the default truth/surrogate model keys, and (for
    correction-based response modes) initializes the discrepancy
    correction for the initial active key.  The DB model node is saved
    and restored so sub-model instantiation does not perturb later DB
    lookups. */
HierarchSurrModel::HierarchSurrModel(ProblemDescDB& problem_db):
  SurrogateModel(problem_db),
  corrOrder(problem_db.get_short("model.surrogate.correction_order")),
  correctionMode(SINGLE_CORRECTION)
{
  // Hierarchical surrogate models pass through numerical derivatives
  supportsEstimDerivs = false;
  // initialize ignoreBounds even though it's irrelevant for pass through
  ignoreBounds = problem_db.get_bool("responses.ignore_bounds");
  // initialize centralHess even though it's irrelevant for pass through
  centralHess = problem_db.get_bool("responses.central_hess");

  const StringArray& ordered_model_ptrs
    = problem_db.get_sa("model.surrogate.ordered_model_pointers");

  size_t i, num_models = ordered_model_ptrs.size(),
    model_index = problem_db.get_db_model_node(); // for restoration

  // NOTE(review): cv_view is only consumed by the commented-out view
  // consistency check inside the loop below.
  const std::pair<short,short>& cv_view = currentVariables.view();
  orderedModels.resize(num_models);
  for (i=0; i<num_models; ++i) {
    // point the DB at each model spec in turn, then instantiate it
    problem_db.set_db_model_nodes(ordered_model_ptrs[i]);
    orderedModels[i] = problem_db.get_model();
    check_submodel_compatibility(orderedModels[i]);
    //if (cv_view != orderedModels[i].current_variables().view()) {
    //  Cerr << "Error: variable views in hierarchical models must be "
    //       << "identical." << std::endl;
    //  abort_handler(-1);
    //}
  }

  problem_db.set_db_model_nodes(model_index); // restore

  // default index values, to be overridden at run time.
  // Key layout (3 entries): [0] unused here, [1] model form, [2] soln level.
  surrModelKey.assign(3, 0); truthModelKey.assign(3, 0);
  if (num_models == 1) // first and last solution level (1 model)
    truthModelKey[2] = orderedModels[0].solution_levels() - 1;
  else { // first and last model form (solution levels ignored)
    truthModelKey[1] = num_models - 1;
    surrModelKey[2] = truthModelKey[2] = USHRT_MAX;
  }
  Pecos::DiscrepancyCalculator::
    aggregate_keys(truthModelKey, surrModelKey, activeKey);
  check_model_interface_instance();

  // Correction is required in HierarchSurrModel for some responseModes.
  // Enforcement of a correction type for these modes occurs in
  // surrogate_response_mode(short).
  switch (responseMode) {
  case MODEL_DISCREPANCY: case AUTO_CORRECTED_SURROGATE:
    if (corrType) // initialize DiscrepancyCorrection using initial keys
      deltaCorr[activeKey].initialize(surrogate_model(), surrogateFnIndices,
                                      corrType, corrOrder);
    break;
  }
  //truthResponseRef[truthModelKey] = currentResponse.copy();
}
82
83
check_submodel_compatibility(const Model & sub_model)84 void HierarchSurrModel::check_submodel_compatibility(const Model& sub_model)
85 {
86 SurrogateModel::check_submodel_compatibility(sub_model);
87
88 bool error_flag = false;
89 // Check for compatible array sizing between sub_model and currentResponse.
90 // HierarchSurrModel creates aggregations and DataFitSurrModel consumes them.
91 // For now, allow either a factor of 2 or 1 from aggregation or not. In the
92 // future, aggregations may span a broader model hierarchy (e.g., factor =
93 // orderedModels.size()). In general, the fn count check needs to be
94 // specialized in the derived classes.
95 size_t sm_qoi = sub_model.qoi();//, aggregation = numFns / sm_qoi;
96 if ( numFns % sm_qoi ) { //|| aggregation < 1 || aggregation > 2 ) {
97 Cerr << "Error: incompatibility between approximate and actual model "
98 << "response function sets\n within HierarchSurrModel: "<< numFns
99 << " approximate and " << sm_qoi << " actual functions.\n "
100 << "Check consistency of responses specifications." << std::endl;
101 error_flag = true;
102 }
103
104 // TO DO: Bayes exp design (hi2lo) introduces new requirements on a
105 // hierarchical model, and MF active subspaces will as well.
106 // > For (simulation-based) OED, one option is to enforce consistency in
107 // inactive state (config vars) and allow active parameterization to vary.
108 // > For hi2lo, this implies that the active variable subset could be null
109 // for HF, as the active calibration variables only exist for LF.
110 size_t sm_icv = sub_model.icv(), sm_idiv = sub_model.idiv(),
111 sm_idsv = sub_model.idsv(), sm_idrv = sub_model.idrv(),
112 icv = currentVariables.icv(), idiv = currentVariables.idiv(),
113 idsv = currentVariables.idsv(), idrv = currentVariables.idrv();
114 if (sm_icv != icv || sm_idiv != idiv || sm_idsv != idsv || sm_idrv != idrv) {
115 Cerr << "Error: incompatibility between approximate and actual model "
116 << "variable sets within\n HierarchSurrModel: inactive "
117 << "approximate = " << icv << " continuous, " << idiv
118 << " discrete int, " << idsv << " discrete string, and " << idrv
119 << " discrete real and\n inactive actual = " << sm_icv
120 << " continuous, " << sm_idiv << " discrete int, " << sm_idsv
121 << " discrete string, and " << sm_idrv << " discrete real. Check "
122 << "consistency of variables specifications." << std::endl;
123 error_flag = true;
124 }
125
126 if (error_flag)
127 abort_handler(-1);
128 }
129
130
/** Conservatively initialize communicators for every model in the
    hierarchy under both possible concurrency configurations, since the
    run-time responseMode is not known at init time. */
void HierarchSurrModel::
derived_init_communicators(ParLevLIter pl_iter, int max_eval_concurrency,
                           bool recurse_flag)
{
  // responseMode is a run-time setting (in SBLMinimizer, it is switched among
  // AUTO_CORRECTED_SURROGATE, BYPASS_SURROGATE, and UNCORRECTED_SURROGATE;
  // in NonDExpansion, it is switching between MODEL_DISCREPANCY and
  // UNCORRECTED_SURROGATE).  Since it is neither static nor generally
  // available at construct/init time, take a conservative approach with init
  // and free and a more aggressive approach with set.

  if (recurse_flag) {
    size_t i, model_index = probDescDB.get_db_model_node(), // for restoration
      num_models = orderedModels.size();

    // init and free must cover possible subset of active responseModes and
    // ordered model fidelities, but only 2 models at most will be active at
    // runtime.  In order to reduce the number of parallel configurations, we
    // group the responseModes into two sets: (1) the correction-based set
    // commonly used in surrogate-based optimization et al., and (2) the
    // aggregation-based set commonly used in multilevel/multifidelity UQ.

    // TO DO: would like a better detection option, but passing the mode from
    // the Iterator does not work in parallel w/o an additional bcast (Iterator
    // only instantiated on iteratorComm rank 0).  For now, we will infer it
    // from an associated method spec at init time.
    // Note: responseMode gets bcast at run time in component_parallel_mode()
    bool extra_deriv_config
      = (probDescDB.get_ushort("method.algorithm") & MINIMIZER_BIT);
    //(responseMode == UNCORRECTED_SURROGATE ||
    // responseMode == BYPASS_SURROGATE ||
    // responseMode == AUTO_CORRECTED_SURROGATE);

    for (i=0; i<num_models; ++i) {
      Model& model_i = orderedModels[i];
      // superset of possible init calls (two configurations for i > 0)
      probDescDB.set_db_model_nodes(model_i.model_id());
      model_i.init_communicators(pl_iter, max_eval_concurrency);
      if (extra_deriv_config) // && i) // mid and high fidelity only?
        model_i.init_communicators(pl_iter, model_i.derivative_concurrency());
    }


    /* This version inits only two models
    Model& lf_model = surrogate_model();
    Model& hf_model = truth_model();

    // superset of possible init calls (two configurations for HF)
    probDescDB.set_db_model_nodes(lf_model.model_id());
    lf_model.init_communicators(pl_iter, max_eval_concurrency);

    probDescDB.set_db_model_nodes(hf_model.model_id());
    hf_model.init_communicators(pl_iter, hf_model.derivative_concurrency());
    hf_model.init_communicators(pl_iter, max_eval_concurrency);
    */


    /* This version does not support runtime updating of responseMode
    switch (responseMode) {
    case UNCORRECTED_SURROGATE:
      // LF are used in iterator evals
      lf_model.init_communicators(pl_iter, max_eval_concurrency);
      break;
    case AUTO_CORRECTED_SURROGATE:
      // LF are used in iterator evals
      lf_model.init_communicators(pl_iter, max_eval_concurrency);
      // HF evals are for correction and validation:
      // concurrency = one eval at a time * derivative concurrency per eval
      hf_model.init_communicators(pl_iter, hf_model.derivative_concurrency());
      break;
    case BYPASS_SURROGATE:
      // HF are used in iterator evals
      hf_model.init_communicators(pl_iter, max_eval_concurrency);
      break;
    case MODEL_DISCREPANCY: case AGGREGATED_MODELS:
      // LF and HF are used in iterator evals
      lf_model.init_communicators(pl_iter, max_eval_concurrency);
      hf_model.init_communicators(pl_iter, max_eval_concurrency);
      break;
    }
    */

    probDescDB.set_db_model_nodes(model_index); // restore all model nodes
  }
}
216
217
/** Aggressively set communicators based on the current (run-time)
    responseMode, configuring only the model(s) that mode will use and
    deriving asynchEvalFlag/evaluationCapacity from them. */
void HierarchSurrModel::
derived_set_communicators(ParLevLIter pl_iter, int max_eval_concurrency,
                          bool recurse_flag)
{
  miPLIndex = modelPCIter->mi_parallel_level_index(pl_iter);// run time setting

  // HierarchSurrModels do not utilize default set_ie_asynchronous_mode() as
  // they do not define the ie_parallel_level

  // This aggressive logic is appropriate for invocations of the Model via
  // Iterator::run(), but is fragile w.r.t. invocations of the Model outside
  // this scope (e.g., Model::evaluate() within SBLMinimizer).  The default
  // responseMode value is {AUTO_,UN}CORRECTED_SURROGATE, which mitigates
  // the specific case of SBLMinimizer, but the general fragility remains.
  if (recurse_flag) {

    // bcast not needed for recurse_flag=false in serve_run call to set_comms
    //if (pl_iter->server_communicator_size() > 1)
    //  parallelLib.bcast(responseMode, *pl_iter);

    switch (responseMode) {

    // CASES WITH A SINGLE ACTIVE MODEL:

    case UNCORRECTED_SURROGATE: {
      // only the LF model is evaluated; adopt its parallel settings
      Model& lf_model = surrogate_model();
      lf_model.set_communicators(pl_iter, max_eval_concurrency);
      asynchEvalFlag     = lf_model.asynch_flag();
      evaluationCapacity = lf_model.evaluation_capacity();
      break;
    }
    case BYPASS_SURROGATE: {
      // only the HF model is evaluated; adopt its parallel settings
      Model& hf_model = truth_model();
      hf_model.set_communicators(pl_iter, max_eval_concurrency);
      asynchEvalFlag     = hf_model.asynch_flag();
      evaluationCapacity = hf_model.evaluation_capacity();
      break;
    }

    // CASES WHERE ANY/ALL MODELS COULD BE ACTIVE:

    case AUTO_CORRECTED_SURROGATE: {
      // Lowest fidelity model is interfaced with minimizer:
      Model& model_0 = orderedModels[0];
      model_0.set_communicators(pl_iter, max_eval_concurrency);
      asynchEvalFlag     = model_0.asynch_flag();
      evaluationCapacity = model_0.evaluation_capacity();

      // TO DO: this will not be true for multigrid optimization:
      bool use_deriv_conc = true; // only verifications/corrections
      // Either need detection logic, a passed option, or to abandon the
      // specialization and just generalize init/set/free to use the max
      // of the two values...

      // Loop over all higher fidelity models: these are used for
      // correction/verification, so derivative concurrency applies
      size_t i, num_models = orderedModels.size(); int cap_i;
      for (i=1; i<num_models; ++i) {
        Model& model_i = orderedModels[i];
        if (use_deriv_conc) {
          int deriv_conc_i = model_i.derivative_concurrency();
          model_i.set_communicators(pl_iter, deriv_conc_i);
          if (deriv_conc_i > 1 && model_i.asynch_flag()) asynchEvalFlag = true;
        }
        else {
          model_i.set_communicators(pl_iter, max_eval_concurrency);
          if (model_i.asynch_flag()) asynchEvalFlag = true;
        }
        // capacity is the max over all active models
        cap_i = model_i.evaluation_capacity();
        if (cap_i > evaluationCapacity) evaluationCapacity = cap_i;
      }
      break;
    }
    case MODEL_DISCREPANCY: case AGGREGATED_MODELS: {
      // all models may be evaluated at full concurrency
      size_t i, num_models = orderedModels.size(); int cap_i;
      asynchEvalFlag = false; evaluationCapacity = 1;
      for (i=0; i<num_models; ++i) {
        Model& model_i = orderedModels[i];
        model_i.set_communicators(pl_iter, max_eval_concurrency);
        if (model_i.asynch_flag()) asynchEvalFlag = true;
        cap_i = model_i.evaluation_capacity();
        if (cap_i > evaluationCapacity) evaluationCapacity = cap_i;
      }
      break;
    }
    }
  }
}
305
306
307 void HierarchSurrModel::
derived_free_communicators(ParLevLIter pl_iter,int max_eval_concurrency,bool recurse_flag)308 derived_free_communicators(ParLevLIter pl_iter, int max_eval_concurrency,
309 bool recurse_flag)
310 {
311 if (recurse_flag) {
312
313 size_t i, num_models = orderedModels.size();
314 bool extra_deriv_config = true;//(responseMode == UNCORRECTED_SURROGATE ||
315 // responseMode == BYPASS_SURROGATE ||
316 // responseMode == AUTO_CORRECTED_SURROGATE);
317 for (i=0; i<num_models; ++i) {
318 Model& model_i = orderedModels[i];
319 // superset of possible init calls (two configurations for i > 0)
320 model_i.free_communicators(pl_iter, max_eval_concurrency);
321 if (extra_deriv_config) // && i) // mid and high fidelity only?
322 model_i.free_communicators(pl_iter, model_i.derivative_concurrency());
323 }
324
325
326 /* This version frees only two models:
327 // superset of possible free calls (two configurations for HF)
328 surrogate_model().free_communicators(pl_iter, max_eval_concurrency);
329 Model& hf_model = truth_model();
330 hf_model.free_communicators(pl_iter, hf_model.derivative_concurrency());
331 hf_model.free_communicators(pl_iter, max_eval_concurrency);
332 */
333
334
335 /* This version does not support runtime updating of responseMode:
336 switch (responseMode) {
337 case UNCORRECTED_SURROGATE:
338 lf_model.free_communicators(pl_iter, max_eval_concurrency);
339 break;
340 case AUTO_CORRECTED_SURROGATE:
341 lf_model.free_communicators(pl_iter, max_eval_concurrency);
342 hf_model.free_communicators(pl_iter, hf_model.derivative_concurrency());
343 break;
344 case BYPASS_SURROGATE:
345 hf_model.free_communicators(pl_iter, max_eval_concurrency);
346 break;
347 case MODEL_DISCREPANCY: case AGGREGATED_MODELS:
348 lf_model.free_communicators(pl_iter, max_eval_concurrency);
349 hf_model.free_communicators(pl_iter, max_eval_concurrency);
350 break;
351 }
352 */
353 }
354 }
355
356
357 /** Inactive variables must be propagated when a HierarchSurrModel
358 is employed by a sub-iterator (e.g., OUU with MLMC or MLPCE).
359 In current use cases, this can occur once per sub-iterator
360 execution within Model::initialize_mapping(). */
initialize_mapping(ParLevLIter pl_iter)361 bool HierarchSurrModel::initialize_mapping(ParLevLIter pl_iter)
362 {
363 Model::initialize_mapping(pl_iter);
364
365 // push inactive variable values/bounds from currentVariables and
366 // userDefinedConstraints into orderedModels
367 size_t i, num_models = orderedModels.size();
368 for (i=0; i<num_models; ++i) {
369 orderedModels[i].initialize_mapping(pl_iter);
370 init_model(orderedModels[i]);
371 }
372
373 return false; // no change to problem size
374 }
375
376
/** Sub-model mappings must be finalized when a HierarchSurrModel
    is employed by a sub-iterator (e.g., OUU with MLMC or MLPCE).
    In current use cases, this can occur once per sub-iterator
    execution within Model::finalize_mapping(). */
finalize_mapping()381 bool HierarchSurrModel::finalize_mapping()
382 {
383 size_t i, num_models = orderedModels.size();
384 for (i=0; i<num_models; ++i)
385 orderedModels[i].finalize_mapping();
386
387 Model::finalize_mapping();
388
389 return false; // no change to problem size
390 }
391
392
/** Evaluate the truth (HF) model at the current variables and cache the
    reference response (keyed by truthModelKey) for later use by
    discrepancy correction.  The LF evaluation and the correction
    computation itself are performed externally (e.g., by SBO). */
void HierarchSurrModel::build_approximation()
{
  Cout << "\n>>>>> Building hierarchical approximation.\n";

  // perform the eval for the low fidelity model
  // NOTE: For SBO, the low fidelity eval is performed externally and its
  // response is passed into compute_correction.
  // -->> move LF model out and restructure if(!approxBuilds)
  //ActiveSet temp_set = lf_model.current_response().active_set();
  //temp_set.request_values(1);
  //if (sameModelInstance)
  //  lf_model.solution_level_index(surrogate_level_index());
  //lf_model.evaluate(temp_set);
  //const Response& lo_fi_response = lf_model.current_response();

  Model& hf_model = truth_model();
  if (hierarchicalTagging) {
    String eval_tag = evalTagPrefix + '.' + std::to_string(surrModelEvalCntr+1);
    hf_model.eval_tag_prefix(eval_tag);
  }

  // set HierarchSurrModel parallelism mode to HF model
  component_parallel_mode(TRUTH_MODEL_MODE);

  // update HF model with current variable values/bounds/labels
  update_model(hf_model);

  // store inactive variable values for use in determining whether an
  // automatic rebuild of an approximation is required
  // (reference{C,D}{L,U}Bnds are not needed in the hierarchical case)
  const Variables& hf_vars = hf_model.current_variables();
  copy_data(hf_vars.inactive_continuous_variables(),    referenceICVars);
  copy_data(hf_vars.inactive_discrete_int_variables(),  referenceIDIVars);
  referenceIDSVars = hf_vars.inactive_discrete_string_variables();
  copy_data(hf_vars.inactive_discrete_real_variables(), referenceIDRVars);

  // compute the response for the high fidelity model: request derivative
  // data matching the correction's data order when a correction exists
  ShortArray total_asv, hf_asv, lf_asv;
  std::map<UShortArray, DiscrepancyCorrection>::iterator dc_it
    = deltaCorr.find(activeKey);
  if (dc_it!=deltaCorr.end() && dc_it->second.initialized())
    total_asv.assign(numFns, dc_it->second.data_order());
  else
    total_asv.assign(numFns, 1); // default: values only if no deriv correction
  // NOTE(review): lf_asv is populated by asv_split() but not used in this
  // function (the LF eval occurs externally) — confirm intentional.
  asv_split(total_asv, hf_asv, lf_asv, true);

  // lazily allocate the reference response slot for the current truth key
  if ( truthResponseRef.find(truthModelKey) == truthResponseRef.end() )
    truthResponseRef[truthModelKey] = currentResponse.copy();
  ActiveSet hf_set = currentResponse.active_set(); // copy
  hf_set.request_vector(hf_asv);
  if (sameModelInstance)
    hf_model.solution_level_index(truth_level_index());
  hf_model.evaluate(hf_set);
  truthResponseRef[truthModelKey].update(hf_model.current_response());

  // could compute the correction to LF model here, but rely on an external
  // call for consistency with DataFitSurr and to facilitate SBO logic.  In
  // particular, lo_fi_response involves find_center(), hard conv check, etc.
  //deltaCorr[activeKey].compute(..., truthResponseRef, lo_fi_response);

  Cout << "\n<<<<< Hierarchical approximation build completed.\n";
  ++approxBuilds;
}
456
457
458 /*
459 bool HierarchSurrModel::
460 build_approximation(const RealVector& c_vars, const Response& response)
461 {
462 // NOTE: this fn not currently used by SBO, but it could be.
463
464 // Verify data content in incoming response
465 const ShortArray& asrv = response.active_set_request_vector();
466 bool data_complete = true; short corr_order = dataCorr.correction_order();
467 for (size_t i=0; i<numFns; i++)
468 if ( ( corr_order == 2 && (asrv[i] & 7) != 7 ) ||
469 ( corr_order == 1 && (asrv[i] & 3) != 3 ) ||
470 ( corr_order == 0 && !(asrv[i] & 1) ) )
471 data_complete = false;
472 if (data_complete) {
473 Cout << "\n>>>>> Updating hierarchical approximation.\n";
474
475 // are these updates necessary?
476 Model& hf_model = truth_model();
477 currentVariables.continuous_variables(c_vars);
478 update_model(hf_model);
479 const Variables& hf_vars = hf_model.current_variables();
480 copy_data(hf_vars.inactive_continuous_variables(), referenceICVars);
481 copy_data(hf_vars.inactive_discrete_variables(), referenceIDVars);
482
483 truthResponseRef.update(response);
484
485 Cout << "\n<<<<< Hierarchical approximation update completed.\n";
486 }
487 else {
488 Cerr << "Warning: cannot use anchor point in HierarchSurrModel::"
489 << "build_approximation(RealVector&, Response&).\n";
490 currentVariables.continuous_variables(c_vars);
491 build_approximation();
492 }
493 return false; // correction is not embedded and must be computed (by SBO)
494 }
495 */
496
497
498 /** Compute the response synchronously using LF model, HF model, or
499 both (mixed case). For the LF model portion, compute the high
500 fidelity response if needed with build_approximation(), and, if
501 correction is active, correct the low fidelity results. */
derived_evaluate(const ActiveSet & set)502 void HierarchSurrModel::derived_evaluate(const ActiveSet& set)
503 {
504 ++surrModelEvalCntr;
505
506 // define LF/HF evaluation requirements
507 ShortArray hi_fi_asv, lo_fi_asv;
508 bool hi_fi_eval, lo_fi_eval, mixed_eval;
509 Response lo_fi_response, hi_fi_response; // don't use truthResponseRef
510 switch (responseMode) {
511 case UNCORRECTED_SURROGATE: case AUTO_CORRECTED_SURROGATE:
512 case AGGREGATED_MODELS:
513 asv_split(set.request_vector(), hi_fi_asv, lo_fi_asv, false);
514 hi_fi_eval = !hi_fi_asv.empty(); lo_fi_eval = !lo_fi_asv.empty();
515 mixed_eval = (hi_fi_eval && lo_fi_eval); break;
516 case BYPASS_SURROGATE:
517 hi_fi_eval = true; lo_fi_eval = mixed_eval = false; break;
518 case MODEL_DISCREPANCY:
519 hi_fi_eval = lo_fi_eval = mixed_eval = true; break;
520 }
521
522 Model& hf_model = (hi_fi_eval) ? truth_model() : dummy_model;
523 Model& lf_model = (lo_fi_eval) ? surrogate_model() : dummy_model;
524 Model& same_model = (hi_fi_eval) ? hf_model : lf_model;
525 if (hierarchicalTagging) {
526 String eval_tag = evalTagPrefix + '.' + std::to_string(surrModelEvalCntr+1);
527 if (sameModelInstance)
528 same_model.eval_tag_prefix(eval_tag);
529 else {
530 if (hi_fi_eval) hf_model.eval_tag_prefix(eval_tag);
531 if (lo_fi_eval) lf_model.eval_tag_prefix(eval_tag);
532 }
533 }
534
535 if (sameModelInstance) update_model(same_model);
536
537 // Notes on repetitive setting of model.solution_level_index():
538 // > when LF & HF are the same model, then setting the index for low or high
539 // invalidates the other fidelity definition.
540 // > within a single derived_evaluate(), could protect these updates with
541 // "if (sameModelInstance && mixed_eval)", but this does not guard against
542 // changes in eval requirements from the previous evaluation. Detecting
543 // the current solution index state is currently as expensive as resetting
544 // it, so just reset each time.
545
546 // ------------------------------
547 // Compute high fidelity response
548 // ------------------------------
549 if (hi_fi_eval) {
550 component_parallel_mode(TRUTH_MODEL_MODE); // TO DO: sameModelInstance
551 if (sameModelInstance)
552 hf_model.solution_level_index(truth_level_index());
553 else
554 update_model(hf_model);
555 switch (responseMode) {
556 case UNCORRECTED_SURROGATE: case AUTO_CORRECTED_SURROGATE:
557 case AGGREGATED_MODELS: {
558 ActiveSet hi_fi_set(hi_fi_asv, set.derivative_vector());
559 hf_model.evaluate(hi_fi_set);
560 if (mixed_eval)
561 hi_fi_response = (sameModelInstance) ? // deep copy or shared rep
562 hf_model.current_response().copy() : hf_model.current_response();
563 else {
564 currentResponse.active_set(hi_fi_set);
565 currentResponse.update(hf_model.current_response());
566 }
567 break;
568 }
569 case BYPASS_SURROGATE:
570 hf_model.evaluate(set);
571 currentResponse.active_set(set);
572 currentResponse.update(hf_model.current_response());
573 break;
574 case MODEL_DISCREPANCY:
575 hf_model.evaluate(set);
576 hi_fi_response = (sameModelInstance) ? hf_model.current_response().copy()
577 : hf_model.current_response(); // shared rep
578 break;
579 }
580 }
581
582 // -----------------------------
583 // Compute low fidelity response
584 // -----------------------------
585 if (lo_fi_eval) {
586 // pre-process
587 switch (responseMode) {
588 case AUTO_CORRECTED_SURROGATE:
589 // if build_approximation has not yet been called, call it now
590 if (!approxBuilds || force_rebuild())
591 build_approximation();
592 break;
593 }
594 // compute the LF response
595 component_parallel_mode(SURROGATE_MODEL_MODE); // TO DO: sameModelInstance
596 if (sameModelInstance)
597 lf_model.solution_level_index(surrogate_level_index());
598 else
599 update_model(lf_model);
600 ActiveSet lo_fi_set;
601 switch (responseMode) {
602 case UNCORRECTED_SURROGATE: case AUTO_CORRECTED_SURROGATE:
603 case AGGREGATED_MODELS:
604 lo_fi_set.request_vector(lo_fi_asv);
605 lo_fi_set.derivative_vector(set.derivative_vector());
606 lf_model.evaluate(lo_fi_set);
607 break;
608 case MODEL_DISCREPANCY:
609 lf_model.evaluate(set);
610 break;
611 }
612
613 // post-process
614 switch (responseMode) {
615 case AUTO_CORRECTED_SURROGATE: {
616 // LF resp should not be corrected directly (see derived_synchronize())
617 lo_fi_response = lf_model.current_response().copy();
618 recursive_apply(currentVariables, lo_fi_response);
619 if (!mixed_eval) {
620 currentResponse.active_set(lo_fi_set);
621 currentResponse.update(lo_fi_response);
622 }
623 break;
624 }
625 case UNCORRECTED_SURROGATE:
626 if (mixed_eval)
627 lo_fi_response = lf_model.current_response(); // shared rep
628 else {
629 currentResponse.active_set(lo_fi_set);
630 currentResponse.update(lf_model.current_response());
631 }
632 break;
633 }
634 }
635
636 // ------------------------------
637 // perform any LF/HF aggregations
638 // ------------------------------
639 switch (responseMode) {
640 case MODEL_DISCREPANCY: {
641 // don't update surrogate data within deltaCorr[key]'s Approximations;
642 // just update currentResponse (managed as surrogate data at a higher level)
643 bool quiet_flag = (outputLevel < NORMAL_OUTPUT);
644 currentResponse.active_set(set);
645 deltaCorr[activeKey].compute(hi_fi_response, lf_model.current_response(),
646 currentResponse, quiet_flag);
647 break;
648 }
649 case AGGREGATED_MODELS:
650 aggregate_response(hi_fi_response, lf_model.current_response(),
651 currentResponse);
652 break;
653 case UNCORRECTED_SURROGATE: case AUTO_CORRECTED_SURROGATE:
654 if (mixed_eval) {
655 currentResponse.active_set(set);
656 response_combine(hi_fi_response, lo_fi_response, currentResponse);
657 }
658 break;
659 }
660 }
661
662
663 /** Compute the response asynchronously using LF model, HF model, or
664 both (mixed case). For the LF model portion, compute the high
665 fidelity response with build_approximation() (for correcting the
666 low fidelity results in derived_synchronize() and
667 derived_synchronize_nowait()) if not performed previously. */
derived_evaluate_nowait(const ActiveSet & set)668 void HierarchSurrModel::derived_evaluate_nowait(const ActiveSet& set)
669 {
670 ++surrModelEvalCntr;
671
672 ShortArray hi_fi_asv, lo_fi_asv; bool hi_fi_eval, lo_fi_eval;
673 switch (responseMode) {
674 case UNCORRECTED_SURROGATE: case AUTO_CORRECTED_SURROGATE:
675 case AGGREGATED_MODELS:
676 asv_split(set.request_vector(), hi_fi_asv, lo_fi_asv, false);
677 hi_fi_eval = !hi_fi_asv.empty(); lo_fi_eval = !lo_fi_asv.empty(); break;
678 case BYPASS_SURROGATE:
679 hi_fi_eval = true; lo_fi_eval = false; break;
680 case MODEL_DISCREPANCY:
681 hi_fi_eval = lo_fi_eval = true; break;
682 }
683
684 Model& hf_model = (hi_fi_eval) ? truth_model() : dummy_model;
685 Model& lf_model = (lo_fi_eval) ? surrogate_model() : dummy_model;
686 Model& same_model = (hi_fi_eval) ? hf_model : lf_model;
687 bool asynch_hi_fi = (hi_fi_eval) ? hf_model.asynch_flag() : false,
688 asynch_lo_fi = (lo_fi_eval) ? lf_model.asynch_flag() : false;
689
690 if (hierarchicalTagging) {
691 String eval_tag = evalTagPrefix + '.' + std::to_string(surrModelEvalCntr+1);
692 if (sameModelInstance)
693 same_model.eval_tag_prefix(eval_tag);
694 else {
695 if (hi_fi_eval) hf_model.eval_tag_prefix(eval_tag);
696 if (lo_fi_eval) lf_model.eval_tag_prefix(eval_tag);
697 }
698 }
699
700 if (sameModelInstance) update_model(same_model);
701
702 // perform Model updates and define active sets for LF and HF evaluations
703 ActiveSet hi_fi_set, lo_fi_set;
704 if (hi_fi_eval) {
705 // update HF model
706 if (!sameModelInstance) update_model(hf_model);
707 // update hi_fi_set
708 hi_fi_set.derivative_vector(set.derivative_vector());
709 switch (responseMode) {
710 case UNCORRECTED_SURROGATE: case AUTO_CORRECTED_SURROGATE:
711 case AGGREGATED_MODELS:
712 hi_fi_set.request_vector(hi_fi_asv); break;
713 case BYPASS_SURROGATE: case MODEL_DISCREPANCY:
714 hi_fi_set.request_vector(set.request_vector()); break;
715 }
716 }
717 if (lo_fi_eval) {
718 // if build_approximation has not yet been called, call it now
719 if ( responseMode == AUTO_CORRECTED_SURROGATE &&
720 ( !approxBuilds || force_rebuild() ) )
721 build_approximation();
722 // update LF model
723 if (!sameModelInstance) update_model(lf_model);
724 // update lo_fi_set
725 lo_fi_set.derivative_vector(set.derivative_vector());
726 switch (responseMode) {
727 case UNCORRECTED_SURROGATE: case AUTO_CORRECTED_SURROGATE:
728 case AGGREGATED_MODELS:
729 lo_fi_set.request_vector(lo_fi_asv); break;
730 case MODEL_DISCREPANCY:
731 lo_fi_set.request_vector(set.request_vector()); break;
732 }
733 }
734
735 // HierarchSurrModel's asynchEvalFlag is set if _either_ LF or HF is
736 // asynchronous, resulting in use of derived_evaluate_nowait().
737 // To manage general case of mixed asynch, launch nonblocking evals first,
738 // followed by blocking evals.
739
740 // For notes on repetitive setting of model.solution_level_index(), see
741 // derived_evaluate() above.
742
743 // launch nonblocking evals before any blocking ones
744 if (hi_fi_eval && asynch_hi_fi) { // HF model may be executed asynchronously
745 // don't need to set component parallel mode since only queues the job
746 if (sameModelInstance)
747 hf_model.solution_level_index(truth_level_index());
748 hf_model.evaluate_nowait(hi_fi_set);
749 // store map from HF eval id to HierarchSurrModel id
750 truthIdMap[hf_model.evaluation_id()] = surrModelEvalCntr;
751 }
752 if (lo_fi_eval && asynch_lo_fi) { // LF model may be executed asynchronously
753 // don't need to set component parallel mode since only queues the job
754 if (sameModelInstance)
755 lf_model.solution_level_index(surrogate_level_index());
756 lf_model.evaluate_nowait(lo_fi_set);
757 // store map from LF eval id to HierarchSurrModel id
758 surrIdMap[lf_model.evaluation_id()] = surrModelEvalCntr;
759 // store variables set needed for correction
760 if (responseMode == AUTO_CORRECTED_SURROGATE)
761 rawVarsMap[surrModelEvalCntr] = currentVariables.copy();
762 }
763
764 // now launch any blocking evals
765 if (hi_fi_eval && !asynch_hi_fi) { // execute HF synchronously & cache resp
766 component_parallel_mode(TRUTH_MODEL_MODE);
767 if (sameModelInstance)
768 hf_model.solution_level_index(truth_level_index());
769 hf_model.evaluate(hi_fi_set);
770 // not part of rekey_synch(); can rekey to surrModelEvalCntr immediately
771 cachedTruthRespMap[surrModelEvalCntr] = hf_model.current_response().copy();
772 }
773 if (lo_fi_eval && !asynch_lo_fi) { // execute LF synchronously & cache resp
774 component_parallel_mode(SURROGATE_MODEL_MODE);
775 if (sameModelInstance)
776 lf_model.solution_level_index(surrogate_level_index());
777 lf_model.evaluate(lo_fi_set);
778 Response lo_fi_response(lf_model.current_response().copy());
779 // correct LF response prior to caching
780 if (responseMode == AUTO_CORRECTED_SURROGATE)
781 // correct synch cases now (asynch cases get corrected in
782 // derived_synchronize_aggregate*)
783 recursive_apply(currentVariables, lo_fi_response);
784 // cache corrected LF response for retrieval during synchronization.
785 // not part of rekey_synch(); can rekey to surrModelEvalCntr immediately.
786 cachedApproxRespMap[surrModelEvalCntr] = lo_fi_response;// deep copied above
787 }
788 }
789
790
791 /** Blocking retrieval of asynchronous evaluations from LF model, HF
792 model, or both (mixed case). For the LF model portion, apply
793 correction (if active) to each response in the array.
794 derived_synchronize() is designed for the general case where
795 derived_evaluate_nowait() may be inconsistent in its use of low
796 fidelity evaluations, high fidelity evaluations, or both. */
derived_synchronize()797 const IntResponseMap& HierarchSurrModel::derived_synchronize()
798 {
799 surrResponseMap.clear();
800
801 if (sameModelInstance || sameInterfaceInstance ||
802 truthIdMap.empty() || surrIdMap.empty()) { // 1 queue: blocking synch
803 IntResponseMap hf_resp_map_rekey, lf_resp_map_rekey;
804 derived_synchronize_sequential(hf_resp_map_rekey, lf_resp_map_rekey, true);
805 derived_synchronize_combine(hf_resp_map_rekey, lf_resp_map_rekey,
806 surrResponseMap);
807 }
808 else // competing queues: nonblocking synch
809 derived_synchronize_competing();
810
811 return surrResponseMap;
812 }
813
814
815 /** Nonblocking retrieval of asynchronous evaluations from LF model,
816 HF model, or both (mixed case). For the LF model portion, apply
817 correction (if active) to each response in the map.
818 derived_synchronize_nowait() is designed for the general case
819 where derived_evaluate_nowait() may be inconsistent in its use of
820 actual evals, approx evals, or both. */
derived_synchronize_nowait()821 const IntResponseMap& HierarchSurrModel::derived_synchronize_nowait()
822 {
823 surrResponseMap.clear();
824
825 IntResponseMap hf_resp_map_rekey, lf_resp_map_rekey;
826 derived_synchronize_sequential(hf_resp_map_rekey, lf_resp_map_rekey, false);
827 derived_synchronize_combine_nowait(hf_resp_map_rekey, lf_resp_map_rekey,
828 surrResponseMap);
829
830 return surrResponseMap;
831 }
832
833
834 void HierarchSurrModel::
derived_synchronize_sequential(IntResponseMap & hf_resp_map_rekey,IntResponseMap & lf_resp_map_rekey,bool block)835 derived_synchronize_sequential(IntResponseMap& hf_resp_map_rekey,
836 IntResponseMap& lf_resp_map_rekey, bool block)
837 {
838 // --------------------------
839 // synchronize HF model evals
840 // --------------------------
841 IntRespMCIter r_cit;
842 if (!truthIdMap.empty()) { // synchronize HF evals
843 component_parallel_mode(TRUTH_MODEL_MODE);
844 rekey_synch(truth_model(), block, truthIdMap, hf_resp_map_rekey);
845 }
846 // add cached truth evals from:
847 // (a) recovered HF asynch evals that could not be returned since LF
848 // eval portions were still pending, or
849 // (b) synchronous HF evals performed within evaluate_nowait()
850 hf_resp_map_rekey.insert(cachedTruthRespMap.begin(),
851 cachedTruthRespMap.end());
852 cachedTruthRespMap.clear();
853
854 // --------------------------
855 // synchronize LF model evals
856 // --------------------------
857 if (!surrIdMap.empty()) { // synchronize LF evals
858 component_parallel_mode(SURROGATE_MODEL_MODE);
859 // Interface::rawResponseMap should _not_ be corrected directly since
860 // rawResponseMap, beforeSynchCorePRPQueue, and data_pairs all share a
861 // responseRep -> modifying rawResponseMap affects data_pairs.
862 bool deep_copy = (responseMode == AUTO_CORRECTED_SURROGATE);
863 rekey_synch(surrogate_model(), block, surrIdMap, lf_resp_map_rekey,
864 deep_copy);
865 }
866 // add cached approx evals from:
867 // (a) recovered LF asynch evals that could not be returned since HF
868 // eval portions were still pending, or
869 // (b) synchronous LF evals performed within evaluate_nowait()
870 lf_resp_map_rekey.insert(cachedApproxRespMap.begin(),
871 cachedApproxRespMap.end());
872 cachedApproxRespMap.clear();
873 }
874
875
derived_synchronize_competing()876 void HierarchSurrModel::derived_synchronize_competing()
877 {
878 // in this case, we don't want to starve either LF or HF scheduling by
879 // blocking on one or the other --> leverage derived_synchronize_nowait()
880 IntResponseMap aggregated_map; // accumulate surrResponseMap returns
881 while (!truthIdMap.empty() || !surrIdMap.empty()) {
882 // partial_map is a reference to surrResponseMap, returned by _nowait()
883 const IntResponseMap& partial_map = derived_synchronize_nowait();
884 if (!partial_map.empty())
885 aggregated_map.insert(partial_map.begin(), partial_map.end());
886 }
887
888 // Note: cached response maps and any LF/HF aggregations are managed
889 // within derived_synchronize_nowait()
890
891 std::swap(surrResponseMap, aggregated_map);
892 }
893
894
/** Combine completed HF and LF response maps (both already rekeyed to
    top-level surrModelEvalCntr ids) into combined_resp_map per the
    active responseMode.  Blocking case: pairings required by the mode
    are assumed complete in the incoming maps. */
void HierarchSurrModel::
derived_synchronize_combine(const IntResponseMap& hf_resp_map,
                            IntResponseMap& lf_resp_map,
                            IntResponseMap& combined_resp_map)
{
  // ------------------------------
  // perform any LF/HF aggregations
  // ------------------------------
  // {hf,lf}_resp_map may be partial sets (partial surrogateFnIndices
  // in {UN,AUTO_}CORRECTED_SURROGATE) or full sets (MODEL_DISCREPANCY,
  // AGGREGATED_MODELS).

  IntRespMCIter hf_cit = hf_resp_map.begin(), lf_cit = lf_resp_map.begin();
  bool quiet_flag = (outputLevel < NORMAL_OUTPUT);
  switch (responseMode) {
  case MODEL_DISCREPANCY: {
    // paired iteration: HF/LF entries share eval ids pairwise; compute the
    // discrepancy for each pair
    DiscrepancyCorrection& delta_corr = deltaCorr[activeKey];
    for (; hf_cit != hf_resp_map.end() && lf_cit != lf_resp_map.end();
         ++hf_cit, ++lf_cit) {
      check_key(hf_cit->first, lf_cit->first); // ids must match pairwise
      delta_corr.compute(hf_cit->second, lf_cit->second,
                         combined_resp_map[hf_cit->first], quiet_flag);
    }
    break;
  }
  case AGGREGATED_MODELS:
    // paired iteration: concatenate the HF and LF responses per eval id
    for (; hf_cit != hf_resp_map.end() && lf_cit != lf_resp_map.end();
         ++hf_cit, ++lf_cit) {
      check_key(hf_cit->first, lf_cit->first); // ids must match pairwise
      aggregate_response(hf_cit->second, lf_cit->second,
                         combined_resp_map[hf_cit->first]);
    }
    break;
  default: { // {UNCORRECTED,AUTO_CORRECTED,BYPASS}_SURROGATE modes
    // one-sided shortcuts avoid the merge loop below
    if (lf_resp_map.empty()) {
      combined_resp_map = hf_resp_map; // can't swap w/ const
      return;
    }
    if (responseMode == AUTO_CORRECTED_SURROGATE)
      compute_apply_delta(lf_resp_map); // correct LF responses in place
    if (hf_resp_map.empty()) {
      std::swap(combined_resp_map, lf_resp_map); // avoid a deep copy
      return;
    }
    // process combinations of HF and LF completions: merge the two
    // id-ordered maps, combining responses when an id appears in both
    Response empty_resp;
    while (hf_cit != hf_resp_map.end() || lf_cit != lf_resp_map.end()) {
      // these have been rekeyed already to top-level surrModelEvalCntr:
      // (INT_MAX sentinel keeps an exhausted iterator from being selected)
      int hf_eval_id = (hf_cit == hf_resp_map.end()) ?
        INT_MAX : hf_cit->first;
      int lf_eval_id = (lf_cit == lf_resp_map.end()) ?
        INT_MAX : lf_cit->first;

      if (hf_eval_id < lf_eval_id) { // only HF available
        response_combine(hf_cit->second, empty_resp,
                         combined_resp_map[hf_eval_id]);
        ++hf_cit;
      }
      else if (lf_eval_id < hf_eval_id) { // only LF available
        response_combine(empty_resp, lf_cit->second,
                         combined_resp_map[lf_eval_id]);
        ++lf_cit;
      }
      else { // both LF and HF available
        response_combine(hf_cit->second, lf_cit->second,
                         combined_resp_map[hf_eval_id]);
        ++hf_cit;
        ++lf_cit;
      }
    }
    break;
  }
  }
}
969
970
971 void HierarchSurrModel::
derived_synchronize_combine_nowait(const IntResponseMap & hf_resp_map,IntResponseMap & lf_resp_map,IntResponseMap & combined_resp_map)972 derived_synchronize_combine_nowait(const IntResponseMap& hf_resp_map,
973 IntResponseMap& lf_resp_map,
974 IntResponseMap& combined_resp_map)
975 {
976 // ------------------------------
977 // perform any LF/HF aggregations
978 // ------------------------------
979 // {hf,lf}_resp_map may be partial sets (partial surrogateFnIndices
980 // in {UN,AUTO_}CORRECTED_SURROGATE) or full sets (MODEL_DISCREPANCY).
981
982 // Early return options avoid some overhead:
983 if (lf_resp_map.empty() && surrIdMap.empty()) {// none completed, none pending
984 combined_resp_map = hf_resp_map; // can't swap w/ const
985 return;
986 }
987 if (responseMode == AUTO_CORRECTED_SURROGATE)
988 compute_apply_delta(lf_resp_map);
989 if (hf_resp_map.empty() && truthIdMap.empty()) {//none completed, none pending
990 std::swap(combined_resp_map, lf_resp_map);
991 return;
992 }
993
994 // invert remaining entries (pending jobs) in truthIdMap and surrIdMap
995 IntIntMap remain_truth_ids, remain_surr_ids;
996 IntIntMCIter id_it;
997 for (id_it=truthIdMap.begin(); id_it!=truthIdMap.end(); ++id_it)
998 remain_truth_ids[id_it->second] = id_it->first;
999 for (id_it=surrIdMap.begin(); id_it!=surrIdMap.end(); ++id_it)
1000 remain_surr_ids[id_it->second] = id_it->first;
1001
1002 // process any combination of HF and LF completions
1003 IntRespMCIter hf_cit = hf_resp_map.begin();
1004 IntRespMIter lf_it = lf_resp_map.begin();
1005 Response empty_resp;
1006 bool quiet_flag = (outputLevel < NORMAL_OUTPUT);
1007 std::map<UShortArray, DiscrepancyCorrection>::iterator dc_it;
1008 if (responseMode == MODEL_DISCREPANCY)
1009 dc_it = deltaCorr.find(activeKey);
1010 while (hf_cit != hf_resp_map.end() || lf_it != lf_resp_map.end()) {
1011 // these have been rekeyed already to top-level surrModelEvalCntr:
1012 int hf_eval_id = (hf_cit == hf_resp_map.end()) ? INT_MAX : hf_cit->first;
1013 int lf_eval_id = (lf_it == lf_resp_map.end()) ? INT_MAX : lf_it->first;
1014 // process LF/HF results or cache them for next pass
1015 if (hf_eval_id < lf_eval_id) { // only HF available
1016 switch (responseMode) {
1017 case MODEL_DISCREPANCY: case AGGREGATED_MODELS:
1018 // LF contribution is pending -> cache HF response
1019 cachedTruthRespMap[hf_eval_id] = hf_cit->second;
1020 break;
1021 default: // {UNCORRECTED,AUTO_CORRECTED,BYPASS}_SURROGATE modes
1022 if (remain_surr_ids.find(hf_eval_id) != remain_surr_ids.end())
1023 // LF contribution is pending -> cache HF response
1024 cachedTruthRespMap[hf_eval_id] = hf_cit->second;
1025 else // no LF component is pending -> HF contribution is sufficient
1026 response_combine(hf_cit->second, empty_resp,
1027 surrResponseMap[hf_eval_id]);
1028 break;
1029 }
1030 ++hf_cit;
1031 }
1032 else if (lf_eval_id < hf_eval_id) { // only LF available
1033 switch (responseMode) {
1034 case MODEL_DISCREPANCY: case AGGREGATED_MODELS:
1035 // HF contribution is pending -> cache LF response
1036 cachedApproxRespMap[lf_eval_id] = lf_it->second;
1037 break;
1038 default: // {UNCORRECTED,AUTO_CORRECTED,BYPASS}_SURROGATE modes
1039 if (remain_truth_ids.find(lf_eval_id) != remain_truth_ids.end())
1040 // HF contribution is pending -> cache LF response
1041 cachedApproxRespMap[lf_eval_id] = lf_it->second;
1042 else // no HF component is pending -> LF contribution is sufficient
1043 response_combine(empty_resp, lf_it->second,
1044 surrResponseMap[lf_eval_id]);
1045 break;
1046 }
1047 ++lf_it;
1048 }
1049 else { // both LF and HF available
1050 bool cache_for_pending_corr = false;
1051 switch (responseMode) {
1052 case MODEL_DISCREPANCY: {
1053 dc_it->second.compute(hf_cit->second, lf_it->second,
1054 surrResponseMap[hf_eval_id], quiet_flag);
1055 break;
1056 }
1057 case AGGREGATED_MODELS:
1058 aggregate_response(hf_cit->second, lf_it->second,
1059 surrResponseMap[hf_eval_id]);
1060 break;
1061 default: // {UNCORRECTED,AUTO_CORRECTED,BYPASS}_SURROGATE modes
1062 response_combine(hf_cit->second, lf_it->second,
1063 surrResponseMap[hf_eval_id]);
1064 break;
1065 }
1066 ++hf_cit;
1067 ++lf_it;
1068 }
1069 }
1070 }
1071
1072
compute_apply_delta(IntResponseMap & lf_resp_map)1073 void HierarchSurrModel::compute_apply_delta(IntResponseMap& lf_resp_map)
1074 {
1075 // Incoming we have a completed LF evaluation that may be used to compute a
1076 // correction and may be the target of application of a correction.
1077
1078 // First, test if a correction is previously available or can now be computed
1079 DiscrepancyCorrection& delta_corr = deltaCorr[activeKey];
1080 bool corr_comp = delta_corr.computed(), cache_for_pending_corr = false,
1081 quiet_flag = (outputLevel < NORMAL_OUTPUT);
1082 if (!corr_comp) {
1083 // compute a correction corresponding to the first entry in rawVarsMap
1084 IntVarsMCIter v_corr_cit = rawVarsMap.begin();
1085 if (v_corr_cit != rawVarsMap.end()) {
1086 // if corresponding LF response is complete, compute the delta
1087 IntRespMCIter lf_corr_cit = lf_resp_map.find(v_corr_cit->first);
1088 if (lf_corr_cit != lf_resp_map.end()) {
1089 delta_corr.compute(v_corr_cit->second, truthResponseRef[truthModelKey],
1090 lf_corr_cit->second, quiet_flag);
1091 corr_comp = true;
1092 }
1093 }
1094 }
1095
1096 // Next, apply the correction. We cache an uncorrected eval when the
1097 // components necessary for correction are still pending (returning
1098 // corrected evals with the first available LF response would lead to
1099 // nondeterministic results).
1100 IntVarsMIter v_it; IntRespMIter lf_it; int lf_eval_id;
1101 for (lf_it=lf_resp_map.begin(); lf_it!=lf_resp_map.end(); ++lf_it) {
1102 lf_eval_id = lf_it->first;
1103 v_it = rawVarsMap.find(lf_eval_id);
1104 if (v_it != rawVarsMap.end()) {
1105 if (corr_comp) { // apply the correction to the LF response
1106 recursive_apply(v_it->second, lf_it->second);
1107 rawVarsMap.erase(v_it);
1108 }
1109 else // no new corrections can be applied -> cache uncorrected
1110 cachedApproxRespMap.insert(*lf_it);
1111 }
1112 // else correction already applied
1113 }
1114 // remove cached responses from lf_resp_map
1115 if (!corr_comp)
1116 for (lf_it =cachedApproxRespMap.begin();
1117 lf_it!=cachedApproxRespMap.end(); ++lf_it)
1118 lf_resp_map.erase(lf_it->first);
1119 }
1120
1121
1122 void HierarchSurrModel::
single_apply(const Variables & vars,Response & resp,const UShortArray & paired_key)1123 single_apply(const Variables& vars, Response& resp,
1124 const UShortArray& paired_key)
1125 {
1126 bool quiet_flag = (outputLevel < NORMAL_OUTPUT);
1127 bool apply_corr = true;
1128 DiscrepancyCorrection& delta_corr = deltaCorr[paired_key];
1129 if (!delta_corr.computed()) {
1130 UShortArray truth_key;
1131 Pecos::DiscrepancyCalculator::extract_key(paired_key, truth_key, 0);
1132 std::map<UShortArray, Response>::iterator it
1133 = truthResponseRef.find(truth_key);
1134 if (it == truthResponseRef.end()) apply_corr = false; // not found
1135 else delta_corr.compute(vars, it->second, resp, quiet_flag);
1136 }
1137 if (apply_corr)
1138 delta_corr.apply(vars, resp, quiet_flag);
1139 }
1140
1141
/** Apply one or more discrepancy corrections to resp in place,
    according to the active correctionMode. */
void HierarchSurrModel::recursive_apply(const Variables& vars, Response& resp)
{
  switch (correctionMode) {
  case SINGLE_CORRECTION: case DEFAULT_CORRECTION:
    // single correction for the active LF/HF pairing
    single_apply(vars, resp, activeKey);
    break;
  case FULL_MODEL_FORM_CORRECTION: {
    // chain corrections across successive model-form pairs, from the active
    // LF form up to the highest fidelity
    // assume a consistent level index from surrModelKey
    size_t i, num_models = orderedModels.size(); UShortArray paired_key;
    // key position [1] holds the model form (see updates below)
    unsigned short lf_form = surrModelKey[1];
    Pecos::DiscrepancyCalculator::
      aggregate_keys(surrModelKey, surrModelKey, paired_key); // initialize
    for (i = lf_form; i < num_models - 1; ++i) {
      paired_key[1] = i+1; // update HF model form
      paired_key[3] = i; // update LF model form
      single_apply(vars, resp, paired_key);
    }
    break;
  }
  case FULL_SOLUTION_LEVEL_CORRECTION: {
    // chain corrections across successive solution-level pairs within a
    // single model form, from the active LF level up to the finest level
    // assume a consistent model index from surrModelKey
    unsigned short lf_lev = surrModelKey[2]; // [2] = solution level index
    if (lf_lev == USHRT_MAX) { // USHRT_MAX marks an unassigned level
      Cerr << "Error: FULL_SOLUTION_LEVEL_CORRECTION requires solution level "
           << "within model key." << std::endl;
      abort_handler(MODEL_ERROR);
    }
    size_t i, num_levels = surrogate_model().solution_levels();
    UShortArray paired_key;
    Pecos::DiscrepancyCalculator::
      aggregate_keys(surrModelKey, surrModelKey, paired_key); // initialize
    for (i = lf_lev; i < num_levels - 1; ++i) {
      paired_key[2] = i+1; // update fine solution level
      paired_key[4] = i; // update coarse solution level
      single_apply(vars, resp, paired_key);
    }
    break;
  }
  //case SEQUENCE_CORRECTION: // apply sequence of discrepancy corrections
  //  for (size_t i = 0; i < corrSequence.size(); ++i)
  //    single_apply(vars, resp, corrSequence[i]);
  //  break;
  }
}
1186
1187
resize_response(bool use_virtual_counts)1188 void HierarchSurrModel::resize_response(bool use_virtual_counts)
1189 {
1190 size_t num_surr, num_truth;
1191 if (use_virtual_counts) { // allow models to consume lower-level aggregations
1192 num_surr = surrogate_model().qoi();
1193 num_truth = truth_model().qoi();
1194 }
1195 else { // raw counts align with currentResponse raw count
1196 num_surr = surrogate_model().response_size();
1197 num_truth = truth_model().response_size();
1198 }
1199
1200 switch (responseMode) {
1201 case AGGREGATED_MODELS:
1202 numFns = num_surr + num_truth; break;
1203 case MODEL_DISCREPANCY:
1204 if (num_surr != num_truth) {
1205 Cerr << "Error: mismatch in response sizes for MODEL_DISCREPANCY mode "
1206 << "in HierarchSurrModel::resize_response()." << std::endl;
1207 abort_handler(MODEL_ERROR);
1208 }
1209 numFns = num_truth; break;
1210 case BYPASS_SURROGATE: case NO_SURROGATE:
1211 numFns = num_truth; break;
1212 case UNCORRECTED_SURROGATE: case AUTO_CORRECTED_SURROGATE: default:
1213 numFns = num_surr; break;
1214 }
1215
1216 // gradient and Hessian settings are based on independent spec (not LF, HF)
1217 // --> preserve previous settings
1218 if (currentResponse.num_functions() != numFns) {
1219 currentResponse.reshape(numFns, currentVariables.cv(),
1220 !currentResponse.function_gradients().empty(),
1221 !currentResponse.function_hessians().empty());
1222
1223 // update message lengths for send/receive of parallel jobs (normally
1224 // performed once in Model::init_communicators() just after construct time)
1225 //estimate_message_lengths();
1226 //
1227 // NOT NECESSARY: Model::synchronize() and Model::serve_run() delegate to
1228 // HierarchSurrModel::{derived_synchronize,serve_run}() which delegate to
1229 // synchronize() and serve_run() by the LF or HF model.
1230 // --> Jobs are never returned using messages containing the expanded
1231 // Response object. Expansion by combination only happens on
1232 // iteratorCommRank 0 within derived_synchronize_combine{,_nowait}().
1233 }
1234 }
1235
1236
/** Transition the active component parallel mode (SURROGATE_MODEL_MODE
    or TRUTH_MODEL_MODE) and model key: stop servers for the previous
    mode/key when either changes, then broadcast the new mode plus
    responseMode/activeKey state to this model's servers (received in
    serve_run()). */
void HierarchSurrModel::component_parallel_mode(short par_mode)
{
  // mode may be correct, but can't guarantee active parallel config is in sync
  //if (componentParallelMode == mode)
  //  return; // already in correct parallel mode

  // -----------------------------
  // terminate previous serve mode (if active)
  // -----------------------------
  // TO DO: restarting servers for a change in soln control index w/o change
  // in model may be overkill (send of state vars in vars buffer sufficient?)
  bool restart = false;
  if (componentParallelMode != par_mode || componentParallelKey != activeKey) {
    // NOTE(review): index [1] of the extracted keys appears to be the model
    // form passed to stop_model() -- confirm against extract_model_keys()
    UShortArray old_hf_key, old_lf_key;
    extract_model_keys(componentParallelKey, old_hf_key, old_lf_key);
    switch (componentParallelMode) {
    case SURROGATE_MODEL_MODE: stop_model(old_lf_key[1]); break;
    case TRUTH_MODEL_MODE: stop_model(old_hf_key[1]); break;
    }
    restart = true;
  }

  // ------------------------------------------------------------
  // set ParallelConfiguration for new mode and retrieve new data
  // ------------------------------------------------------------
  if (par_mode == TRUTH_MODEL_MODE) { // new mode
    // activation delegated to HF model
  }
  else if (par_mode == SURROGATE_MODEL_MODE) { // new mode
    // activation delegated to LF model
  }

  // -----------------------
  // activate new serve mode (matches HierarchSurrModel::serve_run(pl_iter)).
  // -----------------------
  // These bcasts match the outer parallel context (pl_iter).
  if (restart && modelPCIter->mi_parallel_level_defined(miPLIndex)) {
    const ParallelLevel& mi_pl = modelPCIter->mi_parallel_level(miPLIndex);
    if (mi_pl.server_communicator_size() > 1) {
      parallelLib.bcast(par_mode, mi_pl);
      if (par_mode) { // send model index state corresponding to active mode
        // nonzero mode: servers expect responseMode + activeKey to follow
        // (matching receive in serve_run())
        MPIPackBuffer send_buff;
        send_buff << responseMode << activeKey;
        parallelLib.bcast(send_buff, mi_pl);
      }
    }
  }

  componentParallelMode = par_mode; componentParallelKey = activeKey;
}
1287
1288
/** Server-side counterpart of component_parallel_mode(): loop
    receiving a mode broadcast (zero terminates the loop), then the
    responseMode/activeKey state, and delegate serving to the active
    LF or HF model. */
void HierarchSurrModel::serve_run(ParLevLIter pl_iter, int max_eval_concurrency)
{
  set_communicators(pl_iter, max_eval_concurrency, false); // don't recurse

  // manage LF model and HF model servers, matching communication from
  // HierarchSurrModel::component_parallel_mode()
  // Note: could consolidate logic by bcasting componentParallelKey,
  //       except for special handling of responseMode for TRUTH_MODEL_MODE.
  componentParallelMode = 1; // dummy value to be replaced inside loop
  while (componentParallelMode) {
    parallelLib.bcast(componentParallelMode, *pl_iter); // outer context
    if (componentParallelMode) {
      // use a quick size estimation for recv buffer i/o size bcast:
      // pack a representative payload and take its length
      UShortArray dummy_key(5, 0); // for size estimation (worst 2-model case)
      MPIPackBuffer send_buff; send_buff << responseMode << dummy_key;
      int buffer_len = send_buff.size();
      // receive model state from HierarchSurrModel::component_parallel_mode()
      MPIUnpackBuffer recv_buffer(buffer_len);
      parallelLib.bcast(recv_buffer, *pl_iter);
      recv_buffer >> responseMode >> activeKey;

      active_model_key(activeKey); // updates {truth,surr}ModelKey
      if (componentParallelMode == SURROGATE_MODEL_MODE) {
        // serve active LF model:
        surrogate_model().serve_run(pl_iter, max_eval_concurrency);
        // Note: ignores erroneous BYPASS_SURROGATE
      }
      else if (componentParallelMode == TRUTH_MODEL_MODE) {
        // serve active HF model, employing correct iterator concurrency:
        Model& hf_model = truth_model();
        switch (responseMode) {
        case UNCORRECTED_SURROGATE:
          Cerr << "Error: cannot set parallel mode to TRUTH_MODEL_MODE for a "
               << "response mode of UNCORRECTED_SURROGATE." << std::endl;
          abort_handler(-1); break;
        case AUTO_CORRECTED_SURROGATE:
          // corrections need HF derivatives -> use derivative concurrency
          hf_model.serve_run(pl_iter, hf_model.derivative_concurrency()); break;
        case BYPASS_SURROGATE: case MODEL_DISCREPANCY: case AGGREGATED_MODELS:
          hf_model.serve_run(pl_iter, max_eval_concurrency); break;
        }
      }
    }
  }
}
1333
1334
/** One-time initialization of a sub-model: propagate variable labels,
    linear/nonlinear constraint data, and inactive variable data from
    this model's currentVariables/userDefinedConstraints.  Size checks
    guard each propagation so mismatched parameterizations are skipped
    rather than corrupted. */
void HierarchSurrModel::init_model(Model& model)
{
  // Set the low/high fidelity model variable descriptors with the variable
  // descriptors from currentVariables (eliminates the need to replicate
  // variable descriptors in the input file). This only needs to be performed
  // once (as opposed to the other updates above). However, performing this set
  // in the constructor does not propagate properly for multiple surrogates/
  // nestings since the sub-model construction (and therefore any sub-sub-model
  // constructions) must finish before calling any set functions on it. That
  // is, after-the-fact updating in constructors only propagates one level,
  // whereas before-the-fact updating in compute/build functions propagates
  // multiple levels.
  if (!approxBuilds) {
    // active variable labels, propagated only when counts match
    size_t num_cv = currentVariables.cv(), num_div = currentVariables.div(),
      num_drv = currentVariables.drv(), num_dsv = currentVariables.dsv();
    if (num_cv && num_cv == model.cv())
      model.continuous_variable_labels(
        currentVariables.continuous_variable_labels());
    if (num_div && num_div == model.div())
      model.discrete_int_variable_labels(
        currentVariables.discrete_int_variable_labels());
    if (num_drv && num_drv == model.drv())
      model.discrete_real_variable_labels(
        currentVariables.discrete_real_variable_labels());
    if (num_dsv && num_dsv == model.dsv())
      model.discrete_string_variable_labels(
        currentVariables.discrete_string_variable_labels());
  }

  // linear constraints: coefficient matrices only make sense when the
  // variable counts line up across all three active variable types
  if ( ( userDefinedConstraints.num_linear_ineq_constraints() ||
         userDefinedConstraints.num_linear_eq_constraints() ) &&
       currentVariables.cv()  == model.cv()  &&
       currentVariables.div() == model.div() &&
       currentVariables.drv() == model.drv() ) {
    model.linear_ineq_constraint_coeffs(
      userDefinedConstraints.linear_ineq_constraint_coeffs());
    model.linear_ineq_constraint_lower_bounds(
      userDefinedConstraints.linear_ineq_constraint_lower_bounds());
    model.linear_ineq_constraint_upper_bounds(
      userDefinedConstraints.linear_ineq_constraint_upper_bounds());

    model.linear_eq_constraint_coeffs(
      userDefinedConstraints.linear_eq_constraint_coeffs());
    model.linear_eq_constraint_targets(
      userDefinedConstraints.linear_eq_constraint_targets());
  }

  // nonlinear constraints
  if (userDefinedConstraints.num_nonlinear_ineq_constraints()) {
    model.nonlinear_ineq_constraint_lower_bounds(
      userDefinedConstraints.nonlinear_ineq_constraint_lower_bounds());
    model.nonlinear_ineq_constraint_upper_bounds(
      userDefinedConstraints.nonlinear_ineq_constraint_upper_bounds());
  }
  if (userDefinedConstraints.num_nonlinear_eq_constraints())
    model.nonlinear_eq_constraint_targets(
      userDefinedConstraints.nonlinear_eq_constraint_targets());

  // ALL views have no inactive variables -> nothing further to propagate
  short active_view = currentVariables.view().first;
  if (active_view == RELAXED_ALL || active_view == MIXED_ALL)
    return;

  // update model with inactive currentVariables/userDefinedConstraints data.
  // For efficiency, we avoid doing this on every evaluation, instead calling
  // it from a pre-execution initialization context (initialize_mapping()).
  size_t num_icv = currentVariables.icv(), num_idiv = currentVariables.idiv(),
    num_idrv = currentVariables.idrv(), num_idsv = currentVariables.idsv();
  if (num_icv && num_icv == model.icv()) {
    model.inactive_continuous_variables(
      currentVariables.inactive_continuous_variables());
    model.inactive_continuous_lower_bounds(
      userDefinedConstraints.inactive_continuous_lower_bounds());
    model.inactive_continuous_upper_bounds(
      userDefinedConstraints.inactive_continuous_upper_bounds());
    if (!approxBuilds) // labels are a one-time set, like above
      model.inactive_continuous_variable_labels(
        currentVariables.inactive_continuous_variable_labels());
  }
  if (num_idiv && num_idiv == model.idiv()) {
    model.inactive_discrete_int_variables(
      currentVariables.inactive_discrete_int_variables());
    model.inactive_discrete_int_lower_bounds(
      userDefinedConstraints.inactive_discrete_int_lower_bounds());
    model.inactive_discrete_int_upper_bounds(
      userDefinedConstraints.inactive_discrete_int_upper_bounds());
    if (!approxBuilds)
      model.inactive_discrete_int_variable_labels(
        currentVariables.inactive_discrete_int_variable_labels());
  }
  if (num_idrv && num_idrv == model.idrv()) {
    model.inactive_discrete_real_variables(
      currentVariables.inactive_discrete_real_variables());
    model.inactive_discrete_real_lower_bounds(
      userDefinedConstraints.inactive_discrete_real_lower_bounds());
    model.inactive_discrete_real_upper_bounds(
      userDefinedConstraints.inactive_discrete_real_upper_bounds());
    if (!approxBuilds)
      model.inactive_discrete_real_variable_labels(
        currentVariables.inactive_discrete_real_variable_labels());
  }
  if (num_idsv && num_idsv == model.idsv()) {
    model.inactive_discrete_string_variables(
      currentVariables.inactive_discrete_string_variables());
    if (!approxBuilds)
      model.inactive_discrete_string_variable_labels(
        currentVariables.inactive_discrete_string_variable_labels());
  }
}
1444
1445
update_model(Model & model)1446 void HierarchSurrModel::update_model(Model& model)
1447 {
1448 // update model with currentVariables/userDefinedConstraints data. In the
1449 // hierarchical case, the variables view in LF/HF models correspond to the
1450 // currentVariables view. Note: updating the bounds is not strictly necessary
1451 // in common usage for the HF model (when a single model evaluated only at the
1452 // TR center), but is needed for the LF model and could be relevant in cases
1453 // where the HF model involves additional surrogates/nestings.
1454
1455 // active variable vals/bnds (active labels, inactive vals/bnds/labels, and
1456 // linear/nonlinear constraint coeffs/bnds updated in init_model())
1457 if (currentVariables.cv()) {
1458 model.continuous_variables(currentVariables.continuous_variables());
1459 model.continuous_lower_bounds(
1460 userDefinedConstraints.continuous_lower_bounds());
1461 model.continuous_upper_bounds(
1462 userDefinedConstraints.continuous_upper_bounds());
1463 }
1464 if (currentVariables.div()) {
1465 model.discrete_int_variables(currentVariables.discrete_int_variables());
1466 model.discrete_int_lower_bounds(
1467 userDefinedConstraints.discrete_int_lower_bounds());
1468 model.discrete_int_upper_bounds(
1469 userDefinedConstraints.discrete_int_upper_bounds());
1470 }
1471 if (currentVariables.drv()) {
1472 model.discrete_real_variables(currentVariables.discrete_real_variables());
1473 model.discrete_real_lower_bounds(
1474 userDefinedConstraints.discrete_real_lower_bounds());
1475 model.discrete_real_upper_bounds(
1476 userDefinedConstraints.discrete_real_upper_bounds());
1477 }
1478 if (currentVariables.dsv())
1479 model.discrete_string_variables(
1480 currentVariables.discrete_string_variables());
1481 }
1482
1483
update_from_model(Model & model)1484 void HierarchSurrModel::update_from_model(Model& model)
1485 {
1486 // update complement of active currentVariables using model data.
1487
1488 // Note: this approach makes a strong assumption about non-active variable
1489 // consistency, which is limiting. Better to perform an individual variable
1490 // mapping (e.g., solution control) when needed and allow for a different
1491 // ADV position.
1492
1493 // active variable vals/bnds (active labels, inactive vals/bnds/labels, and
1494 // linear/nonlinear constraint coeffs/bnds updated in init_model())
1495
1496 // *** TO DO: make this robust to differing inactive parameterizations using
1497 // tag lookups. Omit mappings for failed lookups.
1498
1499 const Variables& vars = model.current_variables();
1500 const Constraints& cons = model.user_defined_constraints();
1501
1502 const RealVector& acv = vars.all_continuous_variables();
1503 StringMultiArrayConstView acv_labels = vars.all_continuous_variable_labels();
1504 const RealVector& acv_l_bnds = cons.all_continuous_lower_bounds();
1505 const RealVector& acv_u_bnds = cons.all_continuous_upper_bounds();
1506 StringMultiArrayConstView cv_acv_labels
1507 = currentVariables.all_continuous_variable_labels();
1508 size_t i, index, cv_begin = vars.cv_start(), num_cv = vars.cv(),
1509 cv_end = cv_begin + num_cv, num_acv = vars.acv();
1510 for (i=0; i<cv_begin; ++i) {
1511 index = find_index(cv_acv_labels, acv_labels[i]);
1512 if (index != _NPOS) {
1513 currentVariables.all_continuous_variable(acv[i], index);
1514 userDefinedConstraints.all_continuous_lower_bound(acv_l_bnds[i], index);
1515 userDefinedConstraints.all_continuous_upper_bound(acv_u_bnds[i], index);
1516 }
1517 }
1518 for (i=cv_end; i<num_acv; ++i) {
1519 index = find_index(cv_acv_labels, acv_labels[i]);
1520 if (index != _NPOS) {
1521 currentVariables.all_continuous_variable(acv[i], index);
1522 userDefinedConstraints.all_continuous_lower_bound(acv_l_bnds[i], index);
1523 userDefinedConstraints.all_continuous_upper_bound(acv_u_bnds[i], index);
1524 }
1525 }
1526
1527 const IntVector& adiv = vars.all_discrete_int_variables();
1528 StringMultiArrayConstView adiv_labels
1529 = vars.all_discrete_int_variable_labels();
1530 const IntVector& adiv_l_bnds = cons.all_discrete_int_lower_bounds();
1531 const IntVector& adiv_u_bnds = cons.all_discrete_int_upper_bounds();
1532 StringMultiArrayConstView cv_adiv_labels
1533 = currentVariables.all_discrete_int_variable_labels();
1534 size_t div_begin = vars.div_start(), num_div = vars.div(),
1535 div_end = div_begin + num_div, num_adiv = vars.adiv();
1536 for (i=0; i<div_begin; ++i) {
1537 index = find_index(cv_adiv_labels, adiv_labels[i]);
1538 if (index != _NPOS) {
1539 currentVariables.all_discrete_int_variable(adiv[i], index);
1540 userDefinedConstraints.all_discrete_int_lower_bound(adiv_l_bnds[i],index);
1541 userDefinedConstraints.all_discrete_int_upper_bound(adiv_u_bnds[i],index);
1542 }
1543 }
1544 for (i=div_end; i<num_adiv; ++i) {
1545 index = find_index(cv_adiv_labels, adiv_labels[i]);
1546 if (index != _NPOS) {
1547 currentVariables.all_discrete_int_variable(adiv[i], index);
1548 userDefinedConstraints.all_discrete_int_lower_bound(adiv_l_bnds[i],index);
1549 userDefinedConstraints.all_discrete_int_upper_bound(adiv_u_bnds[i],index);
1550 }
1551 }
1552
1553 size_t dsv_begin = vars.dsv_start(), num_dsv = vars.dsv(),
1554 dsv_end = dsv_begin + num_dsv, num_adsv = vars.adsv();
1555 StringMultiArrayConstView adsv = vars.all_discrete_string_variables();
1556 StringMultiArrayConstView adsv_labels
1557 = vars.all_discrete_string_variable_labels();
1558 StringMultiArrayConstView cv_adsv_labels
1559 = currentVariables.all_discrete_string_variable_labels();
1560 for (i=0; i<dsv_begin; ++i) {
1561 index = find_index(cv_adsv_labels, adsv_labels[i]);
1562 if (index != _NPOS)
1563 currentVariables.all_discrete_string_variable(adsv[i], index);
1564 }
1565 for (i=dsv_end; i<num_adsv; ++i) {
1566 index = find_index(cv_adsv_labels, adsv_labels[i]);
1567 if (index != _NPOS)
1568 currentVariables.all_discrete_string_variable(adsv[i], index);
1569 }
1570
1571 const RealVector& adrv = vars.all_discrete_real_variables();
1572 StringMultiArrayConstView adrv_labels
1573 = vars.all_discrete_real_variable_labels();
1574 const RealVector& adrv_l_bnds = cons.all_discrete_real_lower_bounds();
1575 const RealVector& adrv_u_bnds = cons.all_discrete_real_upper_bounds();
1576 StringMultiArrayConstView cv_adrv_labels
1577 = currentVariables.all_discrete_real_variable_labels();
1578 size_t drv_begin = vars.drv_start(), num_drv = vars.drv(),
1579 drv_end = drv_begin + num_drv, num_adrv = vars.adrv();
1580 for (i=0; i<drv_begin; ++i) {
1581 index = find_index(cv_adrv_labels, adrv_labels[i]);
1582 if (index != _NPOS) {
1583 currentVariables.all_discrete_real_variable(adrv[i], index);
1584 userDefinedConstraints.all_discrete_real_lower_bound(adrv_l_bnds[i],
1585 index);
1586 userDefinedConstraints.all_discrete_real_upper_bound(adrv_u_bnds[i],
1587 index);
1588 }
1589 }
1590 for (i=drv_end; i<num_adrv; ++i) {
1591 index = find_index(cv_adrv_labels, adrv_labels[i]);
1592 if (index != _NPOS) {
1593 currentVariables.all_discrete_real_variable(adrv[i], index);
1594 userDefinedConstraints.all_discrete_real_lower_bound(adrv_l_bnds[i],
1595 index);
1596 userDefinedConstraints.all_discrete_real_upper_bound(adrv_u_bnds[i],
1597 index);
1598 }
1599 }
1600 }
1601
1602 } // namespace Dakota
1603