/*  _______________________________________________________________________

    DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
    Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
    This software is distributed under the GNU Lesser General Public License.
    For more information, see the README file in the top Dakota directory.
    _______________________________________________________________________ */

//- Class:       NonDGlobalInterval
//- Description: Class for interval bound estimation for epistemic UQ
//- Owner:       Laura Swiler
//- Checked by:
//- Version:

#include "NonDGlobalInterval.hpp"
#include "dakota_system_defs.hpp"
#include "dakota_data_io.hpp"
#include "NonDLHSSampling.hpp"
#include "DakotaModel.hpp"
#include "DakotaIterator.hpp"
#include "RecastModel.hpp"
#include "DataFitSurrModel.hpp"
#include "ProblemDescDB.hpp"
#ifdef HAVE_NCSU
#include "NCSUOptimizer.hpp"
#endif
#ifdef HAVE_ACRO
#include "COLINOptimizer.hpp"
#endif
#include "NormalRandomVariable.hpp"

//#define DEBUG

namespace Dakota {

// initialization of statics
NonDGlobalInterval* NonDGlobalInterval::nondGIInstance(NULL);


NonDGlobalInterval::NonDGlobalInterval(ProblemDescDB& problem_db, Model& model):
  NonDInterval(problem_db, model),
  seedSpec(probDescDB.get_int("method.random_seed")),
  numSamples(probDescDB.get_int("method.samples")),
  rngName(probDescDB.get_string("method.random_number_generator")),
  allResponsesPerIter(false), dataOrder(1), distanceTol(convergenceTol),
  distanceConvergeLimit(1), improvementConvergeLimit(2)
{
  bool err_flag = false;

  // Define the optimization sub-problem solver
  unsigned short opt_alg = probDescDB.get_ushort("method.sub_method");
  bool discrete
    = (numDiscreteIntVars || numDiscreteStringVars || numDiscreteRealVars);
  if (opt_alg == SUBMETHOD_EGO) {
    eifFlag = gpModelFlag = true;
    if (discrete) {
      Cerr << "Error: discrete variables are not currently supported for the "
           << "EGO solver in NonDGlobalInterval.  Please select SBO."
           << std::endl;
      err_flag = true;
    }
  }
  else if (opt_alg == SUBMETHOD_SBO)
    { eifFlag = false; gpModelFlag = true; }
  else if (opt_alg == SUBMETHOD_EA)
    eifFlag = gpModelFlag = false;
  else if (opt_alg == SUBMETHOD_DEFAULT)
    { gpModelFlag = true; eifFlag = (discrete) ? false : true; }
  else {
    Cerr << "Error: unsupported optimization algorithm selection in "
         << "NonDGlobalInterval.  Please select EGO, SBO, or EA." << std::endl;
    err_flag = true;
  }

  if (numContinuousVars     != numContIntervalVars                        ||
      numDiscreteIntVars    != numDiscIntervalVars + numDiscSetIntUncVars ||
      numDiscreteStringVars != 0 /* numDiscSetStringUncVars */            ||
      numDiscreteRealVars   != numDiscSetRealUncVars) {
    Cerr << "\nError: only continuous, discrete int, and discrete real "
         << "epistemic variables are currently supported in NonDGlobalInterval."
         << std::endl;
    err_flag = true;
  }

  if (gpModelFlag) {
    size_t num_uv = numContIntervalVars + numDiscIntervalVars +
      numDiscSetIntUncVars + numDiscreteRealVars;
    if (!numSamples) // use a default of #terms in a quadratic polynomial
      numSamples = (num_uv+1)*(num_uv+2)/2;
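    // e.g., num_uv = 4 --> numSamples = (4+1)*(4+2)/2 = 15 build points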
    String approx_type = "global_kriging";
    if (probDescDB.get_short("method.nond.emulator") == GP_EMULATOR)
      approx_type = "global_gaussian";
    else if (probDescDB.get_short("method.nond.emulator") == EXPGP_EMULATOR)
      approx_type = "global_exp_gauss_proc";
    unsigned short sample_type = SUBMETHOD_DEFAULT;
    String sample_reuse = "none";
    if (probDescDB.get_bool("method.derivative_usage")) {
      if (approx_type == "global_gaussian") {
        Cerr << "\nError: efficient_global does not support gaussian_process "
             << "when derivatives are present; use kriging instead."
             << std::endl;
        err_flag = true;
      }
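      // dataOrder uses ASV-style bits: 1 = function values, 2 = gradients,
      // 4 = Hessians; the gradient/Hessian bits are or'd in below when the
      // corresponding data are available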
      if (iteratedModel.gradient_type() != "none") dataOrder |= 2;
      if (iteratedModel.hessian_type()  != "none") dataOrder |= 4;
    }
    // get point samples file
    const String& import_pts_file
      = probDescDB.get_string("method.import_build_points_file");
    if (!import_pts_file.empty())
      { numSamples = 0; sample_reuse = "all"; }

    // instantiate the Gaussian Process Model/Iterator recursions

    // The following uses an on-the-fly derived ctor:
    short mode = (eifFlag) ? ACTIVE_UNIFORM : ACTIVE;
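    // (ACTIVE_UNIFORM is understood to draw the initial LHS sample uniformly
    // over the active variable bounds for the EGO case, whereas ACTIVE
    // retains the active view's native sampling)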
    daceIterator.assign_rep(std::make_shared<NonDLHSSampling>
                            (iteratedModel, sample_type, numSamples, seedSpec,
                             rngName, false, mode));
    // only use derivatives if the user requested them and they are available
    daceIterator.active_set_request_values(dataOrder);

    // Construct fHatModel using a GP approximation over the active/uncertain
    // vars (same view as iteratedModel: not the typical All view for DACE).
    //
    // Note: a low-order trend is more robust when there is limited data, such
    // as a few discrete values --> NaNs observed for a quadratic trend with 2
    // model forms
    unsigned short trend_order = (discrete) ? 1 : 2;
    UShortArray approx_order(num_uv, trend_order);
    short corr_order = -1, corr_type = NO_CORRECTION;
    //const Variables& curr_vars = iteratedModel.current_variables();
    ActiveSet gp_set = iteratedModel.current_response().active_set(); // copy
    gp_set.request_values(1); // no surr deriv evals; GP may be grad-enhanced
    fHatModel.assign_rep(std::make_shared<DataFitSurrModel>
      (daceIterator, iteratedModel,
       gp_set, approx_type, approx_order, corr_type, corr_order, dataOrder,
       outputLevel, sample_reuse, import_pts_file,
       probDescDB.get_ushort("method.import_build_format"),
       probDescDB.get_bool("method.import_build_active_only"),
       probDescDB.get_string("method.export_approx_points_file"),
       probDescDB.get_ushort("method.export_approx_format")));

    // Following this ctor, IteratorScheduler::init_iterator() initializes the
    // parallel configuration for NonDGlobalInterval + iteratedModel using
    // NonDGlobalInterval's maxEvalConcurrency.  During fHatModel construction
    // above, DataFitSurrModel::derived_init_communicators() initializes the
    // parallel configuration for daceIterator + iteratedModel using
    // daceIterator's maxEvalConcurrency.  The only iteratedModel concurrency
    // currently exercised is that used by daceIterator within the initial GP
    // construction, but the NonDGlobalInterval maxEvalConcurrency must still
    // be set so as to avoid parallel config errors resulting from avail_procs
    // > max_concurrency within IteratorScheduler::init_iterator().  The max
    // of the local deriv concurrency & the DACE concurrency is used for this
    // purpose.
    maxEvalConcurrency = std::max(maxEvalConcurrency,
      daceIterator.maximum_evaluation_concurrency());
  }
  else
    fHatModel = iteratedModel; // shared rep

  if (err_flag)
    abort_handler(-1);

  // Configure a RecastModel with one objective and no constraints using the
  // alternate minimalist constructor: the recast fn pointers are reset for
  // each level within the run fn.
  SizetArray recast_vars_comps_total;  // default: empty; no change in size
  BitArray all_relax_di, all_relax_dr; // default: empty; no discrete relaxation
  short recast_resp_order = 1; // nongradient-based optimizers
  intervalOptModel.assign_rep(std::make_shared<RecastModel>
                              (fHatModel, recast_vars_comps_total, all_relax_di,
                               all_relax_dr, 1, 0, 0, recast_resp_order));
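  // (in the minimalist ctor, the trailing 1, 0, 0 arguments are taken to
  // specify the single recast objective and zero nonlinear constraints
  // noted above)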

  // Instantiate the optimizer used on the GP.
  // TO DO: add support for discrete EGO
  if (eifFlag) { // EGO solver

    // preserve these EGO settings for now, but eventually map them through
    // from the spec (and update test baselines)
    convergenceTol = 1.e-12; distanceTol = 1.e-8;
    if (maxIterations < 0)
      maxIterations  = 25*numContinuousVars;

    double min_box_size = 1.e-15, vol_box_size = 1.e-15;
    int max_direct_iter = 1000, max_direct_eval = 10000; // 10x the defaults
#ifdef HAVE_NCSU
    // EGO with DIRECT (exploits GP variance)
    intervalOptimizer.assign_rep(std::make_shared<NCSUOptimizer>
                                 (intervalOptModel, max_direct_iter,
                                  max_direct_eval, min_box_size, vol_box_size));
#else
    Cerr << "Error: the NCSU DIRECT optimizer is not available for computing "
         << "the interval bounds from the GP model." << std::endl;
    abort_handler(-1);
#endif // HAVE_NCSU
  }
  else { // EA for MINLP, with or without GP emulation
    int max_ea_iter, max_ea_eval;
    if (gpModelFlag) // SBGO controls from user spec; EA controls hard-wired
      { max_ea_iter = 50; max_ea_eval = 5000; } // default EA pop_size = 100
    else // EA controls from user spec
      { max_ea_iter = maxIterations; max_ea_eval = maxFunctionEvals; }

#ifdef HAVE_ACRO
    // mixed EA (ignores GP variance)
    intervalOptimizer.assign_rep(std::make_shared<COLINOptimizer>
                                 ("coliny_ea", intervalOptModel, seedSpec,
                                  max_ea_iter, max_ea_eval));
//#elif HAVE_JEGA
//    intervalOptimizer.assign_rep(new
//      JEGAOptimizer(intervalOptModel, max_iter, max_eval, min_box_size,
//      vol_box_size), false);
#else
    Cerr << "Error: mixed EA is not available for computing interval bounds."
         << std::endl;
    abort_handler(-1);
#endif // HAVE_ACRO
  }
}


NonDGlobalInterval::~NonDGlobalInterval()
{ }


void NonDGlobalInterval::derived_init_communicators(ParLevLIter pl_iter)
{
  iteratedModel.init_communicators(pl_iter, maxEvalConcurrency);

  // intervalOptModel.init_communicators() recursion is currently sufficient
  // for fHatModel.  An additional fHatModel.init_communicators() call would
  // be motivated by special parallel usage of fHatModel below that is not
  // otherwise covered by the recursion.
  //fHatMaxConcurrency = maxEvalConcurrency; // local derivative concurrency
  //fHatModel.init_communicators(pl_iter, fHatMaxConcurrency);

  // intervalOptimizer uses NoDBBaseConstructor, so no need to manage
  // DB list nodes at this level
  intervalOptimizer.init_communicators(pl_iter);
}


void NonDGlobalInterval::derived_set_communicators(ParLevLIter pl_iter)
{
  NonD::derived_set_communicators(pl_iter);

  //fHatMaxConcurrency = maxEvalConcurrency; // local derivative concurrency
  //fHatModel.set_communicators(pl_iter, fHatMaxConcurrency);

  // intervalOptimizer uses NoDBBaseConstructor, so no need to manage
  // DB list nodes at this level
  intervalOptimizer.set_communicators(pl_iter);
}


void NonDGlobalInterval::derived_free_communicators(ParLevLIter pl_iter)
{
  // intervalOptimizer uses NoDBBaseConstructor, so no need to manage
  // DB list nodes at this level
  intervalOptimizer.free_communicators(pl_iter);

  //fHatMaxConcurrency = maxEvalConcurrency; // local derivative concurrency
  //fHatModel.free_communicators(pl_iter, fHatMaxConcurrency);

  iteratedModel.free_communicators(pl_iter, maxEvalConcurrency);
}


void NonDGlobalInterval::core_run()
{
  // set the object instance pointer for use within static member functions
  NonDGlobalInterval* prev_instance = nondGIInstance;
  nondGIInstance = this;

  // now that vars/labels/bounds/targets have flowed down at run time from
  // any higher-level recursions, propagate them up the local Model recursions
  // so that they are correct when they propagate back down.  There is no
  // need to recur below iteratedModel.
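  // (layers = 2 spans the RecastModel -> DataFitSurrModel -> truth model
  // recursion when a GP is employed; layers = 1 spans RecastModel -> truth)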
  size_t layers = (gpModelFlag) ? 2 : 1;
  intervalOptModel.update_from_subordinate_model(layers-1);

  // Build the initial GP once for all response functions
  if (gpModelFlag)
    fHatModel.build_approximation();

  Sizet2DArray vars_map, primary_resp_map(1), secondary_resp_map;
  primary_resp_map[0].resize(1);
  BoolDequeArray nonlinear_resp_map(1);
  nonlinear_resp_map[0] = BoolDeque(numFunctions, false);
  BoolDeque max_sense(1);
  std::shared_ptr<RecastModel> int_opt_model_rep =
    std::static_pointer_cast<RecastModel>(intervalOptModel.model_rep());

  initialize(); // virtual fn

  ParLevLIter pl_iter = methodPCIter->mi_parallel_level_iterator(miPLIndex);

  for (respFnCntr=0; respFnCntr<numFunctions; ++respFnCntr) {

    primary_resp_map[0][0] = respFnCntr;
    nonlinear_resp_map[0][respFnCntr] = true;
    if (!eifFlag)
      int_opt_model_rep->init_maps(vars_map, false, NULL, NULL,
        primary_resp_map, secondary_resp_map, nonlinear_resp_map,
        extract_objective, NULL);

    for (cellCntr=0; cellCntr<numCells; ++cellCntr) {

      set_cell_bounds(); // virtual fn for setting bounds for local min/max

      // initialize the recast model for lower bound estimation
      if (eifFlag)
        int_opt_model_rep->init_maps(vars_map, false, NULL, NULL,
          primary_resp_map, secondary_resp_map, nonlinear_resp_map,
          EIF_objective_min, NULL);
      else {
        max_sense[0] = false;
        int_opt_model_rep->primary_response_fn_sense(max_sense);
      }

      // Iterate until EGO converges
      distanceConvergeCntr = improvementConvergeCntr = globalIterCntr = 0;
      prevCVStar.size(0); prevDIVStar.size(0); prevDRVStar.size(0);
      boundConverged = false;
      while (!boundConverged) {
        ++globalIterCntr;

        // determine approxFnStar from the minimum among the sample data
        if (eifFlag)
          get_best_sample(false, true);

        // Execute GLOBAL search and retrieve results
        Cout << "\n>>>>> Initiating global minimization: response "
             << respFnCntr+1 << " cell " << cellCntr+1 << " iteration "
             << globalIterCntr << "\n\n";
        //intervalOptimizer.reset(); // redundant for COLINOptimizer::core_run()
        intervalOptimizer.run(pl_iter);
        // output iteration results, update convergence controls, and update GP
        post_process_run_results(false);
      }
      if (gpModelFlag)
        get_best_sample(false, false); // pull truthFnStar from sample data
      post_process_cell_results(false); // virtual fn: post-process min

      // initialize the recast model for upper bound estimation
      if (eifFlag)
        int_opt_model_rep->init_maps(vars_map, false, NULL, NULL,
          primary_resp_map, secondary_resp_map, nonlinear_resp_map,
          EIF_objective_max, NULL);
      else {
        max_sense[0] = true;
        int_opt_model_rep->primary_response_fn_sense(max_sense);
      }

      // Iterate until EGO converges
      distanceConvergeCntr = improvementConvergeCntr = globalIterCntr = 0;
      prevCVStar.size(0); prevDIVStar.size(0); prevDRVStar.size(0);
      boundConverged = false;
      while (!boundConverged) {
        ++globalIterCntr;

        // determine approxFnStar from the maximum among the sample data
        if (eifFlag)
          get_best_sample(true, true);

        // Execute GLOBAL search
        Cout << "\n>>>>> Initiating global maximization: response "
             << respFnCntr+1 << " cell " << cellCntr+1 << " iteration "
             << globalIterCntr << "\n\n";
        //intervalOptimizer.reset(); // redundant for COLINOptimizer::core_run()
        intervalOptimizer.run(pl_iter);
        // output iteration results, update convergence controls, and update GP
        post_process_run_results(true);
      }
      if (gpModelFlag)
        get_best_sample(true, false); // pull truthFnStar from sample data
      post_process_cell_results(true); // virtual fn: post-process max
    }
    post_process_response_fn_results(); // virtual fn: post-process respFn
    nonlinear_resp_map[0][respFnCntr] = false; // reset
  }
  post_process_final_results(); // virtual fn: final post-processing

  // (conditionally) export the final surrogates
  if (gpModelFlag)
    export_final_surrogates(fHatModel);

  // restore in case of recursion
  nondGIInstance = prev_instance;
}


void NonDGlobalInterval::initialize()
{ } // default is no-op


void NonDGlobalInterval::set_cell_bounds()
{ } // default is no-op


void NonDGlobalInterval::post_process_run_results(bool maximize)
{
  const Variables&     vars_star = intervalOptimizer.variables_results();
  const RealVector&  c_vars_star = vars_star.continuous_variables();
  const IntVector&  di_vars_star = vars_star.discrete_int_variables();
  const RealVector& dr_vars_star = vars_star.discrete_real_variables();
  const Response&      resp_star = intervalOptimizer.response_results();
  Real fn_star = resp_star.function_value(0), fn_conv, dist_conv;

  Cout << "\nResults of interval optimization:\nFinal point             =\n";
  if (vars_star.cv())  Cout <<  c_vars_star;
  if (vars_star.div()) Cout << di_vars_star;
  if (vars_star.drv()) Cout << dr_vars_star;
  if (eifFlag)
    Cout << "Expected Improvement    =\n                     "
         << std::setw(write_precision+7) << -fn_star << '\n';
  else {
    if (gpModelFlag) Cout << "Estimate of ";
    if (maximize)    Cout << "Upper Bound =\n                     ";
    else             Cout << "Lower Bound =\n                     ";
    Cout << std::setw(write_precision+7) << fn_star << '\n';
  }

  if (!gpModelFlag)
    { truthFnStar = fn_star; boundConverged = true; return; }

  if (prevCVStar.empty() && prevDIVStar.empty() && prevDRVStar.empty())
    dist_conv = fn_conv = DBL_MAX; // first iteration
  else if (eifFlag) {
    // Euclidean distance of successive optimal solns: continuous vars only
    dist_conv = rel_change_L2(c_vars_star, prevCVStar);

    // EIF values directly provide estimates of soln convergence
    fn_conv = -fn_star; // EI negated for minimization
    // If DIRECT fails to find a point with EIF > 0, it returns the center
    // point as the optimal solution.  EGO may have converged, but DIRECT may
    // simply have failed to find a point with a good EIF value.  Adding this
    // midpoint can alter the GP enough to allow DIRECT to find something
    // useful, so we require max(EIF) < tol twice to make sure.  Note that we
    // cannot require it more than twice, since that would cause EGO to add
    // the center point more than once, which would damage the GP.
    // Unfortunately, when it happens the second time, it may still be that
    // DIRECT failed rather than that EGO converged.

    // GT: The only reason we introduce this additional level of convergence
    // is the hope that adding the midpoint will improve the GP enough that
    // 1. non-monotonic convergence can be addressed by improving the quality
    //    of the GP, in the hope that if a better soln exists it can be
    //    captured by the new GP, or
    // 2. we construct a 'better' GP, in the hope that DIRECT will actually
    //    find a soln pt.
    // Furthermore, we do not require this to occur on consecutive runs, for
    // the same reasons that we do not add points within the dist_tol.
  }
  else {
    // Euclidean distance of successive optimal solns: continuous,
    // discrete int, and discrete real variables
    dist_conv = rel_change_L2(c_vars_star, prevCVStar, di_vars_star,
                              prevDIVStar, dr_vars_star, prevDRVStar);
    // for SBO, reference fn_star to its previous value
    fn_conv = std::abs(1. - fn_star / prevFnStar); // rel change in the bound
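    // e.g., fn_star = 1.02 with prevFnStar = 1.00 --> fn_conv = 0.02 (a 2%
    // relative change in the bound estimate)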
  }

  // update the convergence counters
  if (dist_conv < distanceTol)    ++distanceConvergeCntr;
  if (fn_conv   < convergenceTol) ++improvementConvergeCntr;

  // depending on the convergence assessment, we may update the GP, converge
  // the iteration and update the GP, or converge without updating the GP.
  if (globalIterCntr >= maxIterations)
    { boundConverged = true; evaluate_response_star_truth(); }
  else if (distanceConvergeCntr    >= distanceConvergeLimit ||
           improvementConvergeCntr >= improvementConvergeLimit)
    // if successive iterates are very similar, we do not add the training pt,
    // since the danger of damaging the GP outweighs small possible gains.
    boundConverged = true;
  else { // evaluate truth response and update GP + prev solution trackers
    evaluate_response_star_truth();
    // update previous solution tracking
    if (vars_star.cv())  copy_data( c_vars_star, prevCVStar);
    if (vars_star.div()) copy_data(di_vars_star, prevDIVStar);
    if (vars_star.drv()) copy_data(dr_vars_star, prevDRVStar);
    if (!eifFlag) prevFnStar = fn_star;
  }
}


void NonDGlobalInterval::evaluate_response_star_truth()
{
  //fHatModel.component_parallel_mode(TRUTH_MODEL_MODE);
  const Variables& vars_star = intervalOptimizer.variables_results();
  iteratedModel.active_variables(vars_star);
  ActiveSet set = iteratedModel.current_response().active_set();
  // GT: get all responses per function evaluation; changing this might break
  // some of the logic needed to determine whether the inner-loop surrogate
  // needs to be reconstructed
  if (allResponsesPerIter)
    set.request_values(dataOrder);
  else
    { set.request_values(0); set.request_value(dataOrder, respFnCntr); }
  iteratedModel.evaluate(set);

  // Update the GP approximation
  IntResponsePair resp_star_truth(iteratedModel.evaluation_id(),
                                  iteratedModel.current_response());
  fHatModel.append_approximation(vars_star, resp_star_truth, true);
}


void NonDGlobalInterval::get_best_sample(bool maximize, bool eval_approx)
{ } // default is no-op


void NonDGlobalInterval::post_process_cell_results(bool maximize)
{ } // default is no-op


void NonDGlobalInterval::post_process_response_fn_results()
{ } // default is no-op


void NonDGlobalInterval::post_process_final_results()
{ } // default is no-op


void NonDGlobalInterval::
extract_objective(const Variables& sub_model_vars, const Variables& recast_vars,
                  const Response& sub_model_response, Response& recast_response)
{
  // the minimize/maximize sense is set separately, so this fn only
  // extracts the active response fn using respFnCntr

  Real sub_model_fn
    = sub_model_response.function_values()[nondGIInstance->respFnCntr];
  const ShortArray& recast_asv = recast_response.active_set_request_vector();
  if (recast_asv[0] & 1)
    recast_response.function_value(sub_model_fn, 0);
  // Note: could track c/di/drVarsStar and approxFnStar here
}


void NonDGlobalInterval::
EIF_objective_min(const Variables& sub_model_vars, const Variables& recast_vars,
                  const Response& sub_model_response, Response& recast_response)
{
  // Means are passed in, but the variance must be retrieved from the GP
  const RealVector& means = sub_model_response.function_values();
  const RealVector& variances
    = nondGIInstance->fHatModel.approximation_variances(recast_vars);

  const ShortArray& recast_asv = recast_response.active_set_request_vector();
  if (recast_asv[0] & 1) {
    // max(EI) for both interval bounds --> return -EI to the minimizer
    const Real& mean = means[nondGIInstance->respFnCntr];
    Real stdv = std::sqrt(variances[nondGIInstance->respFnCntr]);
    const Real& approx_fn_star = nondGIInstance->approxFnStar;
    // Calculate expected improvement: +/-approx_fn_star used for EIF_min/max
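    // EI(x) = (f* - m(x)) * Phi(u) + s(x) * phi(u) with u = (f* - m(x))/s(x),
    // where m, s are the GP mean and std deviation and f* = approxFnStar;
    // the branch below guards the snv /= stdv division against stdv ~ 0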
    Real Phi_snv, phi_snv, snv = approx_fn_star - mean;
    if (std::fabs(snv) >= std::fabs(stdv)*50.0) {
      phi_snv = 0.;
      Phi_snv = (snv > 0.) ? 1. : 0.;
    }
    else {
      snv /= stdv; // now snv is the standard normal variate
      Phi_snv = Pecos::NormalRandomVariable::std_cdf(snv);
      phi_snv = Pecos::NormalRandomVariable::std_pdf(snv);
    }
    Real ei = (approx_fn_star - mean)*Phi_snv + stdv*phi_snv;
    // both bounds maximize EIF -> minimize -EIF
    recast_response.function_value(-ei, 0);
#ifdef DEBUG
    Cout << "(Evaluation,ApproxFnStar,Phi,phi,vars): (" << mean << ","
         << approx_fn_star << "," << Phi_snv << "," << phi_snv;
    const RealVector& eval_vars = recast_vars.continuous_variables();
    for (size_t i=0; i < eval_vars.length(); i++)
      Cout << ", " << eval_vars[i];
    Cout << ")\n";
#endif
  }
}


void NonDGlobalInterval::
EIF_objective_max(const Variables& sub_model_vars, const Variables& recast_vars,
                  const Response& sub_model_response, Response& recast_response)
{
  // Means are passed in, but the variance must be retrieved from the GP
  const RealVector& means = sub_model_response.function_values();
  const RealVector& variances
    = nondGIInstance->fHatModel.approximation_variances(recast_vars);

  const ShortArray& recast_asv = recast_response.active_set_request_vector();
  if (recast_asv[0] & 1) {
    // max(EI) for both interval bounds --> return -EI to the minimizer
    Real mean = -means[nondGIInstance->respFnCntr],
         stdv = std::sqrt(variances[nondGIInstance->respFnCntr]);
    const Real& approx_fn_star = nondGIInstance->approxFnStar;
    // Calculate expected improvement: +/-approx_fn_star used for EIF_min/max
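    // maximization is recast as minimization of -f: the mean and the
    // incumbent best are negated above, so the same EI expression as in
    // EIF_objective_min applies with f* replaced by -approxFnStar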
    Real Phi_snv, phi_snv, snv = -approx_fn_star - mean;
    if (std::fabs(snv) >= std::fabs(stdv)*50.0) {
      phi_snv = 0.;
      Phi_snv = (snv > 0.) ? 1. : 0.;
    }
    else {
      snv /= stdv; // now snv is the standard normal variate
      Phi_snv = Pecos::NormalRandomVariable::std_cdf(snv);
      phi_snv = Pecos::NormalRandomVariable::std_pdf(snv);
    }
    Real ei = (-approx_fn_star - mean)*Phi_snv + stdv*phi_snv;
    // both bounds maximize EIF -> minimize -EIF
    recast_response.function_value(-ei, 0);
#ifdef DEBUG
    Cout << "(Evaluation,ApproxFnStar,Phi,phi,vars): (" << mean << ","
         << approx_fn_star << "," << Phi_snv << "," << phi_snv;
    const RealVector& eval_vars = recast_vars.continuous_variables();
    for (size_t i=0; i < eval_vars.length(); i++)
      Cout << ", " << eval_vars[i];
    Cout << ")\n";
#endif
  }
}

} // namespace Dakota