1 /* _______________________________________________________________________
2
3 DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
4 Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
5 This software is distributed under the GNU Lesser General Public License.
6 For more information, see the README file in the top Dakota directory.
7 _______________________________________________________________________ */
8
9 //- Class: DiscrepancyCorrection
10 //- Description: Implementation code for the DiscrepancyCorrection class
11 //- Owner: Mike Eldred
12 //- Checked by:
13
14 #include "dakota_system_defs.hpp"
15 #include "dakota_data_io.hpp"
16 #include "DiscrepancyCorrection.hpp"
17 #include "ParamResponsePair.hpp"
18 #include "PRPMultiIndex.hpp"
19 #include "SurrogateData.hpp"
20 #include "DataMethod.hpp"
21
22 static const char rcsId[]="@(#) $Id: DiscrepancyCorrection.cpp 7024 2010-10-16 01:24:42Z mseldre $";
23
24 //#define DEBUG
25
26
27 namespace Dakota {
28
29 extern PRPCache data_pairs; // global container
30
31 void DiscrepancyCorrection::
initialize(Model & surr_model,const IntSet & surr_fn_indices,short corr_type,short corr_order)32 initialize(Model& surr_model, const IntSet& surr_fn_indices, short corr_type,
33 short corr_order)
34 {
35 surrModel = surr_model; // shallow copy
36 surrogateFnIndices = surr_fn_indices;
37 numFns = surr_model.qoi(); numVars = surr_model.cv();
38 correctionType = corr_type; correctionOrder = corr_order;
39
40 initialize_corrections();
41
42 initializedFlag = true;
43 }
44
45
46 void DiscrepancyCorrection::
initialize(const IntSet & surr_fn_indices,size_t num_fns,size_t num_vars,short corr_type,short corr_order)47 initialize(const IntSet& surr_fn_indices, size_t num_fns, size_t num_vars,
48 short corr_type, short corr_order)
49 {
50 surrogateFnIndices = surr_fn_indices;
51 numFns = num_fns; numVars = num_vars;
52 correctionType = corr_type; correctionOrder = corr_order;
53
54 initialize_corrections();
55
56 initializedFlag = true;
57
58 // in this case, surrModel is null and must be protected
59 }
60
61
62 void DiscrepancyCorrection::
initialize(const IntSet & surr_fn_indices,size_t num_fns,size_t num_vars,short corr_type,short corr_order,const String & approx_type)63 initialize(const IntSet& surr_fn_indices, size_t num_fns, size_t num_vars,
64 short corr_type, short corr_order, const String& approx_type)
65 {
66 surrogateFnIndices = surr_fn_indices;
67 numFns = num_fns; numVars = num_vars;
68 correctionType = corr_type; correctionOrder = corr_order;
69 approxType = approx_type;
70
71 initialize_corrections();
72
73 initializedFlag = true;
74
75 // in this case, surrModel is null and must be protected
76 }
77
78
initialize_corrections()79 void DiscrepancyCorrection::initialize_corrections()
80 {
81 // initialize correction data
82 correctionComputed = badScalingFlag = false;
83 if (correctionType == ADDITIVE_CORRECTION)
84 { computeAdditive = true; computeMultiplicative = false; }
85 else if (correctionType == MULTIPLICATIVE_CORRECTION)
86 { computeAdditive = false; computeMultiplicative = true; }
87 else if (correctionType == COMBINED_CORRECTION) {
88 computeAdditive = computeMultiplicative = true;
89 combineFactors.resize(numFns);
90 combineFactors = 1.; // used on 1st cycle prior to existence of prev pt.
91 }
92 UShortArray approx_order(numVars, correctionOrder);
93 switch (correctionOrder) {
94 case 2: dataOrder = 7; break;
95 case 1: dataOrder = 3; break;
96 case 0: default: dataOrder = 1; break;
97 }
98
99 ISIter it;
100 if (approxType.empty())
101 sharedData = SharedApproxData("local_taylor", approx_order, numVars,
102 dataOrder, NORMAL_OUTPUT);
103 else {
104 dataOrder = 1; // for GP and poly, do not need grad or hessian info
105 sharedData = SharedApproxData(approxType, approx_order, numVars,
106 dataOrder, NORMAL_OUTPUT);
107 }
108 if (computeAdditive) {
109 addCorrections.resize(numFns);
110 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it)
111 addCorrections[*it] = Approximation(sharedData);
112 }
113 if (computeMultiplicative) {
114 multCorrections.resize(numFns);
115 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it)
116 multCorrections[*it] = Approximation(sharedData);
117 }
118 correctionPrevCenterPt = surrModel.current_variables().copy();
119 }
120
121
122 /** Compute an additive or multiplicative correction that corrects the
123 approx_response to have 0th-order consistency (matches values),
124 1st-order consistency (matches values and gradients), or 2nd-order
125 consistency (matches values, gradients, and Hessians) with the
126 truth_response at a single point (e.g., the center of a trust
127 region). The 0th-order, 1st-order, and 2nd-order corrections use
128 scalar values, linear scaling functions, and quadratic scaling
129 functions, respectively, for each response function. */
130 void DiscrepancyCorrection::
compute(const Variables & vars,const Response & truth_response,const Response & approx_response,bool quiet_flag)131 compute(const Variables& vars, const Response& truth_response,
132 const Response& approx_response, bool quiet_flag)
133 {
134 // The incoming approx_response is assumed to be uncorrected (i.e.,
135 // correction has not been applied to it previously). In this case,
136 // it is not necessary to back out a previous correction, and the
137 // computation of the new correction is straightforward.
138
139 // update previous center data arrays for combined corrections
140 // TO DO: augment approxFnsPrevCenter logic for data fit surrogates. May
141 // require additional fn evaluation of previous pt on current surrogate.
142 // This could combine with DB lookups within apply_multiplicative()
143 // (approx re-evaluated if not found in DB search).
144 int index; size_t j, k; ISIter it;
145 if (correctionType == COMBINED_CORRECTION && correctionComputed) {
146 approxFnsPrevCenter = approxFnsCenter;
147 truthFnsPrevCenter = truthFnsCenter;
148 it = surrogateFnIndices.begin();
149 const Pecos::SurrogateDataVars& anchor_sdv
150 = (computeAdditive || badScalingFlag) ?
151 addCorrections[*it].surrogate_data().anchor_variables() :
152 multCorrections[*it].surrogate_data().anchor_variables();
153 correctionPrevCenterPt.continuous_variables(
154 anchor_sdv.continuous_variables());
155 correctionPrevCenterPt.discrete_int_variables(
156 anchor_sdv.discrete_int_variables());
157 correctionPrevCenterPt.discrete_real_variables(
158 anchor_sdv.discrete_real_variables());
159 }
160 // update current center data arrays
161 const RealVector& truth_fns = truth_response.function_values();
162 const RealVector& approx_fns = approx_response.function_values();
163 bool fall_back
164 = (computeMultiplicative && correctionOrder >= 1 && surrModel.is_null());
165 if (correctionType == COMBINED_CORRECTION) truthFnsCenter = truth_fns;
166 if (correctionType == COMBINED_CORRECTION || fall_back)
167 approxFnsCenter = approx_fns;
168 if (fall_back) approxGradsCenter = approx_response.function_gradients();
169
170 // detect numerical issues with multiplicative scaling
171 badScalingFlag = (computeMultiplicative) ?
172 discrepCalc.check_multiplicative(truth_fns, approx_fns, correctionOrder) :
173 false;
174 if (badScalingFlag) {
175 Cerr << "\nWarning: Multiplicative correction temporarily deactivated "
176 << "due to functions near zero.\n Additive correction will "
177 << "be used.\n";
178 if (addCorrections.empty()) {
179 addCorrections.resize(numFns);
180 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it)
181 addCorrections[*it] = Approximation(sharedData);
182 }
183 }
184
185 Pecos::SurrogateDataVars sdv(vars.continuous_variables(),
186 vars.discrete_int_variables(), vars.discrete_real_variables(),
187 Pecos::DEEP_COPY);
188 if (computeAdditive || badScalingFlag) {
189 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it) {
190 index = *it;
191 Pecos::SurrogateDataResp sdr(dataOrder, numVars);
192 if (dataOrder & 1)
193 discrepCalc.compute_additive(truth_response.function_value(index),
194 approx_response.function_value(index), sdr.response_function_view());
195 if (dataOrder & 2) {
196 RealVector sdr_grad(sdr.response_gradient_view()); // compiler reqmt
197 discrepCalc.compute_additive(
198 truth_response.function_gradient_view(index),
199 approx_response.function_gradient_view(index), sdr_grad);
200 }
201 if (dataOrder & 4) {
202 RealSymMatrix sdr_hess(sdr.response_hessian_view()); // compiler reqmt
203 discrepCalc.compute_additive(truth_response.function_hessian(index),
204 approx_response.function_hessian(index), sdr_hess);
205 }
206 if (!quiet_flag)
207 Cout << "\nAdditive correction computed:\n" << sdr;
208
209 // shallow copy of vars/resp into the active SurrogateData instance
210 if (approxType.empty()) { // update anchor data
211 addCorrections[index].add(sdv, true, false); // anchor, shallow
212 addCorrections[index].add(sdr, true, false); // anchor, shallow
213 }
214 else {
215 addCorrections[index].add(sdv, false, false); // not anchor, shallow
216 addCorrections[index].add(sdr, false, false); // not anchor, shallow
217 }
218 }
219 }
220
221 if (computeMultiplicative && !badScalingFlag) {
222 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it) {
223 index = *it;
224 Pecos::SurrogateDataResp sdr(dataOrder, numVars);
225 if (dataOrder & 1)
226 discrepCalc.compute_multiplicative(truth_response.function_value(index),
227 approx_response.function_value(index), sdr.response_function_view());
228 if (dataOrder & 2) {
229 RealVector sdr_grad(sdr.response_gradient_view()); // compiler reqmt
230 discrepCalc.compute_multiplicative(truth_response.function_value(index),
231 truth_response.function_gradient_view(index),
232 approx_response.function_value(index),
233 approx_response.function_gradient_view(index), sdr_grad);
234 }
235 if (dataOrder & 4) {
236 RealSymMatrix sdr_hess(sdr.response_hessian_view()); // compiler reqmt
237 discrepCalc.compute_multiplicative(truth_response.function_value(index),
238 truth_response.function_gradient_view(index),
239 truth_response.function_hessian(index),
240 approx_response.function_value(index),
241 approx_response.function_gradient_view(index),
242 approx_response.function_hessian(index), sdr_hess);
243 }
244 if (!quiet_flag)
245 Cout << "\nMultiplicative correction computed:\n" << sdr;
246
247 // update anchor data; shallow cp of vars/resp into active SurrogateData
248 multCorrections[index].add(sdv, true, false); // anchor, shallow
249 multCorrections[index].add(sdr, true, false); // anchor, shallow
250 }
251 }
252
253 // Compute combination factors once for each new correction. combineFactors =
254 // [f_hi(x_pp) - f_hi_beta(x_pp)]/[f_hi_alpha(x_pp) - f_hi_beta(x_pp)]. This
255 // ratio goes -> 1 (use additive alone) if f_hi_alpha(x_pp) -> f_hi(x_pp) and
256 // it goes -> 0 (use multiplicative alone) if f_hi_beta(x_pp) -> f_hi(x_pp).
257 if (correctionType == COMBINED_CORRECTION && correctionComputed &&
258 !badScalingFlag) {
259 RealVector alpha_corr_fns(approxFnsPrevCenter),
260 beta_corr_fns(approxFnsPrevCenter);
261 apply_additive(correctionPrevCenterPt, alpha_corr_fns);
262 apply_multiplicative(correctionPrevCenterPt, beta_corr_fns);
263 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it) {
264 index = *it;
265 Real numer = truthFnsPrevCenter[index] - beta_corr_fns[index];
266 Real denom = alpha_corr_fns[index] - beta_corr_fns[index];
267 combineFactors[index] = (std::fabs(denom) > Pecos::SMALL_NUMBER) ?
268 numer/denom : 1.;
269 #ifdef DEBUG
270 Cout << "truth prev = " << truthFnsPrevCenter[index]
271 << " additive prev = " << alpha_corr_fns[index]
272 << " multiplicative prev = " << beta_corr_fns[index]
273 << "\nnumer = " << numer << " denom = " << denom << '\n';
274 #endif
275 }
276 if (!quiet_flag)
277 Cout << "\nCombined correction computed: combination factors =\n"
278 << combineFactors << '\n';
279
280 #ifdef DEBUG
281 Cout << "Testing final match at previous point\n";
282 Response approx_copy = approx_response.copy();
283 ActiveSet fns_set = approx_response.active_set(); // copy
284 fns_set.request_values(1); // correct fn values only
285 approx_copy.active_set(fns_set);
286 approx_copy.function_values(approxFnsPrevCenter);
287 apply(correctionPrevCenterPt, approx_copy);
288 #endif
289 }
290
291 if (computeAdditive || computeMultiplicative)
292 correctionComputed = true;
293 }
294
295
296 void DiscrepancyCorrection::
compute(const Response & truth_response,const Response & approx_response,Response & discrep_response,bool quiet_flag)297 compute(//const Variables& vars,
298 const Response& truth_response, const Response& approx_response,
299 Response& discrep_response,
300 //Response& add_discrep_response, Response& mult_discrep_response,
301 bool quiet_flag)
302 {
303 // The incoming approx_response is assumed to be uncorrected (i.e.,
304 // correction has not been applied to it previously). In this case,
305 // it is not necessary to back out a previous correction, and the
306 // computation of the new correction is straightforward.
307
308 // for asynchronous operations involving IntResponseMaps, the incoming
309 // discrep_response may be empty
310 if (discrep_response.is_null()) {
311 if (truth_response.active_set() != approx_response.active_set()) {
312 Cerr << "Error: active set inconsistency between truth and approx in "
313 << "DiscrepancyCorrection::compute()." << std::endl;
314 abort_handler(-1);
315 }
316 discrep_response = truth_response.copy();
317 }
318
319 // update previous center data arrays for combined corrections
320 // TO DO: augment approxFnsPrevCenter logic for data fit surrogates. May
321 // require additional fn evaluation of previous pt on current surrogate.
322 // This could combine with DB lookups within apply_multiplicative()
323 // (approx re-evaluated if not found in DB search).
324 int index; size_t j, k; ISIter it;
325 //if (correctionType == COMBINED_CORRECTION && correctionComputed) {
326 // correctionPrevCenterPt = correctionCenterPt;
327 // approxFnsPrevCenter = approxFnsCenter;
328 // truthFnsPrevCenter = truthFnsCenter;
329 //}
330 // update current center data arrays (deep copies for history)
331 const RealVector& truth_fns = truth_response.function_values();
332 const RealVector& approx_fns = approx_response.function_values();
333 bool fall_back
334 = (computeMultiplicative && correctionOrder >= 1 && surrModel.is_null());
335 //if (correctionType == COMBINED_CORRECTION)
336 // { copy_data(c_vars, correctionCenterPt); truthFnsCenter = truth_fns; }
337 if (/*correctionType == COMBINED_CORRECTION ||*/ fall_back)
338 approxFnsCenter = approx_fns;
339 if (fall_back) approxGradsCenter = approx_response.function_gradients();
340
341 // detect numerical issues with multiplicative scaling
342 badScalingFlag = (computeMultiplicative) ?
343 discrepCalc.check_multiplicative(truth_fns, approx_fns, correctionOrder) :
344 false;
345 if (badScalingFlag)
346 Cerr << "\nWarning: Multiplicative correction temporarily deactivated "
347 << "due to functions near zero.\n Additive correction will "
348 << "be used.\n";
349
350 // compute additive/multiplicative discrepancy and store in discrep_response
351 const ShortArray& discrep_asv = discrep_response.active_set_request_vector();
352 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it) {
353 index = *it;
354 if (computeAdditive || badScalingFlag) {
355 if ( (dataOrder & 1) && (discrep_asv[index] & 1) )
356 discrepCalc.compute_additive(truth_response.function_value(index),
357 approx_response.function_value(index),
358 discrep_response.function_value_view(index));
359 if ( (dataOrder & 2) && (discrep_asv[index] & 2) ) {
360 RealVector discrep_grad(discrep_response.function_gradient_view(index));
361 discrepCalc.compute_additive(
362 truth_response.function_gradient_view(index),
363 approx_response.function_gradient_view(index), discrep_grad);
364 }
365 if ( (dataOrder & 4) && (discrep_asv[index] & 4) ) {
366 RealSymMatrix
367 discrep_hess(discrep_response.function_hessian_view(index));
368 discrepCalc.compute_additive(truth_response.function_hessian(index),
369 approx_response.function_hessian(index), discrep_hess);
370 }
371 }
372 else if (correctionType == COMBINED_CORRECTION) {
373 // for a single discrep_response, only a single correction type can be
374 // applied (corrections can be combined when applying but must be stored
375 // separately when computing)
376 Cerr << "Error: combined corrections not currently supported for compute"
377 << "() applied to a single response discrepancy." << std::endl;
378 abort_handler(-1);
379 //compute_additive(truth_response, approx_response, index,
380 // add_discrep_fn, add_discrep_grad, add_discrep_hess);
381 //compute_multiplicative(truth_response, approx_response, index,
382 // mult_discrep_fn, mult_discrep_grad,
383 // mult_discrep_hess);
384 }
385 else if (computeMultiplicative) {
386 if ( (dataOrder & 1) && (discrep_asv[index] & 1) )
387 discrepCalc.compute_multiplicative(truth_response.function_value(index),
388 approx_response.function_value(index),
389 discrep_response.function_value_view(index));
390 if ( (dataOrder & 2) && (discrep_asv[index] & 2) ) {
391 RealVector discrep_grad(discrep_response.function_gradient_view(index));
392 discrepCalc.compute_multiplicative(truth_response.function_value(index),
393 truth_response.function_gradient_view(index),
394 approx_response.function_value(index),
395 approx_response.function_gradient_view(index), discrep_grad);
396 }
397 if ( (dataOrder & 4) && (discrep_asv[index] & 4) ) {
398 RealSymMatrix
399 discrep_hess(discrep_response.function_hessian_view(index));
400 discrepCalc.compute_multiplicative(truth_response.function_value(index),
401 truth_response.function_gradient_view(index),
402 truth_response.function_hessian(index),
403 approx_response.function_value(index),
404 approx_response.function_gradient_view(index),
405 approx_response.function_hessian(index), discrep_hess);
406 }
407 }
408 }
409
410 if (!quiet_flag) {
411 if (computeAdditive || badScalingFlag)
412 Cout << "\nAdditive correction computed:\n" << discrep_response;
413 else if (computeMultiplicative)
414 Cout << "\nMultiplicative correction computed:\n" << discrep_response;
415 }
416
417 //if (correctionType == COMBINED_CORRECTION && correctionComputed &&
418 // !badScalingFlag)
419 // compute_combine_factors(add_discrep_response, mult_discrep_response);
420 }
421
422
423 void DiscrepancyCorrection::
compute(const VariablesArray & vars_array,const ResponseArray & truth_response_array,const ResponseArray & approx_response_array,bool quiet_flag)424 compute(const VariablesArray& vars_array, const ResponseArray&
425 truth_response_array, const ResponseArray& approx_response_array,
426 bool quiet_flag)
427 {
428 // The incoming approx_response is assumed to be uncorrected (i.e.,
429 // correction has not been applied to it previously). In this case,
430 // it is not necessary to back out a previous correction, and the
431 // computation of the new correction is straightforward.
432
433 int i, index; ISIter it;
434
435 for (i=0; i < vars_array.size(); i++)
436 compute(vars_array[i], truth_response_array[i], approx_response_array[i],
437 quiet_flag);
438
439 if (!approxType.empty()) {
440 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it) {
441 index = *it;
442 addCorrections[index].build();
443 //const String GPstring = "modDiscrep";
444 //const String GPPrefix = "GP";
445 //addCorrections[index].export_model(GPstring, GPPrefix, ALGEBRAIC_FILE);
446 }
447 }
448 }
449
450
451 /*
452 void DiscrepancyCorrection::
453 compute_additive(const Response& truth_response,
454 const Response& approx_response, int index,
455 Real& discrep_fn, RealVector& discrep_grad,
456 RealSymMatrix& discrep_hess)
457 {
458 // -----------------------------
459 // Additive 0th order correction
460 // -----------------------------
461 if (dataOrder & 1)
462     discrepCalc.compute_additive(truth_response.function_value(index),
463 approx_response.function_value(index),
464 discrep_fn);
465 // -----------------------------
466 // Additive 1st order correction
467 // -----------------------------
468 if (dataOrder & 2)
469     discrepCalc.compute_additive(truth_response.function_gradient_view(index),
470 approx_response.function_gradient_view(index),
471 discrep_grad);
472 // -----------------------------
473 // Additive 2nd order correction
474 // -----------------------------
475 if (dataOrder & 4)
476     discrepCalc.compute_additive(truth_response.function_hessian(index),
477 approx_response.function_hessian(index),
478 discrep_hess);
479 }
480
481
482 void DiscrepancyCorrection::
483 compute_multiplicative(const Response& truth_response,
484 const Response& approx_response, int index,
485 Real& discrep_fn, RealVector& discrep_grad,
486 RealSymMatrix& discrep_hess)
487 {
488 // -----------------------------------
489 // Multiplicative 2nd order correction
490 // -----------------------------------
491 if (dataOrder & 4)
492 discrepCalc.compute_multiplicative(truth_response.function_value(index),
493 truth_response.function_gradient_view(index),
494 truth_response.function_hessian(index),
495 approx_response.function_value(index),
496 approx_response.function_gradient_view(index),
497 approx_response.function_hessian(index),
498 discrep_fn, discrep_grad, discrep_hess);
499 // -----------------------------------
500 // Multiplicative 1st order correction
501 // -----------------------------------
502 else if (dataOrder & 2)
503 discrepCalc.compute_multiplicative(truth_response.function_value(index),
504 truth_response.function_gradient_view(index),
505 approx_response.function_value(index),
506 approx_response.function_gradient_view(index), discrep_fn, discrep_grad);
507 // -----------------------------------
508 // Multiplicative 0th order correction
509 // -----------------------------------
510 else if (dataOrder & 1)
511     discrepCalc.compute_multiplicative(truth_response.function_value(index),
512 approx_response.function_value(index), discrep_fn);
513 }
514
515
516 void DiscrepancyCorrection::
517 compute_additive(const Response& truth_response,
518 const Response& approx_response, int index,
519 Real& discrep_fn, RealVector& discrep_grad,
520 RealSymMatrix& discrep_hess)
521 {
522 // -----------------------------
523 // Additive 0th order correction
524 // -----------------------------
525 if (dataOrder & 1)
526 discrep_fn = truth_response.function_value(index)
527 - approx_response.function_value(index);
528 if (approxType.empty()) {
529 // -----------------------------
530 // Additive 1st order correction
531 // -----------------------------
532 if ( (dataOrder & 2) && !discrep_grad.empty() ) {
533 const Real* truth_grad = truth_response.function_gradient(index);
534 const Real* approx_grad = approx_response.function_gradient(index);
535 for (size_t j=0; j<numVars; ++j)
536 discrep_grad[j] = truth_grad[j] - approx_grad[j];
537 }
538 // -----------------------------
539 // Additive 2nd order correction
540 // -----------------------------
541 if ( (dataOrder & 4) && !discrep_hess.empty() ) {
542 const RealSymMatrix& truth_hess = truth_response.function_hessian(index);
543 const RealSymMatrix& approx_hess
544 = approx_response.function_hessian(index);
545 for (size_t j=0; j<numVars; ++j)
546 for (size_t k=0; k<=j; ++k) // lower half
547 discrep_hess(j,k) = truth_hess(j,k) - approx_hess(j,k);
548 }
549 }
550 }
551
552
553 void DiscrepancyCorrection::
554 compute_multiplicative(const Response& truth_response,
555 const Response& approx_response, int index,
556 Real& discrep_fn, RealVector& discrep_grad,
557 RealSymMatrix& discrep_hess)
558 {
559 // -----------------------------------
560 // Multiplicative 0th order correction
561 // -----------------------------------
562 const Real& truth_fn = truth_response.function_value(index);
563 const Real& approx_fn = approx_response.function_value(index);
564 Real ratio = truth_fn / approx_fn;
565 if (dataOrder & 1)
566 discrep_fn = ratio;
567 // -----------------------------------
568 // Multiplicative 1st order correction
569 // -----------------------------------
570 // The beta-correction method is based on the work of Chang and Haftka,
571 // and Alexandrov. It is a multiplicative correction like the "scaled"
572 // correction method, but it uses gradient information to achieve
573 // 1st-order consistency (matches the high-fidelity function values and
574 // the high-fidelity gradients at the center of the approximation region).
575 if ( (dataOrder & 2) && !discrep_grad.empty() ) {
576 const Real* truth_grad = truth_response.function_gradient(index);
577 const Real* approx_grad = approx_response.function_gradient(index);
578 for (size_t j=0; j<numVars; ++j)
579 discrep_grad[j] = (truth_grad[j] - approx_grad[j] * ratio) / approx_fn;
580 }
581 // -----------------------------------
582 // Multiplicative 2nd order correction
583 // -----------------------------------
584 if ( (dataOrder & 4) && !discrep_hess.empty() ) {
585 const Real* truth_grad = truth_response.function_gradient(index);
586 const Real* approx_grad = approx_response.function_gradient(index);
587 const RealSymMatrix& truth_hess = truth_response.function_hessian(index);
588 const RealSymMatrix& approx_hess = approx_response.function_hessian(index);
589 // consider use of Teuchos assign and operator-=
590 Real f_lo_2 = approx_fn * approx_fn;
591 for (size_t j=0; j<numVars; ++j)
592 for (size_t k=0; k<=j; ++k) // lower half
593 discrep_hess(j,k) = ( truth_hess(j,k) * approx_fn - truth_fn *
594 approx_hess(j,k) + 2. * ratio * approx_grad[j] * approx_grad[k] -
595 truth_grad[j] * approx_grad[k] - approx_grad[j] * truth_grad[k] )
596 / f_lo_2;
597 }
598 }
599
600
601 bool DiscrepancyCorrection::
602 check_multiplicative(const RealVector& truth_fns, const RealVector& approx_fns)
603 {
604 // Multiplicative will fail if response functions are near zero.
605 // 0th order: a truth_val == 0 causes a zero scaling which will cause
606 // optimization failure; an approx_val == 0 will cause a
607 // division by zero FPE.
608 // 1st/2nd order: a truth_val == 0 is OK (so long as the total scaling
609 // function != 0); an approx_val == 0 will cause a division
610 // by zero FPE.
611 // In either case, automatically transition to additive correction. Current
612 // logic transitions back to multiplicative as soon as the response fns are
613 // no longer near zero.
614 bool bad_scaling = false; int index; ISIter it;
615 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it) {
616 index = *it;
617 if ( std::fabs(approx_fns[index]) < Pecos::SMALL_NUMBER ||
618 ( correctionOrder == 0 &&
619 std::fabs(truth_fns[index]) < Pecos::SMALL_NUMBER ) )
620 { bad_scaling = true; break; }
621 }
622 if (bad_scaling)
623 Cerr << "\nWarning: Multiplicative correction temporarily deactivated "
624 << "due to functions near zero.\n Additive correction will "
625 << "be used.\n";
626 return bad_scaling;
627 }
628 */
629
630
631 void DiscrepancyCorrection::
apply(const Variables & vars,Response & approx_response,bool quiet_flag)632 apply(const Variables& vars, Response& approx_response, bool quiet_flag)
633 {
634 if (!correctionType || !correctionComputed)
635 return;
636
637 // update approx_response with the alpha/beta/combined corrected data
638 if (correctionType == ADDITIVE_CORRECTION || badScalingFlag) // use alpha
639 apply_additive(vars, approx_response);
640 else if (correctionType == MULTIPLICATIVE_CORRECTION) // use beta
641 apply_multiplicative(vars, approx_response);
642 else if (correctionType == COMBINED_CORRECTION) { // use both alpha and beta
643
644 // compute {add,mult}_response contributions to combined correction
645 Response add_response = approx_response.copy(),
646 mult_response = approx_response.copy();
647 apply_additive(vars, add_response);
648 apply_multiplicative(vars, mult_response);
649
650 // compute convex combination of add_response and mult_response
651 ISIter it; int index; size_t j, k;
652 const ShortArray& asv = approx_response.active_set_request_vector();
653 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it) {
654 index = *it;
655 Real cf = combineFactors[index], ccf = 1. - cf;
656 if (asv[index] & 1) {
657 Real corrected_fn = cf * add_response.function_value(index)
658 + ccf * mult_response.function_value(index);
659 approx_response.function_value(corrected_fn, index);
660 }
661 if (asv[index] & 2) {
662 RealVector corrected_grad
663 = approx_response.function_gradient_view(index);
664 const Real* add_grad = add_response.function_gradient(index);
665 const Real* mult_grad = mult_response.function_gradient(index);
666 for (j=0; j<numVars; j++)
667 corrected_grad[j] = cf * add_grad[j] + ccf * mult_grad[j];
668 }
669 if (asv[index] & 4) {
670 RealSymMatrix corrected_hess
671 = approx_response.function_hessian_view(index);
672 const RealSymMatrix& add_hess = add_response.function_hessian(index);
673 const RealSymMatrix& mult_hess = mult_response.function_hessian(index);
674 for (j=0; j<numVars; ++j)
675 for (k=0; k<=j; ++k)
676 corrected_hess(j,k) = cf * add_hess(j,k) + ccf * mult_hess(j,k);
677 }
678 }
679 }
680
681 if (!quiet_flag)
682 Cout << "\nCorrection applied: corrected response =\n" << approx_response;
683 }
684
685
686 void DiscrepancyCorrection::
apply_additive(const Variables & vars,Response & approx_response)687 apply_additive(const Variables& vars, Response& approx_response)
688 {
689 size_t index; ISIter it;
690 const ShortArray& asv = approx_response.active_set_request_vector();
691 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it) {
692 index = *it;
693 Approximation& add_corr = addCorrections[index];
694 if (asv[index] & 1)
695 approx_response.function_value(approx_response.function_value(index) +
696 add_corr.value(vars), index);
697 if (correctionOrder >= 1 && asv[index] & 2) {
698 // update view (no reassignment):
699 RealVector approx_grad = approx_response.function_gradient_view(index);
700 approx_grad += add_corr.gradient(vars);
701 }
702 if (correctionOrder == 2 && asv[index] & 4) {
703 // update view (no reassignment):
704 RealSymMatrix approx_hess = approx_response.function_hessian_view(index);
705 approx_hess += add_corr.hessian(vars);
706 }
707 }
708 }
709
710
711 void DiscrepancyCorrection::
apply_multiplicative(const Variables & vars,Response & approx_response)712 apply_multiplicative(const Variables& vars, Response& approx_response)
713 {
714 // Determine needs for retrieving uncorrected data due to cases where
715 // the data required to apply the correction is different from the
716 // active data being corrected.
717 bool fn_db_search = false, grad_db_search = false;
718 const ShortArray& asv = approx_response.active_set_request_vector();
719 ShortArray fn_db_asv, grad_db_asv; ISIter it; size_t j, k, index;
720 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it) {
721 index = *it;
722 if ( !(asv[index] & 1) && ( ((asv[index] & 2) && correctionOrder >= 1) ||
723 ((asv[index] & 4) && correctionOrder == 2) ) ) {
724 if (fn_db_asv.empty()) fn_db_asv.assign(numFns, 0);
725 fn_db_asv[index] = 1; fn_db_search = true;
726 }
727 if ( !(asv[index] & 2) && (asv[index] & 4) && correctionOrder >= 1) {
728 if (grad_db_asv.empty()) grad_db_asv.assign(numFns, 0);
729 grad_db_asv[index] = 2; grad_db_search = true;
730 }
731 }
732 // Retrieve any uncorrected fn values/gradients required for use in gradient
733 // and Hessian corrections. They are not immediately available in cases where
734 // the correction is applied only to the fn gradient (i.e., when the current
735 // asv contains 2's due to DOT/CONMIN/OPT++ requesting gradients separately)
736 // or only to the fn Hessian (i.e., when the current asv contains 4's due to
737 // OPT++ requesting Hessians separately). If surrModel is initialized, we
738 // lookup the data in data_pairs and re-evaluate if not found. If surrModel
739 // is not initialized, we fall back to using uncorrected data from the center
740 // of the current trust region (TR). This still satisifies the required
741 // consistency at the TR center, but is less accurate over the rest of the TR.
742 RealVector uncorr_fns; RealMatrix uncorr_grads; RealVector empty_rv;
743 if (fn_db_search) {
744 uncorr_fns.sizeUninitialized(numFns);
745 if (surrModel.is_null()) { // fallback position
746 Cerr << "Warning: original function values not available at the current "
747 << "point.\n Multiplicative correction falling back to "
748 << "function values from the correction point." << std::endl;
749 for (size_t i=0; i<numFns; ++i)
750 if (fn_db_asv[i])
751 uncorr_fns[i] = approxFnsCenter[i];
752 }
753 else {
754 const Response& db_resp = search_db(vars, fn_db_asv);
755 for (size_t i=0; i<numFns; ++i)
756 if (fn_db_asv[i])
757 uncorr_fns[i] = db_resp.function_value(i);
758 }
759 }
760 if (grad_db_search) {
761 uncorr_grads.shapeUninitialized(numVars, numFns);
762 if (surrModel.is_null()) { // fallback position
763 Cerr << "Warning: original function gradients not available at the "
764 << "current point.\n Multiplicative correction falling back "
765 << "to function gradients from the correction point." << std::endl;
766 for (int i=0; i<numFns; ++i)
767 if (grad_db_asv[i])
768 Teuchos::setCol(Teuchos::getCol(Teuchos::View, approxGradsCenter, i),
769 i, uncorr_grads);
770 }
771 else {
772 const Response& db_resp = search_db(vars, grad_db_asv);
773 for (int i=0; i<numFns; ++i)
774 if (grad_db_asv[i])
775 Teuchos::setCol(db_resp.function_gradient_view(i), i, uncorr_grads);
776 }
777 }
778
779 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it) {
780 index = *it;
781 Approximation& mult_corr = multCorrections[index];
782 Real fn_corr = mult_corr.value(vars);
783 const RealVector& grad_corr = (correctionOrder >= 1 && (asv[index] & 6)) ?
784 mult_corr.gradient(vars) : empty_rv;
785 // apply corrections in descending derivative order to avoid
786 // disturbing original approx fn/grad values
787 if (asv[index] & 4) {
788 // update view (no reassignment):
789 RealSymMatrix approx_hess = approx_response.function_hessian_view(index);
790 const Real* approx_grad = (grad_db_search) ? uncorr_grads[index] :
791 approx_response.function_gradient(index);
792 switch (correctionOrder) {
793 case 2: {
794 const RealSymMatrix& hess_corr = mult_corr.hessian(vars);
795 const Real& approx_fn = (fn_db_search) ? uncorr_fns[index] :
796 approx_response.function_value(index);
797 for (j=0; j<numVars; ++j)
798 for (k=0; k<=j; ++k)
799 approx_hess(j,k) = approx_hess(j,k) * fn_corr
800 + hess_corr(j,k) * approx_fn + grad_corr[j] * approx_grad[k]
801 + grad_corr[k] * approx_grad[j];
802 break;
803 }
804 case 1:
805 for (j=0; j<numVars; ++j)
806 for (k=0; k<=j; ++k)
807 approx_hess(j,k) = approx_hess(j,k) * fn_corr
808 + grad_corr[j] * approx_grad[k] + grad_corr[k] * approx_grad[j];
809 break;
810 case 0:
811 approx_hess *= fn_corr;
812 break;
813 }
814 }
815 if (asv[index] & 2) {
816 // update view (no reassignment):
817 RealVector approx_grad = approx_response.function_gradient_view(index);
818 const Real& approx_fn = (fn_db_search) ? uncorr_fns[index] :
819 approx_response.function_value(index);
820 approx_grad *= fn_corr; // all correction orders
821 if (correctionOrder >= 1)
822 for (j=0; j<numVars; ++j)
823 approx_grad[j] += grad_corr[j] * approx_fn;
824 }
825 if (asv[index] & 1)
826 approx_response.function_value(approx_response.function_value(index) *
827 fn_corr, index);
828 }
829 }
830
831 void DiscrepancyCorrection::
compute_variance(const VariablesArray & vars_array,RealMatrix & approx_variance,bool quiet_flag)832 compute_variance(const VariablesArray& vars_array, RealMatrix& approx_variance,
833 bool quiet_flag)
834 {
835 int index; ISIter it;
836 RealVector pred_var(vars_array.size());
837 //Cout << "Vars array size = " << vars_array.size();
838 for (it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it) {
839 index = *it;
840 for (int i=0; i < vars_array.size(); i++) {
841 //std::cout << "vars array = " << vars_array[i] << std::endl;
842 pred_var[i] = addCorrections[index].prediction_variance(vars_array[i]);
843 }
844 Teuchos::setCol(pred_var, index, approx_variance);
845 }
846
847 // KAM: turn this back on?
848 //if (!quiet_flag)
849 //Cout << "\nCorrection variances computed:\n" << approx_variance;
850 }
851
852 const Response& DiscrepancyCorrection::
search_db(const Variables & search_vars,const ShortArray & search_asv)853 search_db(const Variables& search_vars, const ShortArray& search_asv)
854 {
855 // Retrieve missing uncorrected approximate data for use in derivative
856 // multiplicative corrections. The correct approach is to retrieve the
857 // missing data for the current point in parameter space, and this approach
858 // is when data is either available directly as indicated by the asv, can be
859 // retrieved via a data_pairs search, or can be recomputed). A final fallback
860 // is to employ approx center data; this approach is used when surrModel has
861 // not been initialized (see apply_multiplicative()). Recomputation can occur
862 // either for ApproximationInterface data in DataFitSurrModels or low fidelity
863 // data in HierarchSurrModels that involves additional model recursions, since
864 // neither of these data sets are catalogued in data_pairs.
865
866 // query data_pairs to extract the response at the current pt
867 ActiveSet search_set = surrModel.current_response().active_set(); // copy
868 search_set.request_vector(search_asv);
869 PRPCacheHIter cache_it = lookup_by_val(data_pairs, surrModel.interface_id(),
870 search_vars, search_set);
871
872 if (cache_it == data_pairs.get<hashed>().end()) {
873 // perform approx fn eval to retrieve missing data
874 surrModel.active_variables(search_vars);
875 surrModel.evaluate(search_set);
876 return surrModel.current_response();
877 }
878 else
879 return cache_it->response();
880 }
881
882 } // namespace Dakota
883