/*  _______________________________________________________________________

    DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
    Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
    This software is distributed under the GNU Lesser General Public License.
    For more information, see the README file in the top Dakota directory.
    _______________________________________________________________________ */

//- Class:       NonDExpansion
//- Description: Implementation code for NonDExpansion class
//- Owner:       Mike Eldred

#include "dakota_system_defs.hpp"
#include "dakota_data_io.hpp"
#include "NonDExpansion.hpp"
#include "NonDCubature.hpp"
#include "NonDQuadrature.hpp"
#include "NonDSparseGrid.hpp"
#include "NonDLHSSampling.hpp"
#include "NonDAdaptImpSampling.hpp"
#include "RecastModel.hpp"
#include "DataFitSurrModel.hpp"
#include "DakotaResponse.hpp"
#include "ProblemDescDB.hpp"
#include "SharedPecosApproxData.hpp"
#include "PecosApproximation.hpp"
#include "pecos_stat_util.hpp"
#include "SensAnalysisGlobal.hpp"
#include "DiscrepancyCorrection.hpp"
#include "dakota_tabular_io.hpp"
#include "ParallelLibrary.hpp"
#include "NatafTransformation.hpp"

//#define DEBUG
//#define CONVERGENCE_DATA
//#define TEST_HESSIANS


namespace Dakota {

NonDExpansion::NonDExpansion(ProblemDescDB& problem_db, Model& model):
  NonD(problem_db, model), expansionCoeffsApproach(-1),
  expansionBasisType(problem_db.get_short("method.nond.expansion_basis_type")),
  statsMetricMode(
    problem_db.get_short("method.nond.refinement_statistics_mode")),
  relativeMetric(
    problem_db.get_bool("method.nond.relative_convergence_metric")),
  dimPrefSpec(problem_db.get_rv("method.nond.dimension_preference")),
  collocPtsSeqSpec(problem_db.get_sza("method.nond.collocation_points")),
  collocRatio(problem_db.get_real("method.nond.collocation_ratio")),
  termsOrder(1.),
  tensorRegression(problem_db.get_bool("method.nond.tensor_grid")),
  randomSeed(problem_db.get_int("method.random_seed")),
  fixedSeed(problem_db.get_bool("method.fixed_seed")), mlmfIter(0),
  multilevAllocControl(
    problem_db.get_short("method.nond.multilevel_allocation_control")),
  multilevDiscrepEmulation(
    problem_db.get_short("method.nond.multilevel_discrepancy_emulation")),
  kappaEstimatorRate(
    problem_db.get_real("method.nond.multilevel_estimator_rate")),
  gammaEstimatorScale(1.), numSamplesOnModel(0),
  numSamplesOnExpansion(problem_db.get_int("method.nond.samples_on_emulator")),
  nestedRules(false),
  piecewiseBasis(problem_db.get_bool("method.nond.piecewise_basis")),
  useDerivs(problem_db.get_bool("method.derivative_usage")),
  refineType(problem_db.get_short("method.nond.expansion_refinement_type")),
  refineControl(
    problem_db.get_short("method.nond.expansion_refinement_control")),
  refineMetric(Pecos::NO_METRIC),
  softConvLimit(problem_db.get_ushort("method.soft_convergence_limit")),
  numUncertainQuant(0),
  maxRefineIterations(
    problem_db.get_int("method.nond.max_refinement_iterations")),
  maxSolverIterations(problem_db.get_int("method.nond.max_solver_iterations")),
  ruleNestingOverride(problem_db.get_short("method.nond.nesting_override")),
  ruleGrowthOverride(problem_db.get_short("method.nond.growth_override")),
  vbdFlag(problem_db.get_bool("method.variance_based_decomp")),
  // Note: minimum VBD order for variance-controlled refinement is enforced
  //       in NonDExpansion::construct_{quadrature,sparse_grid}
  vbdOrderLimit(problem_db.get_ushort("method.nond.vbd_interaction_order")),
  vbdDropTol(problem_db.get_real("method.vbd_drop_tolerance")),
  covarianceControl(problem_db.get_short("method.nond.covariance_control"))
  // For supporting construct_incremental_lhs():
  //expansionSampleType(problem_db.get_string("method.expansion_sample_type"))
{
  check_dimension_preference(dimPrefSpec);
  initialize_counts();
  initialize_response_covariance();
  initialize_final_statistics(); // level mappings are available
}


NonDExpansion::
NonDExpansion(unsigned short method_name, Model& model,
              short exp_coeffs_approach, const RealVector& dim_pref, int seed,
              short refine_type, short refine_control, short covar_control,
              Real colloc_ratio, short rule_nest, short rule_growth,
              bool piecewise_basis, bool use_derivs):
  NonD(method_name, model), expansionCoeffsApproach(exp_coeffs_approach),
  expansionBasisType(Pecos::DEFAULT_BASIS),
  statsMetricMode(Pecos::DEFAULT_EXPANSION_STATS), relativeMetric(true),
  dimPrefSpec(dim_pref), collocRatio(colloc_ratio), termsOrder(1.),
  tensorRegression(false), randomSeed(seed), fixedSeed(false), mlmfIter(0),
  multilevAllocControl(DEFAULT_MLMF_CONTROL),
  multilevDiscrepEmulation(DEFAULT_EMULATION), kappaEstimatorRate(2.),
  gammaEstimatorScale(1.), numSamplesOnModel(0), numSamplesOnExpansion(0),
  nestedRules(false), piecewiseBasis(piecewise_basis), useDerivs(use_derivs),
  refineType(refine_type), refineControl(refine_control),
  refineMetric(Pecos::NO_METRIC), softConvLimit(3), numUncertainQuant(0),
  maxRefineIterations(100), maxSolverIterations(-1),
  ruleNestingOverride(rule_nest), ruleGrowthOverride(rule_growth),
  vbdFlag(false), vbdOrderLimit(0), vbdDropTol(-1.),
  covarianceControl(covar_control)
{
  check_dimension_preference(dimPrefSpec);
  initialize_counts();

  // level mappings not yet available
  // (defer initialize_response_covariance() and initialize_final_statistics())
}


NonDExpansion::~NonDExpansion()
{ }


void NonDExpansion::initialize_counts()
{
  const Variables&  vars      = iteratedModel.current_variables();
  const SizetArray& ac_totals = vars.shared_data().active_components_totals();

  // flag for combined var expansions which include non-probabilistic subset
  // (continuous only for now)
  allVars = (ac_totals[TOTAL_CDV] || ac_totals[TOTAL_CEUV] ||
             ac_totals[TOTAL_CSV]);

  // override default definition in NonD ctor.  If there are any aleatory
  // variables, then we will sample on that subset for probabilistic stats.
  bool euv = (ac_totals[TOTAL_CEUV]  || ac_totals[TOTAL_DEUIV] ||
              ac_totals[TOTAL_DEUSV] || ac_totals[TOTAL_DEURV]);
  bool auv = (ac_totals[TOTAL_CAUV]  || ac_totals[TOTAL_DAUIV] ||
              ac_totals[TOTAL_DAUSV] || ac_totals[TOTAL_DAURV]);
  epistemicStats = (euv && !auv);
}


bool NonDExpansion::resize()
{
  bool parent_reinit_comms = NonD::resize();

  check_dimension_preference(dimPrefSpec);
  initialize_counts();

  return parent_reinit_comms;
}


void NonDExpansion::derived_init_communicators(ParLevLIter pl_iter)
{
  // this is redundant with Model recursions except for PCE coeff import case
  //iteratedModel.init_communicators(pl_iter, maxEvalConcurrency);

  // uSpaceModel, expansionSampler, and importanceSampler use
  // NoDBBaseConstructor, so no need to manage DB list nodes at this level
  if (!expansionSampler.is_null())
    expansionSampler.init_communicators(pl_iter);
  else
    uSpaceModel.init_communicators(pl_iter, maxEvalConcurrency);

  if (!importanceSampler.is_null())
    importanceSampler.init_communicators(pl_iter);
}


void NonDExpansion::derived_set_communicators(ParLevLIter pl_iter)
{
  miPLIndex = methodPCIter->mi_parallel_level_index(pl_iter);

  // uSpaceModel, expansionSampler, and importanceSampler use
  // NoDBBaseConstructor, so no need to manage DB list nodes at this level
  if (!expansionSampler.is_null())
    expansionSampler.set_communicators(pl_iter);
  else
    uSpaceModel.set_communicators(pl_iter, maxEvalConcurrency);

  if (!importanceSampler.is_null())
    importanceSampler.set_communicators(pl_iter);
}


void NonDExpansion::derived_free_communicators(ParLevLIter pl_iter)
{
  if (!importanceSampler.is_null())
    importanceSampler.free_communicators(pl_iter);

  if (!expansionSampler.is_null())
    expansionSampler.free_communicators(pl_iter);
  else
    uSpaceModel.free_communicators(pl_iter, maxEvalConcurrency);

  // this is redundant with Model recursions except for PCE coeff import case
  //iteratedModel.free_communicators(pl_iter, maxEvalConcurrency);
}

void NonDExpansion::initialize_response_covariance()
{
  // if diagonal only, utilize a vector (or sparse matrix) to optimize
  // both computational performance and memory footprint
  bool refine_by_covar = (totalLevelRequests == 0);
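  // refinement is guided by (co)variance only when no response/probability
  // level mappings have been requested (see initialize_u_space_model())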
  switch (covarianceControl) {
  case DEFAULT_COVARIANCE: // assign context-specific default
    if (refine_by_covar)      covarianceControl = FULL_COVARIANCE;
    else if (subIteratorFlag) covarianceControl = NO_COVARIANCE;
    else                      covarianceControl = (numFunctions > 10) ?
                                DIAGONAL_COVARIANCE : FULL_COVARIANCE;
    break;
  case NO_COVARIANCE: // won't happen since NO_COVARIANCE not exposed in spec
    if (refine_by_covar) {
      Cerr << "Warning: covariance required by refinement.  Adding diagonal "
           << "covariance terms." << std::endl;
      covarianceControl = DIAGONAL_COVARIANCE;
    }
    break;
  }
  // now that setting is defined, size the variance/covariance storage
  if (covarianceControl == FULL_COVARIANCE)
    respCovariance.shapeUninitialized(numFunctions);
  else if (covarianceControl == DIAGONAL_COVARIANCE)
    respVariance.sizeUninitialized(numFunctions);
}


void NonDExpansion::resolve_inputs(short& u_space_type, short& data_order)
{
  bool err_flag = false,
    mf = (methodName == MULTIFIDELITY_POLYNOMIAL_CHAOS  ||
          methodName == MULTIFIDELITY_STOCH_COLLOCATION ||
          methodName == MULTIFIDELITY_FUNCTION_TRAIN    ),
    mf_greedy = (mf && multilevAllocControl == GREEDY_REFINEMENT);

  // Check for suitable distribution types.
  // Note: prefer warning in Analyzer (active discrete ignored), but
  // RandomVariable type mapping must be defined...
  if (numDiscreteIntVars || numDiscreteStringVars || numDiscreteRealVars) {
    Cerr << "\nError: active discrete variables are not currently supported "
         << "in NonDExpansion.\n";
    err_flag = true;
  }

  // check compatibility of refinement type with u-space type and MLMF settings
  switch (refineType) {
  case Pecos::H_REFINEMENT: // override
    switch (u_space_type) {
    //case EXTENDED_U: // default; not user-selectable -> quiet default reassign
    //  break;
    case ASKEY_U: case PARTIAL_ASKEY_U: // non-default
      Cerr << "\nWarning: overriding transformation from ASKEY to STD_UNIFORM "
           << "for h-refinement.\n" << std::endl;
      break;
    case STD_NORMAL_U: // non-default
      Cerr << "\nWarning: overriding transformation from WIENER to STD_UNIFORM "
           << "for h-refinement.\n" << std::endl;
      break;
    }

    u_space_type = STD_UNIFORM_U; piecewiseBasis = true;
    break;
  case Pecos::P_REFINEMENT:
    if (piecewiseBasis) {
      Cerr << "\nError: fixed order piecewise bases are incompatible with "
           << "p-refinement.\n";
      err_flag = true;
    }
    break;
  case Pecos::NO_REFINEMENT:
    if (mf_greedy) {
      Cerr << "Error: greedy integrated refinement of multifidelity expansions "
           << "requires a refinement specification for candidate generation.\n";
      err_flag = true;
    }
    break;
  }

  // Allow either ACTIVE or COMBINED with individual MF (default to COMBINED:
  // more important for relative, less so for absolute), but require COMBINED
  // for integrated MF.  Allow either sense for relativeMetric.
  switch (statsMetricMode) {
  case Pecos::NO_EXPANSION_STATS:      // should not happen
    Cerr << "Error: statsMetricMode definition required in NonDExpansion::"
         << "resolve_inputs()" << std::endl;
    err_flag = true;  break;

  case Pecos::DEFAULT_EXPANSION_STATS:  // assign default
    statsMetricMode = (mf) ?
      Pecos::COMBINED_EXPANSION_STATS : // individual || integrated MF
      Pecos::ACTIVE_EXPANSION_STATS;    // single fidelity || ML regression
    // can't propagate: shared_data_rep not yet constructed (DataFitSurrModel)
    break;

  case Pecos::ACTIVE_EXPANSION_STATS:   // ensure sanity of user spec
    // Disallow ACTIVE with integrated MLMF (greedy mlmfAllocControl)
    if (mf_greedy) {
      Cerr << "Error: combined expansion stats required for greedy integrated "
           << "multifidelity refinement." << std::endl;
      err_flag = true;
    }
    break;

  case Pecos::COMBINED_EXPANSION_STATS: // ensure sanity of user spec
    if (!mf) {
      Cerr << "Error: combined expansion stats are only used for "
           << "multifidelity refinement." << std::endl;
      err_flag = true;
    }
    break;
  }
  // if individual MLMF with COMBINED, reorder loop in multifidelity_expansion()
  // to get a better initial reference for individual adaptation
  // > More consistent with greedy_mf to always do this, but seems less
  //   desirable to disconnect adaptations from reference builds
  // > Also requires clearing the starting expansions for recursive emulation

  // Enforce current support for recursive emulation
  if (multilevDiscrepEmulation == RECURSIVE_EMULATION && mf_greedy) {
    Cerr << "Error: recursive emulation not currently supported for greedy "
         << "integrated refinement\n       due to recursive recomputation "
         << "requirements.\n";
    err_flag = true;
  }

  if (err_flag)
    abort_handler(METHOD_ERROR);
}


void NonDExpansion::
construct_cubature(Iterator& u_space_sampler, Model& g_u_model,
                   unsigned short cub_int_order)
{
  // sanity checks: CUBATURE precluded since no grid anisotropy for adaptive
  // and very limited refinement opportunities for uniform/adaptive
  if (refineType) {
    Cerr << "Error: uniform/adaptive refinement of cubature grids not "
         << "supported." << std::endl;
    abort_handler(METHOD_ERROR);
  }

  u_space_sampler.assign_rep(std::make_shared<NonDCubature>(g_u_model,
                             cub_int_order));
}


void NonDExpansion::
construct_quadrature(Iterator& u_space_sampler, Model& g_u_model,
                     unsigned short quad_order, const RealVector& dim_pref)
{
  // sanity checks: no GSG for TPQ
  if (refineControl == Pecos::DIMENSION_ADAPTIVE_CONTROL_GENERALIZED) {
    Cerr << "Error: generalized option does not support adaptive refinement of "
         << "tensor grids." << std::endl;
    abort_handler(METHOD_ERROR);
  }

  // enforce minimum required VBD control
  if (!vbdFlag && refineControl == Pecos::DIMENSION_ADAPTIVE_CONTROL_SOBOL)
    { vbdFlag = true; vbdOrderLimit = 1; }
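  // (main-effect order 1 is the minimum VBD setting needed to drive the
  // Sobol'-weighted dimension preference update in increment_grid())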

  // manage rule nesting override
  nestedRules = ( ruleNestingOverride == Pecos::NESTED ||
                  ( refineType && ruleNestingOverride != Pecos::NON_NESTED ) );

  short driver_mode = (false)//(methodName == STOCH_COLLOCATION) // TO DO
                    ? Pecos::INTERPOLATION_MODE : Pecos::INTEGRATION_MODE;

  u_space_sampler.assign_rep(std::make_shared<NonDQuadrature>(g_u_model,
                             quad_order, dim_pref, driver_mode));
}


void NonDExpansion::
construct_quadrature(Iterator& u_space_sampler, Model& g_u_model,
                     unsigned short quad_order, const RealVector& dim_pref,
                     int filtered_samples)
{
  // sanity checks: only uniform refinement supported for probabilistic
  // collocation (regression using filtered tensor grids)
  if (refineType && refineControl > Pecos::UNIFORM_CONTROL) {
    Cerr << "Error: only uniform refinement is supported for regression with "
         << "the tensor_grid option." << std::endl;
    abort_handler(METHOD_ERROR);
  }

  /*
  // enforce minimum required VBD control
  if (!vbdFlag && refineControl == Pecos::DIMENSION_ADAPTIVE_CONTROL_SOBOL)
    { vbdFlag = true; vbdOrderLimit = 1; }

  // don't use nested rules for tensor regression since this could induce zeros
  // in the Vandermonde matrix (high order rules contain zeros for lower-order
  // polynomials), despite protection of m >= p+1
  nestedRules = (ruleNestingOverride == Pecos::NESTED ||
    (refineType && ruleNestingOverride != Pecos::NON_NESTED));
  */

  short driver_mode = (false)//(methodName == STOCH_COLLOCATION) // TO DO
                    ? Pecos::INTERPOLATION_MODE : Pecos::INTEGRATION_MODE;

  u_space_sampler.assign_rep(std::make_shared<NonDQuadrature>(g_u_model,
                             quad_order, dim_pref, driver_mode,
                             filtered_samples));
}


void NonDExpansion::
construct_quadrature(Iterator& u_space_sampler, Model& g_u_model,
                     unsigned short quad_order, const RealVector& dim_pref,
                     int sub_samples, int seed)
{
  // sanity checks: only uniform refinement supported for probabilistic
  // collocation (regression using filtered tensor grids)
  if (refineType && refineControl > Pecos::UNIFORM_CONTROL) {
    Cerr << "Error: only uniform refinement is supported for regression with "
         << "the tensor_grid option." << std::endl;
    abort_handler(METHOD_ERROR);
  }

  /*
  // enforce minimum required VBD control
  if (!vbdFlag && refineControl == Pecos::DIMENSION_ADAPTIVE_CONTROL_SOBOL)
    { vbdFlag = true; vbdOrderLimit = 1; }

  // don't use nested rules for tensor regression since this could induce zeros
  // in the Vandermonde matrix (high order rules contain zeros for lower-order
  // polynomials), despite protection of m >= p+1
  nestedRules = (ruleNestingOverride == Pecos::NESTED ||
    (refineType && ruleNestingOverride != Pecos::NON_NESTED));
  */

  short driver_mode = (false)//(methodName == STOCH_COLLOCATION) // TO DO
                    ? Pecos::INTERPOLATION_MODE : Pecos::INTEGRATION_MODE;

  u_space_sampler.assign_rep(std::make_shared<NonDQuadrature>(g_u_model,
                             quad_order, dim_pref, driver_mode, sub_samples,
                             seed));
}


void NonDExpansion::
construct_sparse_grid(Iterator& u_space_sampler, Model& g_u_model,
                      unsigned short ssg_level, const RealVector& dim_pref)
{
  // enforce minimum required VBD control
  if (!vbdFlag && refineControl == Pecos::DIMENSION_ADAPTIVE_CONTROL_SOBOL)
    { vbdFlag = true; vbdOrderLimit = 1; }

  nestedRules = (ruleNestingOverride != Pecos::NON_NESTED);
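  // (sparse grids default to nested rules unless the user explicitly
  // overrides nesting with non_nested)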

  // tracking of unique product weights needed for PCE and SC standard modes
  // since both employ PolynomialApproximation::compute_numerical_moments(4).
  // Neither PCE nor SC requires product wts for allVars mode, since moment
  // calculations must employ gauss_wts_1d.
  // Exception 1: allVars Nodal SC requires weights for total covariance()
  //              evaluation in VBD.
  // Exception 2: NonDIntegration::print_points_weights() needs weights for
  //              outputLevel > NORMAL_OUTPUT.
  bool nodal_vbd = (vbdFlag && methodName == STOCH_COLLOCATION &&
    expansionCoeffsApproach != Pecos::HIERARCHICAL_SPARSE_GRID);
  bool track_wts = (!allVars || nodal_vbd || outputLevel > NORMAL_OUTPUT);

  short growth_rate;
  if (ruleGrowthOverride == Pecos::UNRESTRICTED ||
      refineControl      == Pecos::DIMENSION_ADAPTIVE_CONTROL_GENERALIZED)
    // unstructured index set evolution: no motivation to restrict
    growth_rate = Pecos::UNRESTRICTED_GROWTH;
  // piecewise bases can be MODERATE *when* we distinguish INTERPOLATION_MODE
  // TO DO: comment out this else block when re-activating INTERPOLATION_MODE
  else if (piecewiseBasis)
    // no reason to match Gaussian precision, but restriction still useful:
    // use SLOW i=2l+1 since it is more natural for NEWTON_COTES,CLENSHAW_CURTIS
    // and is more consistent with UNRESTRICTED generalized sparse grids.
    growth_rate = Pecos::SLOW_RESTRICTED_GROWTH;
  else
    // INTEGRATION_MODE:   standardize on precision: i = 2m-1 = 2(2l+1)-1 = 4l+1
    // INTERPOLATION_MODE: standardize on number of interp pts: m = 2l+1
    growth_rate = Pecos::MODERATE_RESTRICTED_GROWTH;
  //growth_rate = Pecos::SLOW_RESTRICTED_GROWTH; // sync with UQTk restricted

  short driver_mode = (false)//(methodName == STOCH_COLLOCATION) // TO DO
                    ? Pecos::INTERPOLATION_MODE : Pecos::INTEGRATION_MODE;

  u_space_sampler.assign_rep(std::make_shared<NonDSparseGrid>(g_u_model,
                             ssg_level, dim_pref, expansionCoeffsApproach,
                             driver_mode, growth_rate, refineControl,
                             track_wts));
}


/*
BMA NOTE: If code is activated, need to instead use LHS, with refinement samples
void NonDExpansion::
construct_incremental_lhs(Iterator& u_space_sampler, Model& u_model,
                          int num_samples, int seed, const String& rng)
{
  // sanity checks
  if (num_samples <= 0) {
    Cerr << "Error: bad samples specification (" << num_samples << ") in "
         << "NonDExpansion::construct_incremental_lhs()." << std::endl;
    abort_handler(METHOD_ERROR);
  }

  // use default LHS sample_type for consistency with collocation support for
  // incremental_lhs but not incremental_random
  unsigned short sample_type = SUBMETHOD_DEFAULT; // default
  u_space_sampler.assign_rep(new NonDIncremLHSSampling(u_model, sample_type,
    num_samples, orig_seed, rng, ACTIVE), false);
}
*/


void NonDExpansion::initialize_u_space_model()
{
  if (refineControl) {
    // communicate refinement metric to Pecos (determines internal
    // bookkeeping requirements for some PolyApproximation types)
    if (totalLevelRequests) {
      refineMetric = Pecos::LEVEL_STATS_METRIC;
      for (size_t i=0; i<numFunctions; ++i)
        if ( !requestedRelLevels[i].empty() ||
             ( respLevelTarget == RELIABILITIES &&
               !requestedRespLevels[i].empty() ) )
          { refineMetric = Pecos::MIXED_STATS_METRIC; break; }
    }
    else
      refineMetric = Pecos::COVARIANCE_METRIC;
  }

  // if all variables mode, init bookkeeping for the random variable subset
  if (allVars) {
    std::shared_ptr<SharedApproxData> shared_data_rep =
      uSpaceModel.shared_approximation().data_rep();

    BitArray random_vars_key(numContinuousVars); // init to false
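    // mark the continuous aleatory uncertain subset, indices
    // [startCAUV, startCAUV+numCAUV), as the random (expansion) variables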
    assign_value(random_vars_key, true, startCAUV, numCAUV);
    shared_data_rep->random_variables_key(random_vars_key);
  }
}


void NonDExpansion::
configure_expansion_orders(unsigned short exp_order, const RealVector& dim_pref,
                           UShortArray& exp_orders)
{
  // expansion_order defined for expansion_samples/regression
  if (exp_order == USHRT_MAX)
    exp_orders.clear();
  else
    NonDIntegration::dimension_preference_to_anisotropic_order(exp_order,
      dim_pref, numContinuousVars, exp_orders);
}


void NonDExpansion::configure_pecos_options()
{
  // Commonly used approx settings (e.g., order, outputLevel, useDerivs) are
  // passed via the DataFitSurrModel ctor chain.  Additional data needed by
  // {Orthog,Interp}PolyApproximation are passed in Pecos::
  // {Expansion,Basis,Regression}ConfigOptions.  Note: passing outputLevel
  // and useDerivs again is redundant with the DataFitSurrModel ctor.
  std::shared_ptr<SharedPecosApproxData> shared_data_rep =
    std::static_pointer_cast<SharedPecosApproxData>
    (uSpaceModel.shared_approximation().data_rep());
  Pecos::ExpansionConfigOptions ec_options(expansionCoeffsApproach,
    expansionBasisType, iteratedModel.correction_type(),
    multilevDiscrepEmulation, outputLevel, vbdFlag, vbdOrderLimit,
    refineControl, refineMetric, statsMetricMode, maxRefineIterations,
    maxSolverIterations, convergenceTol, softConvLimit);
  shared_data_rep->configuration_options(ec_options);
  Pecos::BasisConfigOptions
    bc_options(nestedRules, piecewiseBasis, true, useDerivs);
  shared_data_rep->configuration_options(bc_options);
}


void NonDExpansion::initialize_u_space_grid()
{
  // if model resize is pending, defer initializing a potentially large grid
  if (iteratedModel.resize_pending())
    { /* callResize = true; */ }
  else {
    //
    // Note: not used by C3; Pecos restriction is appropriate (PCE/SC basis)
    //
    std::shared_ptr<SharedPecosApproxData> shared_data_rep =
      std::static_pointer_cast<SharedPecosApproxData>(
        uSpaceModel.shared_approximation().data_rep());
    std::shared_ptr<NonDIntegration> u_space_sampler_rep =
      std::static_pointer_cast<NonDIntegration>(
        uSpaceModel.subordinate_iterator().iterator_rep());

    u_space_sampler_rep->initialize_grid(shared_data_rep->polynomial_basis());

    numSamplesOnModel = u_space_sampler_rep->maximum_evaluation_concurrency()
      / uSpaceModel.subordinate_model().derivative_concurrency();
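    // (divide out the subordinate model's derivative concurrency so that
    // numSamplesOnModel counts grid points rather than augmented evaluations)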
    // maxEvalConcurrency already updated for expansion samples and regression
    if (numSamplesOnModel) // optional with default = 0
      maxEvalConcurrency *= numSamplesOnModel;
  }
}


void NonDExpansion::
construct_expansion_sampler(unsigned short sample_type, const String& rng,
                            unsigned short integration_refine,
                            const IntVector& refine_samples,
                            const String&  import_approx_file,
                            unsigned short import_approx_format,
                            bool import_approx_active_only)
{
  bool import_pts = false, exp_sampling = false; size_t i;
  if (!import_approx_file.empty())
    import_pts = exp_sampling = true;
  else if (totalLevelRequests)//&& numSamplesOnExpansion) // catch as err below
    for (i=0; i<numFunctions; ++i)
      if ( requestedProbLevels[i].length() || requestedGenRelLevels[i].length()
           || ( requestedRespLevels[i].length() &&
                respLevelTarget != RELIABILITIES ) )
        { exp_sampling = true; break; }

  if (!exp_sampling)
    return;

  std::shared_ptr<NonD> exp_sampler_rep;
  if (import_pts) {
    RealMatrix x_samples; // imports are always from user space
    // Analyzer::update_model_from_sample() currently updates only the active
    // continuous vars from an allSamples column --> pass numContinuousVars as
    // number of rows; import_build_active_only not currently used
    TabularIO::read_data_tabular(import_approx_file,
      "imported approx samples file", x_samples, numContinuousVars,
      import_approx_format); //, import_build_active_only);
    numSamplesOnExpansion = x_samples.numCols();
    // transform to u space must follow runtime dist param updates,
    // so pass x_samples for now and transform at runtime
    exp_sampler_rep = std::make_shared<NonDSampling>(uSpaceModel, x_samples);//u_samples);
    exp_sampler_rep->requested_levels(requestedRespLevels, requestedProbLevels,
      requestedRelLevels, requestedGenRelLevels, respLevelTarget,
      respLevelTargetReduce, cdfFlag, true); // compute/print PDFs
  }
  else {
    if (!numSamplesOnExpansion) { // sanity check for samples spec
      Cerr << "\nError: number of samples must be specified for numerically "
           << "evaluating statistics on a stochastic expansion." << std::endl;
      abort_handler(METHOD_ERROR);
    }

    // could use construct_lhs() except for non-default ALEATORY_UNCERTAIN
    // sampling mode.  Don't vary sampling pattern since we want to reuse
    // same sampling stencil for different design/epistemic vars or for
    // (goal-oriented) adaptivity.
    exp_sampler_rep = std::make_shared<NonDLHSSampling>
      (uSpaceModel, sample_type, numSamplesOnExpansion,
       first_seed(), rng, false, ALEATORY_UNCERTAIN);
    //expansionSampler.sampling_reset(numSamplesOnExpansion, true, false);

    // needs to precede exp_sampler_rep->requested_levels()
    exp_sampler_rep->final_moments_type(NO_MOMENTS); // suppress sample moments

    // publish level mappings to expansion sampler, but suppress reliability
    // moment mappings (performed locally within compute_statistics())
    RealVectorArray empty_rv_array; // empty
    RealVectorArray& req_resp_levs = (respLevelTarget == RELIABILITIES) ?
      empty_rv_array : requestedRespLevels;
    exp_sampler_rep->requested_levels(req_resp_levs, requestedProbLevels,
      empty_rv_array, requestedGenRelLevels, respLevelTarget,
      respLevelTargetReduce, cdfFlag, false); // suppress PDFs (managed locally)

    bool imp_sampling = false;
    if (integration_refine && respLevelTarget != RELIABILITIES)
      for (i=0; i<numFunctions; ++i)
        if (requestedRespLevels[i].length())
          { imp_sampling = true; break; }

    if (imp_sampling) {
      int ais_samples = 1000; // context-specific default
      if (refine_samples.length() == 1)
        ais_samples = refine_samples[0];
      else if (refine_samples.length() > 1) {
        Cerr << "\nError (NonDExpansion): refinement_samples must be length "
             << "1 if specified." << std::endl;
        abort_handler(PARSE_ERROR);
      }
      // extreme values needed for defining bounds of PDF bins
      bool vary_pattern = true, track_extreme = pdfOutput;
      auto imp_sampler_rep = std::make_shared<NonDAdaptImpSampling>
        (uSpaceModel, sample_type, ais_samples, first_seed(), rng, vary_pattern,
         integration_refine, cdfFlag, false, false, track_extreme);
      importanceSampler.assign_rep(imp_sampler_rep);

      imp_sampler_rep->output_level(outputLevel);
      imp_sampler_rep->requested_levels(req_resp_levs, empty_rv_array,
        empty_rv_array, empty_rv_array, respLevelTarget, respLevelTargetReduce,
        cdfFlag, false); // suppress PDFs (managed locally)
      //imp_sampler_rep->final_moments_type(NO_MOMENTS); // already off for AIS
    }
  }
  // publish output verbosity
  exp_sampler_rep->output_level(outputLevel);
  // store rep inside envelope
  expansionSampler.assign_rep(exp_sampler_rep);
}


void NonDExpansion::core_run()
{
  initialize_expansion();

  compute_expansion();  // nominal iso/aniso expansion from input spec
  if (refineType) {//&& maxRefineIterations
    // post-process nominal expansion, defining reference stats for refinement
    //metric_roll_up(INTERMEDIATE_RESULTS); // not relevant for single-fidelity
    compute_statistics(INTERMEDIATE_RESULTS);
    if (outputLevel > SILENT_OUTPUT)
      print_results(Cout, INTERMEDIATE_RESULTS);

    refine_expansion(); // uniform/adaptive p-/h-refinement
  }

  compute_statistics(FINAL_RESULTS);
  // Note: print_results() called by Analyzer::post_run()

  finalize_expansion();
}


void NonDExpansion::initialize_expansion()
{
  // IteratorScheduler::run_iterator() + Analyzer::initialize_run() ensure
  // initialization of Model mappings for iteratedModel, but local recursions
  // are not visible -> recur DataFitSurr + ProbabilityTransform if needed.
  if (!uSpaceModel.mapping_initialized()) {
    ParLevLIter pl_iter = methodPCIter->mi_parallel_level_iterator(miPLIndex);
    /*bool var_size_changed =*/ uSpaceModel.initialize_mapping(pl_iter);
    //if (var_size_changed) resize();
  }
  // Note: part of this occurs at DataFit build time (daceIterator -> g_u_model)
  // as well as evaluation of uSpaceModel by expansion sampler. Therefore, take
  // care to avoid redundancy using mappingInitialized flag.

  if (totalLevelRequests) initialize_level_mappings(); // size computed*Levels
  resize_final_statistics_gradients(); // finalStats ASV available at run time
  //transform_correlations();

  // now that data has flowed down at run-time from any higher level recursions
  // to iteratedModel, it must be propagated up through the local g_u_model and
  // uSpaceModel recursions (so they are correct when propagated back down).
  // There is no need to recur below iteratedModel.
  // RecastModel::update_from_model() had insufficient context to update
  // distribution parameters for variables that were not transformed, but
  // ProbabilityTransformModel::update_from_model() can now handle this.
  size_t layers = 2; // PTModel, DFSModel
  // recur once (beyond layer 1) so that layer 2 pulls from iteratedModel
  uSpaceModel.update_from_subordinate_model(layers-1);

  // if a sub-iterator, reset previous history (e.g. grid refinements) as needed
  if (subIteratorFlag) { //&& numUncertainQuant && refineType) {
    Iterator& u_space_sampler = uSpaceModel.subordinate_iterator();
    if (!u_space_sampler.is_null())
      u_space_sampler.reset();// clear previous prior to next grid generate/eval
  }

  // set initialPtU which is used in this class for all-variables mode and local
  // sensitivity calculations, and by external classes for queries on the PCE
  // emulator model (e.g., NonDBayesCalibration).  In the case of design,
  // epistemic, and state vars, it captures the current values for this UQ
  // execution; for aleatory vars, it reflects the u-space mean values (which
  // are invariant in std distribution cases despite updates from above).
  initialPtU.size(numContinuousVars);
  if (allVars)
    uSpaceModel.probability_transformation().trans_X_to_U(
      iteratedModel.continuous_variables(), initialPtU);
  RealVector u_means = uSpaceModel.multivariate_distribution().means();
  //const SharedVariablesData& svd
  //  = iteratedModel.current_variables().shared_data();
  for (size_t i=startCAUV; i<startCAUV+numCAUV; ++i)
    initialPtU[i] = u_means[i];//[svd.cv_index_to_active_index(i)];

  // transform any points imported into expansionSampler from user space
  // into standardized space (must follow any dist param updates)
  if (expansionSampler.method_name() == LIST_SAMPLING &&
      numUncertainQuant == 0) {
    std::shared_ptr<NonDSampling> exp_sampler_rep =
      std::static_pointer_cast<NonDSampling>(expansionSampler.iterator_rep());
    exp_sampler_rep->
      transform_samples(uSpaceModel.probability_transformation());
  }
}


void NonDExpansion::compute_expansion()
{
#ifdef DERIV_DEBUG
  Pecos::ProbabilityTransformation& nataf
    = uSpaceModel.probability_transformation();
  // numerical verification of analytic Jacobian/Hessian routines
  RealVector rdv_u;
  nataf.trans_X_to_U(iteratedModel.continuous_variables(), rdv_u);
  nataf.verify_trans_jacobian_hessian(rdv_u);//(rdv_x);
  nataf.verify_design_jacobian(rdv_u);
#endif // DERIV_DEBUG

  Iterator& u_space_sampler = uSpaceModel.subordinate_iterator();
  std::shared_ptr<NonD> u_space_sampler_rep =
    std::static_pointer_cast<NonD>(u_space_sampler.iterator_rep());

  const ShortArray& final_asv = finalStatistics.active_set_request_vector();
  const SizetArray& final_dvv = finalStatistics.active_set_derivative_vector();
  size_t i, j, rl_len, pl_len, bl_len, gl_len, total_i, cntr = 0,
    num_final_stats = final_asv.size(), num_final_grad_vars = final_dvv.size(),
    moment_offset   = (finalMomentsType) ? 2 : 0;
  bool final_stat_grad_flag = false;
  for (i=0; i<num_final_stats; ++i)
    if (final_asv[i] & 2)
      { final_stat_grad_flag = true; break; }

  // define ASV for u_space_sampler and expansion coeff/grad data flags
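  // (ASV bit convention: 1 = function value, 2 = gradient, 4 = Hessian)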
  ShortArray sampler_asv(numFunctions, 0);
  std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
  size_t end_cauv = startCAUV + numCAUV;
  for (i=0; i<numFunctions; ++i) {
    bool expansion_coeff_flag = false, expansion_grad_flag = false;
    if (totalLevelRequests) {
      rl_len = requestedRespLevels[i].length();
      pl_len = requestedProbLevels[i].length();
      bl_len = requestedRelLevels[i].length();
      gl_len = requestedGenRelLevels[i].length();
    }
    else
      rl_len = pl_len = bl_len = gl_len = 0;

    // map final_asv value bits into expansion_coeff_flag requirements
    total_i = moment_offset+rl_len+pl_len+bl_len+gl_len;
    for (j=0; j<total_i; ++j)
      if (final_asv[cntr+j] & 1)
        { expansion_coeff_flag = true; break; }

    if (final_stat_grad_flag) {
      // moment grad flags manage requirements at a higher level
      // --> mapped into expansion value/grad flags at bottom
      bool moment1_grad = false, moment2_grad = false;
      // map final_asv gradient bits into moment grad requirements
      if (finalMomentsType) {
        if (final_asv[cntr++] & 2) moment1_grad = true;
        if (final_asv[cntr++] & 2) moment2_grad = true;
      }
      if (respLevelTarget == RELIABILITIES)
        for (j=0; j<rl_len; ++j) // dbeta/ds requires mu,sigma,dmu/ds,dsigma/ds
          if (final_asv[cntr+j] & 2)
            { moment1_grad = moment2_grad = expansion_coeff_flag = true; break;}
      cntr += rl_len + pl_len;
      for (j=0; j<bl_len; ++j)   // dz/ds requires dmu/ds, dsigma/ds
        if (final_asv[cntr+j] & 2)
          { moment1_grad = moment2_grad = true; break; }
      cntr += bl_len + gl_len;

      // map moment grad requirements into expansion_{coeff,grad}_flag reqmts
      // (refer to *PolyApproximation::get_*_gradient() implementations)
      // aleatory vars requirements:
      //   mean grad:          coeff grads
      //   var  grad: coeffs & coeff grads
      // all vars requirements:
      //   mean grad: coeffs (nonrandom v),          coeff grads (random v)
      //   var  grad: coeffs (nonrandom v), coeffs & coeff grads (random v)
      if (allVars) { // aleatory + design/epistemic
        size_t deriv_index, num_deriv_vars = final_dvv.size();
        for (j=0; j<num_deriv_vars; ++j) {
          deriv_index = final_dvv[j] - 1; // OK since we are in an "All" view
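          // (DVV entries are 1-based variable ids, hence the -1 offset)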
          // random variable
          if (deriv_index >= startCAUV && deriv_index < end_cauv) {
            if (moment1_grad) expansion_grad_flag = true;
            if (moment2_grad) expansion_grad_flag = expansion_coeff_flag = true;
          }
          // non-random variable
          else if (moment1_grad || moment2_grad)
            expansion_coeff_flag = true;
        }
      }
      else { // aleatory expansion variables
        if (moment1_grad) expansion_grad_flag = true;
        if (moment2_grad) expansion_grad_flag = expansion_coeff_flag = true;
      }
    }
    else
      cntr += moment_offset + rl_len + pl_len + bl_len + gl_len;

    // map expansion_{coeff,grad}_flag requirements into sampler ASV and
    // Approximation settings
    if (expansion_coeff_flag)             sampler_asv[i] |= 1;
    if (expansion_grad_flag || useDerivs) sampler_asv[i] |= 2;
    Approximation& approx_i = poly_approxs[i];
    approx_i.expansion_coefficient_flag(expansion_coeff_flag);
    approx_i.expansion_gradient_flag(expansion_grad_flag);
  }

  // If OUU/SOP (multiple calls to core_run()), an expansion constructed over
  // the full range of all variables does not need to be reconstructed on
  // subsequent calls.  However, an allVars construction over a trust region
  // needs rebuilding when the trust region is updated.  In the checks below,
  // all_approx detects any variable insertions or ASV omissions and
  // force_rebuild() manages variable augmentations.
  bool all_approx = false, dist_param_derivs
    = (uSpaceModel.query_distribution_parameter_derivatives() > NO_DERIVS);
  if (allVars && numUncertainQuant && !dist_param_derivs) {
    all_approx = true;
    // does sampler_asv contain content not evaluated previously
    const ShortArray& prev_asv = u_space_sampler.active_set_request_vector();
    for (i=0; i<numFunctions; ++i)
      // bit-wise AND checks if each sampler_asv bit is present in prev_asv
      if ( (prev_asv[i] & sampler_asv[i]) != sampler_asv[i] )
        { all_approx = false; break; }
  }
  if (!all_approx || uSpaceModel.force_rebuild()) {

    if (u_space_sampler_rep) {

      // Set the sampler ASV (defined from previous loop over numFunctions)
      ActiveSet sampler_set;
      sampler_set.request_vector(sampler_asv);

      // if required statistical sensitivities are not covered by All variables
      // mode for augmented design variables, then the simulations must evaluate
      // response sensitivities.
      bool sampler_grad = false;
      if (final_stat_grad_flag) {
        if (dist_param_derivs)
          uSpaceModel.activate_distribution_parameter_derivatives();
        if (allVars) sampler_grad = dist_param_derivs;
        else         sampler_grad = true;
      }

      // Set the u_space_sampler DVV, managing different gradient modes & their
      // combinations.  The u_space_sampler's DVV may then be augmented for
      // correlations in NonD::set_u_to_x_mapping().  Sources for DVV content
      // include the model's continuous var ids and the final_dvv set by a
      // NestedModel.  In the latter case, NestedModel::derived_compute_response
      // maps top-level optimizer derivative vars to sub-iterator derivative
      // vars in NestedModel::set_mapping() and then sets this DVV within
      // finalStats using subIterator.response_results_active_set().
      if (useDerivs) {
        SizetMultiArrayConstView cv_ids
          = iteratedModel.continuous_variable_ids();
        if (sampler_grad) { // merge cv_ids with final_dvv
          SizetSet merged_set; SizetArray merged_dvv;
          merged_set.insert(cv_ids.begin(), cv_ids.end());
          merged_set.insert(final_dvv.begin(), final_dvv.end());
          merged_dvv.assign(merged_set.begin(), merged_set.end());
          sampler_set.derivative_vector(merged_dvv);
        }
        else // assign cv_ids
          sampler_set.derivative_vector(cv_ids);
      }
      else if (allVars && sampler_grad) { // filter: retain only insertion tgts
        SizetArray filtered_final_dvv;
        for (i=0; i<num_final_grad_vars; ++i) {
          size_t dvv_i = final_dvv[i];
          if (dvv_i > startCAUV && dvv_i <= end_cauv)
            filtered_final_dvv.push_back(dvv_i);
        }
        sampler_set.derivative_vector(filtered_final_dvv);
      }
      else if (sampler_grad)
        sampler_set.derivative_vector(final_dvv);
      else // derivs not needed, but correct DVV len needed for MPI buffers
        sampler_set.derivative_vector(iteratedModel.continuous_variable_ids());

      // Build the orthogonal/interpolation polynomial approximations:
      u_space_sampler.active_set(sampler_set);
    }

    uSpaceModel.build_approximation();

    if (u_space_sampler_rep && final_stat_grad_flag && dist_param_derivs)
      uSpaceModel.deactivate_distribution_parameter_derivatives();
  }
}


void NonDExpansion::refine_expansion()
{
  // --------------------------------------
  // Uniform/adaptive refinement approaches
  // --------------------------------------
  // DataMethod default for maxRefineIterations is -1, indicating no user spec.
  // Assign a context-specific default in this case.
  size_t SZ_MAX = std::numeric_limits<size_t>::max(), candidate, iter = 1,
    max_refine_iter = (maxRefineIterations < 0) ? 100 : maxRefineIterations;
  bool converged = (iter > max_refine_iter),
    print_metric = (outputLevel > SILENT_OUTPUT);
  Real metric;

  pre_refinement();

  while (!converged) {

    Cout << "\n>>>>> Begin refinement iteration " << iter << ":\n";
    candidate = core_refinement(metric, false, print_metric); // no revert
    if (candidate == SZ_MAX) {
      Cout <<"\n<<<<< Refinement has saturated with no candidates available.\n";
      converged = true;
    }
    else {
      Cout << "\n<<<<< Refinement iteration " << iter << " completed: "
           << "convergence metric = " << metric << '\n';
      converged = (metric <= convergenceTol || ++iter > max_refine_iter);
    }
  }

  post_refinement(metric);
}


void NonDExpansion::finalize_expansion()
{
  ++numUncertainQuant;

  // IteratorScheduler::run_iterator() + Analyzer::initialize_run() ensure
  // finalization of Model mappings for iteratedModel, but local recursions
  // are not visible -> recur DataFitSurr + ProbabilityTransform if needed.
  if (uSpaceModel.mapping_initialized()) {
    /*bool var_size_changed =*/ uSpaceModel.finalize_mapping();
    //if (var_size_changed) resize();
  }
}


void NonDExpansion::pre_refinement()
{
  // initialize refinement algorithms (if necessary)

  std::shared_ptr<Iterator> sub_iter_rep =
    uSpaceModel.subordinate_iterator().iterator_rep();

  // now embedded in IncrementalSparseGridDriver::compute_grid():
  //nond_sparse->update_reference();

  switch (refineControl) {
  case Pecos::DIMENSION_ADAPTIVE_CONTROL_GENERALIZED:
    Cout << "\n>>>>> Initialization of generalized sparse grid sets.\n";
    std::static_pointer_cast<NonDSparseGrid>(sub_iter_rep)->initialize_sets();
    break;
  }
}


size_t NonDExpansion::
core_refinement(Real& metric, bool revert, bool print_metric)
{
  size_t candidate = 0;
  switch (refineControl) {
  case Pecos::UNIFORM_CONTROL:
  case Pecos::DIMENSION_ADAPTIVE_CONTROL_SOBOL:
  case Pecos::DIMENSION_ADAPTIVE_CONTROL_DECAY: {
    // if refinement opportunities have saturated (e.g., increments have reached
    // max{Order,Rank} or previous cross validation indicated better fit with
    // lower order), no candidates will be generated for this model key.
    if (!uSpaceModel.advancement_available())
      { metric = 0.;  return std::numeric_limits<size_t>::max(); }
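    // (size_t max is the "no candidate" sentinel detected by the caller,
    // refine_expansion(), to terminate the refinement loop)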
1068 
1069     RealVector stats_ref;
1070     if (revert) pull_reference(stats_ref);
1071 
1072     update_expansion();
1073     // combine expansions if necessary for metric computation:
1074     // Note: Multilevel SC overrides this fn to remove roll-up for Hier SC
1075     //       (its delta metrics can be computed w/o exp combination)
1076     metric_roll_up(REFINEMENT_RESULTS);
1077 
1078     // assess increment by computing refinement metric:
1079     // defer revert (pass false) -> simplifies best candidate tracking to follow
1080     // Note: covariance metric seems more self-consistent for Sobol'-weighted
1081     // aniso refinement, but allow final stats adaptation if mappings are used
1082     switch (refineMetric) {
1083     case Pecos::COVARIANCE_METRIC:
1084       metric = compute_covariance_metric(false, print_metric);      break;
1085     //case Pecos::MIXED_STATS_METRIC: // TO DO
1086     //  compute_mixed_metric(); [retire compute_final_stats_metric()] break;
1087     default: //case Pecos::LEVEL_STATS_METRIC:
1088       metric = compute_level_mappings_metric(false, print_metric);  break;
1089     }
1090     compute_statistics(REFINEMENT_RESULTS); // augment delta metrics if needed
1091     if (print_metric) print_results(Cout, REFINEMENT_RESULTS); // augment output
1092     pull_candidate(statsStar); // pull compute_*_metric() + augmented stats
1093 
1094     if (revert)
1095       { pop_increment(); push_reference(stats_ref); }
1096     else
1097       merge_grid();
1098     break;
1099   }
1100   case Pecos::DIMENSION_ADAPTIVE_CONTROL_GENERALIZED: // SSG only
1101     // Dimension adaptive refinement using generalized sparse grids.
1102     // > Start GSG from iso/aniso SSG: starting from scratch (w=0) is
1103     //   most efficient if fully nested; otherwise, unique points from
1104     //   lowest levels may not contribute (smolyak coeff = 0).
1105     // > Starting GSG from TPQ is conceptually straightforward but
1106     //   awkward in implementation (would need something like
1107     //   nond_sparse->ssg_driver->compute_tensor_grid()).
1108     // Returns best of several candidates for this level
1109     candidate = increment_sets(metric, revert, print_metric);
1110     break;
1111   }
1112 
1113   return candidate;
1114 }
1115 
1116 
post_refinement(Real & metric,bool reverted)1117 void NonDExpansion::post_refinement(Real& metric, bool reverted)
1118 {
1119   // finalize refinement algorithms (if necessary)
1120   switch (refineControl) {
1121   case Pecos::DIMENSION_ADAPTIVE_CONTROL_GENERALIZED: {
1122     bool converged_within_tol = (metric <= convergenceTol);
1123     finalize_sets(converged_within_tol, reverted); break;
1124   }
1125   case Pecos::UNIFORM_CONTROL:
1126   case Pecos::DIMENSION_ADAPTIVE_CONTROL_SOBOL:
1127   case Pecos::DIMENSION_ADAPTIVE_CONTROL_DECAY:
1128     if (reverted && uSpaceModel.push_available())
1129       select_increment_candidate();
1130     break;
1131   }
1132 }
1133 
1134 
increment_grid(bool update_anisotropy)1135 void NonDExpansion::increment_grid(bool update_anisotropy)
1136 {
1137   switch (refineControl) {
1138   case Pecos::UNIFORM_CONTROL:
1139     switch (expansionCoeffsApproach) {
1140     case Pecos::QUADRATURE:              case Pecos::CUBATURE:
1141     case Pecos::INCREMENTAL_SPARSE_GRID: case Pecos::HIERARCHICAL_SPARSE_GRID: {
1142       std::shared_ptr<NonDIntegration> nond_integration =
1143 	std::static_pointer_cast<NonDIntegration>
1144 	(uSpaceModel.subordinate_iterator().iterator_rep());
1145       nond_integration->increment_grid(); break;
1146     }
1147     case Pecos::ORTHOG_LEAST_INTERPOLATION: // case Pecos::SAMPLING:
1148       break;
1149     default: // regression cases
1150       increment_order_and_grid(); break;
1151     }
1152     break;
1153   case Pecos::DIMENSION_ADAPTIVE_CONTROL_SOBOL: {
1154     // Dimension adaptive refinement: define anisotropic preference
1155     // vector from total Sobol' indices, averaged over response fn set.
1156     std::shared_ptr<NonDIntegration> nond_integration =
1157       std::static_pointer_cast<NonDIntegration>
1158       (uSpaceModel.subordinate_iterator().iterator_rep());
1159     if (update_anisotropy) { // weight SSG to emphasize larger Sobol indices
1160       RealVector dim_pref;
1161       reduce_total_sobol_sets(dim_pref);
1162       nond_integration->increment_grid_preference(dim_pref); // TPQ or SSG
1163     }
1164     else // increment level while preserving current weighting / bounds
1165       nond_integration->increment_grid_preference();
1166     break;
1167   }
1168   case Pecos::DIMENSION_ADAPTIVE_CONTROL_DECAY: {
1169     // Dimension adaptive refinement: define anisotropic weight vector
1170     // from min of spectral decay rates (PCE only) over response fn set.
1171     std::shared_ptr<NonDIntegration> nond_integration =
1172       std::static_pointer_cast<NonDIntegration>
1173       (uSpaceModel.subordinate_iterator().iterator_rep());
1174     if (update_anisotropy) { // weight SSG to emphasize slower decay
1175       RealVector aniso_wts;
1176       reduce_decay_rate_sets(aniso_wts);
1177       nond_integration->increment_grid_weights(aniso_wts); // TPQ or SSG
1178     }
1179     else // increment level while preserving current weighting / bounds
1180       nond_integration->increment_grid_weights();
1181     break;
1182   }
1183   }
1184 }
1185 
1186 
decrement_grid()1187 void NonDExpansion::decrement_grid()
1188 {
1189   std::shared_ptr<NonDIntegration> nond_integration =
1190     std::static_pointer_cast<NonDIntegration>
1191     (uSpaceModel.subordinate_iterator().iterator_rep());
1192   switch (expansionCoeffsApproach) {
1193   case Pecos::QUADRATURE:              case Pecos::CUBATURE:
1194   case Pecos::INCREMENTAL_SPARSE_GRID: case Pecos::HIERARCHICAL_SPARSE_GRID:
1195     nond_integration->decrement_grid();  break;
1196   case Pecos::ORTHOG_LEAST_INTERPOLATION: // case Pecos::SAMPLING:
1197     break;
1198   default: // regression cases
1199     decrement_order_and_grid();  break;
1200   }
1201 }
1202 
1203 
push_increment()1204 void NonDExpansion::push_increment()
1205 {
1206   increment_grid(false); // don't recompute anisotropy
1207 
1208   switch (expansionCoeffsApproach) {
1209   case Pecos::INCREMENTAL_SPARSE_GRID: case Pecos::HIERARCHICAL_SPARSE_GRID: {
1210     std::shared_ptr<NonDIntegration> nond_integration =
1211       std::static_pointer_cast<NonDIntegration>
1212       (uSpaceModel.subordinate_iterator().iterator_rep());
1213     nond_integration->push_grid_increment();
1214     break;
1215   }
1216   }
1217 
1218   uSpaceModel.push_approximation(); // uses reference in append_tensor_exp
1219 }
1220 
1221 
pop_increment()1222 void NonDExpansion::pop_increment()
1223 {
1224   // reverse order from update_expansion() / push_increment()
1225   // (allows grid increment to be queried while popping expansion data)
1226   uSpaceModel.pop_approximation(true);// store increment to use in restore
1227 
1228   decrement_grid();
1229 
1230   switch (expansionCoeffsApproach) {
1231   case Pecos::INCREMENTAL_SPARSE_GRID: case Pecos::HIERARCHICAL_SPARSE_GRID: {
1232     std::shared_ptr<NonDIntegration> nond_integration =
1233       std::static_pointer_cast<NonDIntegration>
1234       (uSpaceModel.subordinate_iterator().iterator_rep());
1235     nond_integration->pop_grid_increment();
1236     break;
1237   }
1238   }
1239 }
1240 
1241 
1242 void NonDExpansion::merge_grid()
1243 {
1244   switch (expansionCoeffsApproach) {
1245   case Pecos::INCREMENTAL_SPARSE_GRID: case Pecos::HIERARCHICAL_SPARSE_GRID: {
1246     std::shared_ptr<NonDIntegration> nond_integration =
1247       std::static_pointer_cast<NonDIntegration>
1248       (uSpaceModel.subordinate_iterator().iterator_rep());
1249     nond_integration->merge_grid_increment();
1250     nond_integration->update_reference();
1251     break;
1252   }
1253   //case Pecos::QUADRATURE: case Pecos::CUBATURE: // no-op
1254   }
1255 }
1256 
1257 
1258 void NonDExpansion::
1259 configure_sequence(unsigned short& num_steps, unsigned short& fixed_index,
1260 		   bool& multilevel,          bool mf_precedence)
1261 {
1262   // Allow either model forms or discretization levels, but not both
1263   // (precedence determined by the mf_precedence flag)
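  // Illustrative outcomes (a sketch based on the logic below): a single
  // model form with 3 solution levels takes the multilevel branch
  // (num_steps = 3, fixed_index = num_mf - 1 = 0), whereas 2 model forms
  // whose HF form has 1 solution level (with mf_precedence = true) take
  // the multifidelity branch (num_steps = 2, fixed_index = USHRT_MAX,
  // synchronized with the active solution index).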
1264   ModelList& ordered_models = iteratedModel.subordinate_models(false);
1265   ModelLIter m_iter = --ordered_models.end(); // HF model
1266   size_t num_mf = ordered_models.size(), num_hf_lev = m_iter->solution_levels();
1267 
1268   multilevel = ( num_hf_lev > 1 && ( num_mf <= 1 || !mf_precedence ) );
1269   if (multilevel) {
1270     num_steps = num_hf_lev;  fixed_index = num_mf - 1;
1271     if (num_mf > 1)
1272       Cerr << "Warning: multiple model forms will be ignored in "
1273 	   << "NonDExpansion::configure_sequence().\n";
1274   }
1275   else if ( num_mf > 1 && ( num_hf_lev <= 1 || mf_precedence ) ) {
1276     num_steps = num_mf;  fixed_index = USHRT_MAX; // sync w/ active soln index
1277     if (num_hf_lev > 1)
1278       Cerr << "Warning: solution control levels will be ignored in "
1279 	   << "NonDExpansion::configure_sequence().\n";
1280   }
1281   else {
1282     Cerr << "Error: no model hierarchy evident in NonDExpansion::"
1283 	 << "configure_sequence()." << std::endl;
1284     abort_handler(METHOD_ERROR);
1285   }
1286 }
1287 
1288 
1289 bool NonDExpansion::
1290 query_cost(unsigned short num_steps, bool multilevel, RealVector& cost)
1291 {
1292   bool cost_defined = true;
1293   ModelList& ordered_models = iteratedModel.subordinate_models(false);
1294   ModelLIter m_iter;
1295   if (multilevel) {
1296     m_iter = --ordered_models.end(); // HF model
1297     cost = m_iter->solution_level_costs();      // can be empty
1298     if (cost.length() != num_steps)
1299       cost_defined = false;
1300   }
1301   else  {
1302     cost.sizeUninitialized(num_steps);
1303     m_iter = ordered_models.begin();
1304     for (unsigned short i=0; i<num_steps; ++i, ++m_iter) {
1305       cost[i] = m_iter->solution_level_cost(); // cost for active soln index
1306       if (cost[i] <= 0.) cost_defined = false;
1307     }
1308   }
1309   if (!cost_defined) cost.sizeUninitialized(0); // for compute_equivalent_cost()
1310   return cost_defined;
1311 }
1312 
1313 
1314 void NonDExpansion::
1315 configure_indices(unsigned short group, unsigned short form,
1316 		  unsigned short lev,   unsigned short s_index)
1317 {
1318   // Set active surrogate/truth models within the Hierarch,DataFit surrogates
1319   // (recursion from uSpaceModel to iteratedModel)
1320   // > group index is assigned based on step in model form/resolution sequence
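  // > key layout sketch (as suggested by the form_key/decrement_key usage
  //   below; the exact layout is owned by Pecos::DiscrepancyCalculator):
  //   hf_key ~ { group, form, lev }, so s_index selects the sequence
  //   component that varies.  DISTINCT_EMULATION decrements that component
  //   to obtain the LF key; RECURSIVE_EMULATION blanks form/lev (USHRT_MAX)
  //   to denote the LF emulator (LF-hat).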
1321 
1322   UShortArray hf_key;
1323   Pecos::DiscrepancyCalculator::form_key(group, form, lev, hf_key);
1324 
1325   if (hf_key[s_index] == 0 || !multilevDiscrepEmulation) {
1326     bypass_surrogate_mode();              // one model evaluation
1327     uSpaceModel.active_model_key(hf_key); // one data set
1328   }
1329   else { // 3 data sets: HF, either LF or LF-hat, and discrep
1330     UShortArray child_key(hf_key), discrep_key;
1331     switch (multilevDiscrepEmulation) {
1332     case DISTINCT_EMULATION:
1333       aggregated_models_mode(); // two model evaluations
1334       // child key is the LF model from a {HF,LF} aggregation
1335       Pecos::DiscrepancyCalculator::decrement_key(child_key, s_index);
1336       break;
1337     case RECURSIVE_EMULATION:
1338       bypass_surrogate_mode(); // still only one model evaluation
1339       // child key is emulator of the LF model (LF-hat) --> dummy model indices
1340       child_key[1] = child_key[2] = USHRT_MAX;// same group but no form,lev
1341       break;
1342     }
1343     Pecos::DiscrepancyCalculator::
1344       aggregate_keys(hf_key, child_key, discrep_key);
1345     uSpaceModel.active_model_key(discrep_key);
1346   }
1347 }
1348 
1349 
1350 void NonDExpansion::assign_discrepancy_mode()
1351 {
1352   // assign alternate defaults for correction and discrepancy emulation types
1353   switch (iteratedModel.correction_type()) {
1354   //case ADDITIVE_CORRECTION:
1355   //case MULTIPLICATIVE_CORRECTION:
1356   case NO_CORRECTION: // assign method-specific default
1357     iteratedModel.correction_type(ADDITIVE_CORRECTION); break;
1358   }
1359 
1360   switch (multilevDiscrepEmulation) {
1361   // HIERARCHICAL_INTERPOLANT approaches are already recursive ...
1362   //case DISTINCT_EMULATION:
1363   //  consider disallowing for basis type Pecos::HIERARCHICAL_INTERPOLANT
1364   //case RECURSIVE_EMULATION:
1365   //  consider disallowing for basis type Pecos::NODAL_INTERPOLANT
1366   case DEFAULT_EMULATION: // assign method-specific default
1367     multilevDiscrepEmulation =
1368       //(expBasisType==Pecos::HIERARCHICAL_INTERPOLANT) ? RECURSIVE_EMULATION :
1369       DISTINCT_EMULATION;
1370     break;
1371   }
1372 }
1373 
1374 
1375 void NonDExpansion::assign_hierarchical_response_mode()
1376 {
1377   // override default SurrogateModel::responseMode for purposes of setting
1378   // comms for the ordered Models within HierarchSurrModel::set_communicators(),
1379   // which precedes mode updates in {multifidelity,multilevel}_expansion().
1380 
1381   if (iteratedModel.surrogate_type() != "hierarchical") {
1382     Cerr << "Error: multilevel/multifidelity expansions require a "
1383 	 << "hierarchical model." << std::endl;
1384     abort_handler(METHOD_ERROR);
1385   }
1386 
1387   if (multilevDiscrepEmulation == RECURSIVE_EMULATION)
1388     iteratedModel.surrogate_response_mode(BYPASS_SURROGATE);
1389   // ML-MF {PCE,SC,FT} are based on model discrepancies, but multi-index cases
1390   // may evolve towards BYPASS_SURROGATE as sparse grids in model space will
1391   // manage QoI differences.
1392   // > AGGREGATED_MODELS avoids decimation of data and can simplify algorithms,
1393   //   but requires additional discrepancy keys for high-low QoI combinations
1394   else
1395     iteratedModel.surrogate_response_mode(AGGREGATED_MODELS);//MODEL_DISCREPANCY
1396 }
1397 
1398 
1399 void NonDExpansion::
1400 compute_equivalent_cost(const SizetArray& N_l, const RealVector& cost)
1401 {
1402   if (cost.empty() || N_l.empty())
1403     { equivHFEvals = 0.; return; }
1404 
1405   // compute the equivalent number of HF evaluations
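  // e.g. (illustrative numbers only), for N_l = {100, 50, 25} and
  // cost = {1, 4, 16}:
  // > RECURSIVE: (100*1 + 50*4 + 25*16) / 16 = 43.75 equivalent HF evals
  // > DISTINCT:  (100*1 + 50*(4+1) + 25*(16+4)) / 16 = 53.125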
1406   size_t l, num_l = N_l.size();
1407   switch (multilevDiscrepEmulation) {
1408   case RECURSIVE_EMULATION:
1409     for (l=0; l<num_l; ++l)
1410       equivHFEvals += N_l[l] * cost[l]; // single model cost per level
1411     break;
1412   case DISTINCT_EMULATION:
1413     equivHFEvals = N_l[0] * cost[0]; // first level is single model cost
1414     for (l=1; l<num_l; ++l)  // subsequent levels incur 2 model costs
1415       equivHFEvals += N_l[l] * (cost[l] + cost[l-1]);
1416     break;
1417   }
1418   equivHFEvals /= cost[num_l-1]; // normalize into equivalent HF evals
1419 }
1420 
1421 
1422 void NonDExpansion::multifidelity_expansion()
1423 {
1424   // Separating reference + refinement into two loops has two benefits and one drawback:
1425   // > allows refinement based on COMBINED_EXPANSION_STATS to have a more
1426   //   complete view of the rolled-up stats as level refinements begin
1427   // > more consistent flow + greater reuse between indiv/integrated refinement
1428   // > downside: recursive emulation requires update to ref expansions prior
1429   //   to initiating refinements for lev > 0
1430   // For greedy integrated refinement:
1431   // > Only generate combined{MultiIndex,ExpCoeffs,ExpCoeffGrads}; active
1432   //   multiIndex,expansionCoeff{s,Grads} remain at ref state (no roll up)
1433   multifidelity_reference_expansion();
1434 
1435   // Perform refinement (individual || integrated)
1436   if (multilevAllocControl == GREEDY_REFINEMENT)
1437     multifidelity_integrated_refinement(); // refineType is required
1438   else
1439     multifidelity_individual_refinement(); // refineType is optional
1440 
1441   // promote combined expansion to active
1442   combined_to_active();
1443   // FINAL_RESULTS are computed / printed at end of virtual core_run()
1444 }
1445 
1446 
1447 void NonDExpansion::multifidelity_reference_expansion()
1448 {
1449   // clear any persistent state from previous run (e.g., for OUU)
1450   NLev.clear(); // zero sample counters
1451   mlmfIter = 0; // zero iteration counter
1452   // remove default key (empty activeKey) since this interferes with
1453   // combine_approximation().  Also useful for ML/MF re-entrancy.
1454   uSpaceModel.clear_model_keys();
1455   // clearest to always use active stats for reference builds
1456   short orig_stats_mode = statsMetricMode; // for restoration
1457   refinement_statistics_mode(Pecos::ACTIVE_EXPANSION_STATS);
1458 
1459   // Allow either model forms or discretization levels, but not both
1460   unsigned short num_steps, fixed_index, form, lev, seq_index;  bool multilev;
1461   configure_sequence(num_steps, fixed_index, multilev, true); // MF precedence
1462   // either lev varies and form is fixed, or vice versa:
1463   unsigned short& step = (multilev) ? lev : form;  step = 0;
1464   if (multilev) { form = fixed_index;  seq_index = 2; }
1465   else          {  lev = fixed_index;  seq_index = 1; }
1466 
1467   // initial low fidelity/lowest discretization expansion
1468   configure_indices(step, form, lev, seq_index);
1469   assign_specification_sequence();
1470   compute_expansion();  // nominal LF expansion from input spec
1471   compute_statistics(INTERMEDIATE_RESULTS);
1472   bool print = (outputLevel > SILENT_OUTPUT);
1473   if (print) {
1474     Cout << "\n------------------------------------------------"
1475 	 << "\nMultifidelity UQ: low fidelity reference results"
1476 	 << "\n------------------------------------------------\n";
1477     print_results(Cout, INTERMEDIATE_RESULTS);
1478   }
1479 
1480   // loop over each of the discrepancy levels
1481   for (step=1; step<num_steps; ++step) {
1482     // configure hierarchical model indices and activate key in data fit model
1483     configure_indices(step, form, lev, seq_index);
1484     // advance to the next PCE/SC specification within the MF sequence
1485     increment_specification_sequence();
1486 
1487     // form the expansion for level i
1488     compute_expansion();  // nominal discrepancy expansion from input spec
1489     compute_statistics(INTERMEDIATE_RESULTS);
1490     if (print) {
1491       Cout << "\n-----------------------------------------------------"
1492 	   << "\nMultifidelity UQ: model discrepancy reference results"
1493 	   << "\n-----------------------------------------------------\n";
1494       print_results(Cout, INTERMEDIATE_RESULTS);
1495     }
1496   }
1497 
1498   // now aggregate expansions and report COMBINED_EXPANSION_STATS for cases
1499   // where the run will continue (individual/integrated refinement).
1500   // > If complete, then expansion combination + FINAL_RESULTS handled in
1501   //   higher level finalization operations.
1502   if (refineType) {//&& maxRefineIterations
1503     // compute/print combined reference stats
1504     refinement_statistics_mode(Pecos::COMBINED_EXPANSION_STATS);
1505     metric_roll_up(INTERMEDIATE_RESULTS); // combines approximations
1506     compute_statistics(INTERMEDIATE_RESULTS);
1507     if (print) {
1508       Cout << "\n----------------------------------------------------"
1509 	   << "\nMultifidelity UQ: statistics from combined expansion"
1510 	   << "\n----------------------------------------------------\n";
1511       print_results(Cout, INTERMEDIATE_RESULTS);
1512     }
1513   }
1514 
1515   refinement_statistics_mode(orig_stats_mode); // restore
1516 }
1517 
1518 
1519 void NonDExpansion::multifidelity_individual_refinement()
1520 {
1521   // Allow either model forms or discretization levels, but not both
1522   unsigned short num_steps, fixed_index, form, lev, seq_index;  bool multilev;
1523   configure_sequence(num_steps, fixed_index, multilev, true); // MF precedence
1524   // either lev varies and form is fixed, or vice versa:
1525   unsigned short& step = (multilev) ? lev : form;  step = 0;
1526   if (multilev) { form = fixed_index;  seq_index = 2; }
1527   else          {  lev = fixed_index;  seq_index = 1; }
1528 
1529   bool print = (outputLevel > SILENT_OUTPUT);
1530   if (refineType) {//&& maxRefineIterations
1531     // refine expansion for lowest fidelity/coarsest discretization
1532     configure_indices(step, form, lev, seq_index);
1533     //assign_specification_sequence();
1534     refine_expansion(); // uniform/adaptive refinement
1535     metric_roll_up(INTERMEDIATE_RESULTS);
1536     compute_statistics(INTERMEDIATE_RESULTS);
1537     if (print) {
1538       Cout << "\n-------------------------------------------------"
1539 	   << "\nMultifidelity UQ: low fidelity refinement results"
1540 	   << "\n-------------------------------------------------\n";
1541       print_results(Cout, INTERMEDIATE_RESULTS);
1542     }
1543 
1544     // loop over each of the discrepancy levels
1545     for (step=1; step<num_steps; ++step) {
1546       // configure hierarchical model indices and activate key in data fit model
1547       configure_indices(step, form, lev, seq_index);
1548       // advance to the next PCE/SC specification within the MF sequence
1549       //increment_specification_sequence();
1550 
1551       // update discrepancy expansion since previous level has been refined
1552       if (multilevDiscrepEmulation == RECURSIVE_EMULATION) {//&&prev_lev_updated
1553 	//update_expansion(); // no grid increment, no push
1554 
1555 	// no new sim data, compute/use new synthetic data
1556 	Cout << "\nRecompute step " << step+1 << " reference expansion due to "
1557 	     << "dependence on step " << step << " emulator.\n";
1558 	uSpaceModel.formulation_updated(true);
1559 	uSpaceModel.rebuild_approximation();
1560       }
1561 
1562       // refine the expansion for level i
1563       refine_expansion(); // uniform/adaptive refinement
1564       metric_roll_up(INTERMEDIATE_RESULTS);
1565       compute_statistics(INTERMEDIATE_RESULTS);
1566       if (print) {
1567 	Cout << "\n------------------------------------------------------"
1568 	     << "\nMultifidelity UQ: model discrepancy refinement results"
1569 	     << "\n------------------------------------------------------\n";
1570 	print_results(Cout, INTERMEDIATE_RESULTS);
1571       }
1572     }
1573   }
1574 
1575   // generate summary output across model sequence
1576   NLev.resize(num_steps);
1577   for (step=0; step<num_steps; ++step) {
1578     configure_indices(step, form, lev, seq_index);
1579     NLev[step] = uSpaceModel.approximation_data(0).points(); // first QoI
1580   }
1581   // cost specification is optional for multifidelity_expansion()
1582   RealVector cost;  query_cost(num_steps, multilev, cost); // if provided
1583   compute_equivalent_cost(NLev, cost); // compute equivalent # of HF evals
1584 }
1585 
1586 
1587 void NonDExpansion::multifidelity_integrated_refinement()
1588 {
1589   Cout << "\n-----------------------------------------------"
1590        << "\nMultifidelity UQ: initiating greedy competition"
1591        << "\n-----------------------------------------------\n";
1592   // Initialize again (or must propagate settings from mf_expansion())
1593   unsigned short num_steps, fixed_index, form, lev, seq_index;  bool multilev;
1594   configure_sequence(num_steps, fixed_index, multilev, true); // MF precedence
1595   // either lev varies and form is fixed, or vice versa:
1596   unsigned short& step = (multilev) ? lev : form; // step is an alias
1597   if (multilev) { form = fixed_index;  seq_index = 2; }
1598   else          {  lev = fixed_index;  seq_index = 1; }
1599   RealVector cost;  configure_cost(num_steps, multilev, cost);
1600 
1601   // Initialize all levels.  Note: configure_indices() is used for completeness
1602   // (uSpaceModel.active_model_key(...) is sufficient for current grid
1603   // initialization operations).
1604   for (step=0; step<num_steps; ++step) {
1605     configure_indices(step, form, lev, seq_index);
1606     pre_refinement();
1607   }
1608 
1609   // Integrated MF refinement mirrors individual adaptive refinement in
1610   // refine_expansion() in using the max_refinement_iterations specification.
1611   // This differs from multilevel_regression(), which uses max_iterations and
1612   // potentially max_solver_iterations.
1613   size_t SZ_MAX = std::numeric_limits<size_t>::max(),
1614     step_candidate, best_step, best_step_candidate,
1615     max_refine_iter = (maxRefineIterations < 0) ? 100 : maxRefineIterations;
1616   Real step_metric, best_step_metric = DBL_MAX;
1617   RealVector best_stats_star;
1618   bool print_metric = (outputLevel > SILENT_OUTPUT);
1619   while ( best_step_metric > convergenceTol && mlmfIter < max_refine_iter ) {
1620 
1621     ++mlmfIter;
1622     Cout << "\n>>>>> Begin iteration " << mlmfIter << ": competing candidates "
1623 	 << "across " << num_steps << " sequence steps\n";
1624 
1625     // Generate candidates at each level
1626     best_step_metric = 0.;  best_step = best_step_candidate = SZ_MAX;
1627     for (step=0; step<num_steps; ++step) {
1628       Cout << "\n>>>>> Generating candidate(s) for sequence step " << step+1
1629 	   << '\n';
1630 
1631       // configure hierarchical model indices and activate key in data fit model
1632       configure_indices(step, form, lev, seq_index);
1633 
1634       // This returns the best/only candidate for the current level
1635       // Note: it must roll up contributions from all levels --> step_metric
1636       step_candidate = core_refinement(step_metric, true, print_metric);
1637       if (step_candidate == SZ_MAX)
1638 	Cout << "\n<<<<< Sequence step " << step+1
1639 	     << " has saturated with no refinement candidates available.\n";
1640       else {
1641 	// core_refinement() normalizes level candidates based on the number of
1642 	// required evaluations, which is sufficient for selection of the best
1643 	// level candidate.  For selection among multiple level candidates, a
1644 	// secondary normalization for relative level cost is required.
1645 	step_metric /= sequence_cost(step, cost);
1646 	Cout << "\n<<<<< Sequence step " << step+1 << " refinement metric = "
1647 	     << step_metric << '\n';
1648 	// Assess candidate for best across all levels
1649 	if (step_metric > best_step_metric) {
1650 	  best_step        = step;        best_step_candidate = step_candidate;
1651 	  best_step_metric = step_metric; best_stats_star     = statsStar;
1652 	}
1653       }
1654     }
1655 
1656     // permanently apply best increment and update references for next increment
1657     Cout << "\n<<<<< Iteration " << mlmfIter << " completed: ";
1658     if (best_step == SZ_MAX) {
1659       Cout << "no refinement selected.  Terminating iteration.\n";
1660       best_step_metric = 0.; // kick out of loop
1661     }
1662     else {
1663       Cout << "selected refinement = sequence step " << best_step+1
1664 	   << " candidate " << best_step_candidate+1 << '\n';
1665       step = best_step; // also updates form | lev
1666       configure_indices(step, form, lev, seq_index);
1667       select_candidate(best_step_candidate);
1668       push_candidate(best_stats_star); // update stats from best (no recompute)
1669       if (print_metric)	print_results(Cout, INTERMEDIATE_RESULTS);
1670     }
1671   }
1672 
1673   // Perform final roll-up for each level and then combine levels
1674   NLev.resize(num_steps);  bool reverted;
1675   for (step=0; step<num_steps; ++step) {
1676     configure_indices(step, form, lev, seq_index);
1677     reverted = (step != best_step); // only candidate from best_step was applied
1678     post_refinement(best_step_metric, reverted);
1679     NLev[step] = uSpaceModel.approximation_data(0).points(); // first QoI
1680   }
1681   compute_equivalent_cost(NLev, cost); // compute equivalent # of HF evals
1682 }
1683 
1684 
1685 void NonDExpansion::multilevel_regression()
1686 {
1687   unsigned short num_steps, fixed_index, form, lev, seq_index;
1688   bool multilev, import_pilot;
1689   size_t max_iter = (maxIterations < 0) ? 25 : maxIterations;
1690   Real eps_sq_div_2, sum_root_var_cost, estimator_var0 = 0.;
1691 
1692   // Allow either model forms or discretization levels, but not both
1693   configure_sequence(num_steps, fixed_index, multilev, false); // ML precedence
1694   // either lev varies and form is fixed, or vice versa:
1695   unsigned short& step = (multilev) ? lev : form; // step is an alias
1696   if (multilev) { form = fixed_index;  seq_index = 2; }
1697   else          {  lev = fixed_index;  seq_index = 1; }
1698 
1699   // configure metrics and cost for each step in the sequence
1700   RealVector cost, level_metrics(num_steps);
1701   configure_cost(num_steps, multilev, cost);
1702 
1703   // virtual: base implementation clears keys, assigns stats type, et al.
1704   initialize_ml_regression(num_steps, import_pilot);
1705 
1706   // Initialize NLev and load the pilot sample from user specification
1707   NLev.assign(num_steps, 0);
1708   SizetArray delta_N_l(num_steps);
1709   if (collocPtsSeqSpec.empty() && collocRatio > 0.)
1710     infer_pilot_sample(/*collocRatio, */delta_N_l);
1711   else
1712     load_pilot_sample(collocPtsSeqSpec, delta_N_l);
1713 
1714   // now converge on sample counts per level (NLev)
1715   while ( mlmfIter <= max_iter &&
1716 	  ( Pecos::l1_norm(delta_N_l) || (mlmfIter == 0 && import_pilot) ) ) {
1717 
1718     sum_root_var_cost = 0.;
1719     for (step=0; step<num_steps; ++step) {
1720 
1721       configure_indices(step, form, lev, seq_index);
1722 
1723       if (mlmfIter == 0) { // initial expansion build
1724 	// Update solution control variable in uSpaceModel to support
1725 	// DataFitSurrModel::consistent() logic
1726 	if (import_pilot)
1727 	  uSpaceModel.update_from_subordinate_model(); // max depth
1728 
1729 	NLev[step] += delta_N_l[step]; // update total samples for this step
1730 	increment_sample_sequence(delta_N_l[step], NLev[step], step);
1731 	if (step == 0 || import_pilot)
1732 	  compute_expansion(); // init + import + build; not recursive
1733 	else
1734 	  update_expansion();  // just build; not recursive
1735 
1736 	if (import_pilot) { // update counts to include imported data
1737 	  NLev[step] = delta_N_l[step]
1738 	    = uSpaceModel.approximation_data(0).points();
1739 	  Cout << "Pilot count including import = " << delta_N_l[step] << "\n\n";
1740 	  // Trap zero samples as it will cause FPE downstream
1741 	  if (NLev[step] == 0) { // no pilot spec, no import match
1742 	    Cerr << "Error: insufficient sample recovery for sequence step "
1743 		 << step << " in multilevel_regression()." << std::endl;
1744 	    abort_handler(METHOD_ERROR);
1745 	  }
1746 	}
1747       }
1748       else if (delta_N_l[step]) {
1749 	NLev[step] += delta_N_l[step]; // update total samples for this step
1750 	increment_sample_sequence(delta_N_l[step], NLev[step], step);
1751 	// Note: import build data is not re-processed by append_expansion()
1752 	append_expansion();
1753       }
1754 
1755       switch (multilevAllocControl) {
1756       case ESTIMATOR_VARIANCE: {
1757 	Real& agg_var_l = level_metrics[step];
1758 	if (delta_N_l[step] > 0) aggregate_variance(agg_var_l);
1759 	sum_root_var_cost += std::pow(agg_var_l *
1760 	  std::pow(sequence_cost(step, cost), kappaEstimatorRate),
1761 	  1./(kappaEstimatorRate+1.));
1762         // MSE reference is ML MC aggregation for pilot(+import) sample:
1763 	if (mlmfIter == 0) estimator_var0 += agg_var_l / NLev[step];
1764 	break;
1765       }
1766       default:
1767 	if (delta_N_l[step] > 0)
1768 	  sample_allocation_metric(level_metrics[step], 2.);
1769 	//else sparsity,rank metric for this level same as previous iteration
1770 	break;
1771       }
1772     }
1773 
1774     switch (multilevAllocControl) {
1775     case ESTIMATOR_VARIANCE:
1776       if (mlmfIter == 0) { // eps^2 / 2 = var * relative factor
1777 	eps_sq_div_2 = estimator_var0 * convergenceTol;
1778 	if (outputLevel == DEBUG_OUTPUT)
1779 	  Cout << "Epsilon squared target = " << eps_sq_div_2 << '\n';
1780       }
1781       compute_sample_increment(level_metrics, cost, sum_root_var_cost,
1782 			       eps_sq_div_2, NLev, delta_N_l);
1783       break;
1784     default: // RIP_SAMPLING (ML PCE), RANK_SAMPLING (ML FT)
1785       compute_sample_increment(level_metrics, NLev, delta_N_l);
1786       break;
1787     }
1788     ++mlmfIter;
1789     Cout << "\nML regression iteration " << mlmfIter << " sample increments:\n"
1790 	 << delta_N_l << std::endl;
1791   }
1792   compute_equivalent_cost(NLev, cost); // compute equivalent # of HF evals
1793 
1794   finalize_ml_regression();
1795   // Final annotated results are computed / printed in core_run()
1796 }
1797 
1798 
1799 void NonDExpansion::
1800 initialize_ml_regression(size_t num_steps, bool& import_pilot)
1801 {
1802   mlmfIter = 0;
1803 
1804   // remove default key (empty activeKey) since this interferes with
1805   // combine_approximation().  Also useful for ML/MF re-entrancy.
1806   uSpaceModel.clear_model_keys();
1807 
1808   // all stats are stats for the active sequence step (not combined)
1809   refinement_statistics_mode(Pecos::ACTIVE_EXPANSION_STATS);
1810 
1811   // Multilevel variance aggregation requires independent sample sets
1812   std::shared_ptr<Iterator> u_sub_iter =
1813     uSpaceModel.subordinate_iterator().iterator_rep();
1814   if (u_sub_iter)
1815     std::static_pointer_cast<Analyzer>(u_sub_iter)->vary_pattern(true);
1816 
1817   // Default (overridden in derived classes)
1818   import_pilot = false;
1819 }
1820 
1821 
1822 void NonDExpansion::finalize_ml_regression()
1823 {
1824   // combine level expansions and promote to active expansion:
1825   combined_to_active();
1826 }
1827 
1828 
1829 void NonDExpansion::select_candidate(size_t best_candidate)
1830 {
1831   // permanently apply best increment and update references for next increment
1832   switch (refineControl) {
1833   case Pecos::DIMENSION_ADAPTIVE_CONTROL_GENERALIZED: {
1834     // convert incoming candidate index to selected trial set
1835     std::shared_ptr<NonDSparseGrid> nond_sparse =
1836       std::static_pointer_cast<NonDSparseGrid>
1837       (uSpaceModel.subordinate_iterator().iterator_rep());
1838     // active_mi index -> iterator mapping has not been invalidated
1839     // since candidate selection was previously deferred
1840     const std::set<UShortArray>& active_mi = nond_sparse->active_multi_index();
1841     std::set<UShortArray>::const_iterator best_cit = active_mi.begin();
1842     std::advance(best_cit, best_candidate);
1843     select_index_set_candidate(best_cit);
1844     break;
1845   }
1846   case Pecos::UNIFORM_CONTROL:  case Pecos::DIMENSION_ADAPTIVE_CONTROL_SOBOL:
1847   case Pecos::DIMENSION_ADAPTIVE_CONTROL_DECAY:
1848     select_increment_candidate();  break;
1849   }
1850 
1851   // For distinct discrepancy, promotion of best candidate does not invalidate
1852   // coefficient increments for other levels, as they are separate functions.
1853   // Only the metric roll up must be updated when pushing existing expansion
1854   // increments.  For recursive discrepancy, however, all levels above the
1855   // selected candidate are invalidated.
1856   //if (multilevDiscrepEmulation == RECURSIVE_EMULATION)
1857   //  for (lev=best_lev+1; lev<num_steps; ++lev)
1858   //    uSpaceModel.clear_popped();
1859 }
1860 
1861 
1862 void NonDExpansion::
1863 select_index_set_candidate(std::set<UShortArray>::const_iterator cit_star)
1864 {
1865   std::shared_ptr<NonDSparseGrid> nond_sparse =
1866     std::static_pointer_cast<NonDSparseGrid>
1867     (uSpaceModel.subordinate_iterator().iterator_rep());
1868   nond_sparse->update_sets(*cit_star); // invalidates cit_star
1869   uSpaceModel.push_approximation(); // uses reference in append_tensor_exp
1870   nond_sparse->update_reference();
1871 }
1872 
1873 
1874 void NonDExpansion::select_increment_candidate()
1875 {
1876   // increment the grid and, if needed, the expansion order.
1877   // can ignore best_candidate (only one candidate for now).
1878   push_increment();
1879   merge_grid(); // adopt incremented state as new reference
1880 }
1881 
1882 
1883 void NonDExpansion::
1884 select_refinement_points(const RealVectorArray& candidate_samples,
1885 			 unsigned short batch_size, RealMatrix& best_samples)
1886 {
1887   Cerr << "Error: virtual select_refinement_points() not redefined by derived "
1888        << "class.\n       NonDExpansion does not support point selection."
1889        << std::endl;
1890   abort_handler(METHOD_ERROR);
1891 }
1892 
1893 
1894 void NonDExpansion::append_expansion()
1895 {
1896   // default implementation (may be overridden by derived classes)
1897 
1898   // Reqmts: numSamplesOnModel updated and propagated to uSpaceModel
1899   //         if necessary (PCE), increment_order_from_grid() has been called
1900 
1901   // Run uSpaceModel::daceIterator to generate numSamplesOnModel
1902   uSpaceModel.subordinate_iterator().sampling_reset(numSamplesOnModel,
1903 						    true, false);
1904   uSpaceModel.run_dace();
1905   // append new DACE pts and rebuild expansion
1906   uSpaceModel.append_approximation(true);
1907 }
1908 
1909 
1910 void NonDExpansion::
1911 append_expansion(const RealMatrix& samples, const IntResponseMap& resp_map)
1912 {
1913   // default implementation (may be overridden by derived classes)
1914 
1915   // increment the dataset
1916   numSamplesOnModel += resp_map.size();
1917   // utilize rebuild
1918   uSpaceModel.append_approximation(samples, resp_map, true);
1919 }
1920 
1921 
1922 /** Used for uniform refinement of regression-based PCE / FT. */
1923 void NonDExpansion::increment_order_and_grid()
1924 {
1925   uSpaceModel.shared_approximation().increment_order();
1926   update_samples_from_order_increment();
1927 
1928   // update u-space sampler to use new sample count
1929   if (tensorRegression) {
1930     std::shared_ptr<NonDQuadrature> nond_quad
1931       = std::static_pointer_cast<NonDQuadrature>
1932       (uSpaceModel.subordinate_iterator().iterator_rep());
1933     nond_quad->samples(numSamplesOnModel);
1934     if (nond_quad->mode() == RANDOM_TENSOR)
1935       nond_quad->increment_grid(); // increment dimension quad order
1936     nond_quad->update();
1937   }
1938 
1939   // assign number of total points in DataFitSurrModel
1940   update_model_from_samples();
1941 }
1942 
1943 
1944 /** Used for uniform de-refinement of regression-based PCE / FT. */
1945 void NonDExpansion::decrement_order_and_grid()
1946 {
1947   uSpaceModel.shared_approximation().decrement_order();
1948   update_samples_from_order_decrement();
1949 
1950   // update u-space sampler to use new sample count
1951   if (tensorRegression) {
1952     std::shared_ptr<NonDQuadrature> nond_quad =
1953       std::static_pointer_cast<NonDQuadrature>
1954       (uSpaceModel.subordinate_iterator().iterator_rep());
1955     nond_quad->samples(numSamplesOnModel);
1956     //if (nond_quad->mode() == RANDOM_TENSOR) ***
1957     //  nond_quad->decrement_grid(); // decrement dimension quad order
1958     nond_quad->update();
1959   }
1960 
1961   // assign number of total points in DataFitSurrModel
1962   update_model_from_samples();
1963 }
1964 
1965 
1966 void NonDExpansion::update_samples_from_order_increment()
1967 {
1968   Cerr << "Error: no base class implementation for NonDExpansion::"
1969        << "update_samples_from_order_increment()" << std::endl;
1970   abort_handler(METHOD_ERROR);
1971 }
1972 
1973 
1974 /** Default implementation: increment/decrement update process is identical */
1975 void NonDExpansion::update_samples_from_order_decrement()
1976 { update_samples_from_order_increment(); }
1977 
1978 
1979 void NonDExpansion::update_model_from_samples()
1980 {
1981   // for updates/rebuilds, zero out the lower bound (arises from honoring
1982   // an initial user spec alongside imports and min data requirements)
1983   // > now built in as part of DataFitSurrModel::rebuild_global(), but
1984   //   multifidelity_reference_expansion() -> compute_expansion() also needs
1985   //   this for sample updates (step > 0) and resets (step = 0).
1986   uSpaceModel.subordinate_iterator().sampling_reference(0);
1987 
1988   // enforce total pts (increment managed in DataFitSurrModel::rebuild_global())
1989   std::shared_ptr<DataFitSurrModel> dfs_model =
1990     std::static_pointer_cast<DataFitSurrModel>(uSpaceModel.model_rep());
1991   dfs_model->total_points(numSamplesOnModel);
1992 }
1993 
1994 
1995 /** leave sampler_set, expansion flags, and distribution parameter
1996     settings as set previously by compute_expansion(); there should be
1997     no need to update these for an expansion refinement. */
1998 void NonDExpansion::update_expansion()
1999 {
2000   // Note: DIMENSION_ADAPTIVE_CONTROL_GENERALIZED does not utilize this fn
2001 
2002   increment_grid(); // recompute anisotropy
2003 
2004   if (uSpaceModel.push_available()) { // defaults to false
2005     switch (expansionCoeffsApproach) {
2006   //case Pecos::QUADRATURE:              case Pecos::CUBATURE:
2007     case Pecos::INCREMENTAL_SPARSE_GRID: case Pecos::HIERARCHICAL_SPARSE_GRID: {
2008       std::shared_ptr<NonDIntegration> nond_int =
2009 	std::static_pointer_cast<NonDIntegration>
2010 	(uSpaceModel.subordinate_iterator().iterator_rep());
2011       nond_int->push_grid_increment(); break;
2012     }
2013     // no-op for SAMPLING, all REGRESSION cases
2014     }
2015     uSpaceModel.push_approximation();
2016   }
2017   else {
2018     switch (expansionCoeffsApproach) {
2019     case Pecos::QUADRATURE:              case Pecos::CUBATURE:
2020     case Pecos::INCREMENTAL_SPARSE_GRID: case Pecos::HIERARCHICAL_SPARSE_GRID: {
2021       std::shared_ptr<NonDIntegration> nond_int =
2022 	std::static_pointer_cast<NonDIntegration>
2023 	(uSpaceModel.subordinate_iterator().iterator_rep());
2024       nond_int->evaluate_grid_increment();// TPQ/Cub: not currently an increment
2025       break;
2026     }
2027     }
2028     switch (expansionCoeffsApproach) {
2029     case Pecos::QUADRATURE: case Pecos::CUBATURE:
2030       // replace the previous data and rebuild (prior to incremental support)
2031       uSpaceModel.update_approximation(true); break;
2032     case Pecos::INCREMENTAL_SPARSE_GRID: case Pecos::HIERARCHICAL_SPARSE_GRID:
2033       // append new data to the existing approximation and rebuild
2034       uSpaceModel.append_approximation(true); break;
2035     default: // SAMPLING, REGRESSION: evaluate + append new data and rebuild
2036       // > if incremental unsupported, rebuild defaults to build from scratch.
2037       // > Note: DataFitSurrModel::rebuild_global() utilizes sampling_reset()
2038       //   and daceIterator.run() to define unstructured sample increment.
2039       uSpaceModel.rebuild_approximation(); break;
2040     }
2041   }
2042 }
2043 
2044 
2045 void NonDExpansion::
2046 update_u_space_sampler(size_t sequence_index, const UShortArray& approx_orders)
2047 {
2048   std::shared_ptr<Iterator> sub_iter_rep =
2049     uSpaceModel.subordinate_iterator().iterator_rep();
2050   int seed = NonDExpansion::random_seed(sequence_index);
2051   if (seed) sub_iter_rep->random_seed(seed);
2052   // replace w/ uSpaceModel.random_seed(seed)? -> u_space_sampler, shared approx
2053 
2054   if (tensorRegression) {
2055     std::shared_ptr<NonDQuadrature> nond_quad =
2056       std::static_pointer_cast<NonDQuadrature>(sub_iter_rep);
2057     nond_quad->samples(numSamplesOnModel);
2058     if (nond_quad->mode() == RANDOM_TENSOR) { // sub-sampling i/o filtering
2059       UShortArray dim_quad_order(numContinuousVars);
2060       for (size_t i=0; i<numContinuousVars; ++i)
2061 	dim_quad_order[i] = approx_orders[i] + 1;
2062       nond_quad->quadrature_order(dim_quad_order); // update ref, enforce constr
2063     }
2064     nond_quad->update(); // sanity check on sizes, likely a no-op
2065   }
2066   // test for valid sampler for case of build data import (unstructured grid)
2067   else if (sub_iter_rep) // enforce increment through sampling_reset()
2068     update_model_from_samples();
2069 }
2070 
2071 
2072 void NonDExpansion::
2073 refinement_statistics_mode(short stats_mode)//, bool clear_bits)
2074 {
2075   if (statsMetricMode != stats_mode) {
2076     statsMetricMode = stats_mode;
2077 
2078     /*
2079     // if poly_approxs share computed* trackers between active and combined,
2080     // then need to clear these trackers
2081     if (clear_bits) {
2082       std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
2083       for (size_t i=0; i<numFunctions; ++i)
2084 	poly_approxs[i].clear_computed_bits();
2085     }
2086 
2087     // Changing stats rollup does *not* invalidate prodType{1,2}Coeffs since it
2088     // is defined only for expType{1,2}Coeffs (supporting delta_*() use cases),
2089     // but combined_to_active *does* for the active model index.
2090     // HIPA::combined_to_active() clears all prodType{1,2}Coeffs, such that
2091     // product_interpolants() will evaluate to false and new interpolants are
2092     // generated when needed (TO DO: any redundancy?).
2093 
2094     // For invalidation of current product interpolant data, we do not have
2095     // to have full knowledge of all model keys coming from NonDExpansion;
2096     // re-initializing the current model keys is enough.
2097     for (i=0; i<numFunctions; ++i)
2098       ((PecosApproximation*)poly_approxs[i].approx_rep())
2099 	->initialize_products(); // modify to enumerate all model keys; rename
2100 	                         // existing to initialize_active_products()
2101     */
2102   }
2103 
2104   // propagate to DataFitSurrModel, SharedApproxData, etc.
2105   std::shared_ptr<SharedApproxData> shared_data_rep
2106     = uSpaceModel.shared_approximation().data_rep();
2107   shared_data_rep->refinement_statistics_mode(stats_mode);
2108 }
2109 
2110 
2111 void NonDExpansion::combined_to_active()
2112 {
2113   // default implementation
2114 
2115   // compute aggregate expansion (combined{MultiIndex,ExpCoeff{s,Grads}})
2116   // with limited stats support, retaining multiIndex,expansionCoeff{s,Grads}.
2117   uSpaceModel.combine_approximation();
2118   // migrate combined{MultiIndex,ExpCoeff{s,Grads}} to current active
2119   uSpaceModel.combined_to_active();
2120   // update approach for computing statistics; don't clear bits as
2121   // combined_to_active() can transfer bits from combined to active
2122   refinement_statistics_mode(Pecos::ACTIVE_EXPANSION_STATS);//, false);
2123 }
2124 
2125 
2126 size_t NonDExpansion::
2127 increment_sets(Real& delta_star, bool revert, bool print_metric)
2128 {
2129   Cout << "\n>>>>> Begin evaluation of active index sets.\n";
2130 
2131   RealVector stats_ref;
2132   pull_reference(stats_ref);
2133 
2134   // Reevaluate the effect of every active set every time, since the reference
2135   // point for the surplus calculation changes (and the overlay should
2136   // eventually be inexpensive since each point set is only evaluated once).
2137   std::shared_ptr<NonDSparseGrid> nond_sparse =
2138     std::static_pointer_cast<NonDSparseGrid>
2139     (uSpaceModel.subordinate_iterator().iterator_rep());
2140   const std::set<UShortArray>& active_mi = nond_sparse->active_multi_index();
2141   std::set<UShortArray>::const_iterator cit, cit_star = active_mi.end();
2142   Real delta; delta_star = -DBL_MAX;  size_t index = 0, index_star = _NPOS;
2143   for (cit=active_mi.begin(); cit!=active_mi.end(); ++cit, ++index) {
2144 
2145     // increment grid with current candidate
2146     Cout << "\n>>>>> Evaluating trial index set:\n" << *cit;
2147     nond_sparse->increment_set(*cit);
2148     if (uSpaceModel.push_available()) {    // has been active previously
2149       nond_sparse->push_set();
2150       uSpaceModel.push_approximation();
2151     }
2152     else {                                    // a new active set
2153       nond_sparse->evaluate_set();
2154       uSpaceModel.append_approximation(true); // rebuild
2155     }
2156 
2157     // combine expansions if necessary for metric computation:
2158     // Note: Multilevel SC overrides this fn to remove roll-up for Hier SC
2159     //       (its delta metrics can be computed w/o exp combination)
2160     metric_roll_up(REFINEMENT_RESULTS);
2161     // assess increment by computing refinement metric:
2162     // defer revert (pass false) -> simplifies best candidate tracking to follow
2163     switch (refineMetric) {
2164     case Pecos::COVARIANCE_METRIC:
2165       delta = compute_covariance_metric(false, print_metric);      break;
2166     //case Pecos::MIXED_STATS_METRIC: // TO DO
2167     //  compute_mixed_metric(); [retire compute_final_stats_metric()] break;
2168     default: //case Pecos::LEVEL_STATS_METRIC:
2169       delta = compute_level_mappings_metric(false, print_metric);  break;
2170     }
2171     compute_statistics(REFINEMENT_RESULTS);        // augment compute_*_metric()
2172     if (print_metric) print_results(Cout, REFINEMENT_RESULTS); // augment output
2173 
2174     // normalize effect of increment based on cost (# of collocation pts).
2175     // Note: increment size is nonzero since growth restriction is precluded
2176     //       for generalized sparse grids.
2177     delta /= nond_sparse->increment_size();
2178     Cout << "\n<<<<< Trial set refinement metric = " << delta << '\n';
2179     // track best increment evaluated thus far
2180     if (delta > delta_star) {
2181       cit_star = cit;  delta_star = delta;  index_star = index;
2182       pull_candidate(statsStar); // pull comp_*_metric() + augmented stats
2183     }
2184 
2185     // restore previous state (destruct order is reversed from construct order)
2186     uSpaceModel.pop_approximation(true); // store data for use in push,finalize
2187     nond_sparse->decrement_set(); // store data for use in push_set()
2188     if (revert || cit != --active_mi.end()) // else overwritten by push below
2189       push_reference(stats_ref);
2190   }
2191   Cout << "\n<<<<< Evaluation of active index sets completed.\n"
2192        << "\n<<<<< Index set selection:\n" << *cit_star;
2193 
2194   if (!revert) { // permanently apply best increment and update references
2195     select_index_set_candidate(cit_star); // invalidates cit_star
2196     push_candidate(statsStar);
2197     if (print_metric) print_results(Cout, INTERMEDIATE_RESULTS);
2198   }
2199   return index_star;
2200 }
2201 
2202 
2203 void NonDExpansion::finalize_sets(bool converged_within_tol, bool reverted)
2204 {
2205   Cout << "\n<<<<< Finalization of generalized sparse grid sets.\n";
2206   std::shared_ptr<NonDSparseGrid> nond_sparse =
2207     std::static_pointer_cast<NonDSparseGrid>
2208     (uSpaceModel.subordinate_iterator().iterator_rep());
2209   // apply all remaining increments not previously selected
2210   bool output_sets = (outputLevel >= VERBOSE_OUTPUT);
2211   nond_sparse->finalize_sets(output_sets, converged_within_tol, reverted);
2212   uSpaceModel.finalize_approximation();
2213   nond_sparse->update_reference(); // for completeness
2214 }
2215 
2216 
2217 /** computes the default refinement metric based on change in respCovariance */
2218 Real NonDExpansion::
2219 compute_covariance_metric(bool revert, bool print_metric)
2220 {
2221   // default implementation for use when direct (hierarchical) calculation
2222   // of increments is not available
2223 
2224   // Relative to computing the variance vector/covariance matrix, computing
2225   // mean values within compute_moments() adds little to no additional cost
2226   // > minor exception: PCE covariance does not require mean estimation but
2227   //   returning 1st coeff (and augmenting if all vars) is cheap
2228   // > {push,pull} of {reference,candidate} statistics to avoid recomputation
2229   //   means that prints of INTERMEDIATE results can be based on restoration
2230   //   of REFINEMENT results, so this simplifies the bridge between the two.
2231   // > Note: redundant moment estimations are protected by computed bits.
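  // > Net effect of both branches below: return ||C_new - C_ref||_F, scaled
  //   by max(SMALL_NUMBER, ||C_ref||_F) when relativeMetric is active, where
  //   C is respVariance or respCovariance depending on covarianceControl.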
2232 
2233   Real scale;
2234   switch (covarianceControl) {
2235   case DIAGONAL_COVARIANCE: {
2236     // Note that the sense/sign of delta_resp_var is unimportant as it is only
2237     // used for the norm calculation --> more convenient to compute ref - new:
2238     RealVector resp_var_ref, delta_resp_var = respVariance; // deep copy
2239     if (revert) resp_var_ref = respVariance;
2240     if (relativeMetric)
2241       scale = std::max(Pecos::SMALL_NUMBER, respVariance.normFrobenius());
2242 
2243     compute_moments(); // little to no additional cost (see above)
2244     //compute_covariance(); // minimal variance computation
2245     if (print_metric) print_covariance(Cout);
2246     delta_resp_var -= respVariance;           // compute change
2247     Real delta_norm = delta_resp_var.normFrobenius();
2248 
2249 #ifdef DEBUG
2250     Cout << "resp_var_ref:\n" << resp_var_ref
2251 	 << "respVariance:\n" << respVariance
2252 	 << "norm of delta_resp_var = " << delta_norm << std::endl;
2253 #endif // DEBUG
2254 
2255     if (revert) respVariance = resp_var_ref;
2256 
2257     // For adaptation started from level = 0, reference covariance = 0.
2258     // Trap this and also avoid possible bogus termination from using absolute
2259     // change compared against relative conv tol.
2260     return (relativeMetric) ? delta_norm / scale : delta_norm;
2261     break;
2262   }
2263   case FULL_COVARIANCE: {
2264     // Note that the sense/sign of delta_resp_covar is unimportant as it is only
2265     // used for the norm calculation --> more convenient to compute ref - new:
2266     RealSymMatrix resp_covar_ref, delta_resp_covar = respCovariance;// deep copy
2267     if (revert) resp_covar_ref = respCovariance;
2268     if (relativeMetric)
2269       scale = std::max(Pecos::SMALL_NUMBER, respCovariance.normFrobenius());
2270 
2271     compute_moments(); // little to no additional cost (see above)
2272     compute_off_diagonal_covariance();
2273     //compute_covariance(); // minimal covariance computation
2274     if (print_metric) print_covariance(Cout);
2275     delta_resp_covar -= respCovariance;              // compute change
2276     Real delta_norm = delta_resp_covar.normFrobenius();
2277 
2278 #ifdef DEBUG
2279     Cout << "resp_covar_ref:\n" << resp_covar_ref
2280 	 << "respCovariance:\n" << respCovariance
2281 	 << "norm of delta_resp_covar = " << delta_norm << std::endl;
2282 #endif // DEBUG
2283 
2284     if (revert) respCovariance = resp_covar_ref;
2285 
2286     // For adaptation started from level = 0, reference covariance = 0.
2287     // Trap this and also avoid possible bogus termination from using absolute
2288     // change compared against relative conv tol.
2289     return (relativeMetric) ? delta_norm / scale : delta_norm;
2290     break;
2291   }
2292   default: // NO_COVARIANCE or failure to redefine DEFAULT_COVARIANCE
2293     return 0.;
2294     break;
2295   }
2296 }
2297 
2298 
2299 /** computes a "goal-oriented" refinement metric employing computed*Levels */
2300 Real NonDExpansion::
2301 compute_level_mappings_metric(bool revert, bool print_metric)
2302 {
2303   // default implementation for use when direct (hierarchical) calculation
2304   // of increments is not available
2305 
2306   // cache previous statistics
2307   size_t offset = 0;
2308   RealVector level_maps_ref;  pull_level_mappings(level_maps_ref, offset);
2309 
2310   // compute/print new statistics
2311   compute_level_mappings();
2312   if (print_metric) print_level_mappings(Cout);
2313   RealVector level_maps_new;  pull_level_mappings(level_maps_new, offset);
2314 
2315 #ifdef DEBUG
2316   Cout << "level_maps_ref:\n" << level_maps_ref
2317        << "level_maps_new:\n" << level_maps_new << std::endl;
2318 #endif // DEBUG
2319 
2320   // sum up only the level mapping stats (don't mix with mean and variance due
2321   // to scaling issues).  Note: if the level mappings are of mixed type, then
2322   // would need to scale with a target value or measure norm of relative change.
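  // i.e., the loop below returns sqrt( sum_i (z_new_i - z_ref_i)^2 ), divided
  // by max(SMALL_NUMBER, sqrt( sum_i z_ref_i^2 )) when relativeMetric is on.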
2323   Real sum_sq = 0., scale_sq = 0.;
2324   size_t i, cntr = offset;
2325   for (i=0; i<totalLevelRequests; ++i, ++cntr) {
2326     // simple approach takes 2-norm of level mappings (no relative scaling),
2327     // which should be fine for mappings that are not of mixed type
2328     Real ref = level_maps_ref[cntr], delta = level_maps_new[cntr] - ref;
2329     if (relativeMetric) scale_sq += ref * ref;
2330     sum_sq += delta * delta;
2331   }
2332 
2333   if (revert) push_level_mappings(level_maps_ref, offset);
2334 
2335   // Risk of zero reference is reduced relative to covariance control, but not
2336   // eliminated. Trap this and also avoid possible bogus termination from using
2337   // absolute change compared against relative conv tol.
2338   if (relativeMetric) {
2339     Real scale = std::max(Pecos::SMALL_NUMBER, std::sqrt(scale_sq));
2340     return std::sqrt(sum_sq) / scale;
2341   }
2342   else
2343     return std::sqrt(sum_sq);
2344 }
2345 
2346 
2347 /*
2348 // computes a "goal-oriented" refinement metric employing finalStatistics
2349 Real NonDExpansion::
2350 compute_final_statistics_metric(bool revert, bool print_metric)
2351 {
2352   // default implementation for use when direct (hierarchical) calculation
2353   // of increments is not available
2354 
2355   // cache previous statistics
2356   RealVector final_stats_ref = finalStatistics.function_values(); // deep copy
2357   // *** Note: this requires that the reference includes FINAL_RESULTS,
2358   // *** which is not currently true (only INTERMEDIATE_RESULTS)
2359 
2360   // compute/print new statistics
2361   compute_statistics(FINAL_RESULTS); // no finalStats for REFINEMENT_RESULTS
2362   if (print_metric) print_results(Cout, FINAL_RESULTS);
2363   const RealVector& final_stats_new = finalStatistics.function_values();
2364 
2365 #ifdef DEBUG
2366   Cout << "final_stats_ref:\n" << final_stats_ref
2367        << "final_stats_new:\n" << final_stats_new << std::endl;
2368 #endif // DEBUG
2369 
2370   // sum up only the level mapping stats (don't mix with mean and variance due
2371   // to scaling issues).  Note: if the level mappings are of mixed type, then
2372   // would need to scale with a target value or measure norm of relative change.
2373   Real sum_sq = 0., scale_sq = 0.;
2374   size_t i, j, cntr = 0, num_lev_i, moment_offset = (finalMomentsType) ? 2 : 0;
2375   for (i=0; i<numFunctions; ++i) {
2376 
2377     // This modification can be used to mirror the metrics in Gerstner & Griebel
2378     // 2003.  However, their approach uses a hierarchical integral contribution
2379     // evaluation which is less prone to roundoff (and is refined to very tight
2380     // tolerances in the paper).
2381     //if (!finalMomentsType) { Cerr << "Error: "; abort_handler(METHOD_ERROR); }
2382     //sum_sq += std::pow(delta_final_stats[cntr], 2.); // mean
2383     //cntr += moment_offset + requestedRespLevels[i].length() +
2384     //  requestedProbLevels[i].length() + requestedRelLevels[i].length() +
2385     //  requestedGenRelLevels[i].length();
2386 
2387     // *** TO DO: support mixed metrics based on finalStats ASV
2388     cntr += moment_offset; // skip moments if final_stats
2389     num_lev_i = requestedRespLevels[i].length() +
2390       requestedProbLevels[i].length() + requestedRelLevels[i].length() +
2391       requestedGenRelLevels[i].length();
2392     for (j=0; j<num_lev_i; ++j, ++cntr) {
2393       Real ref  = final_stats_ref[cntr], delta = final_stats_new[cntr] - ref;
2394       if (relativeMetric) scale_sq += ref * ref;
2395       sum_sq += delta * delta;
2396     }
2397   }
2398 
2399   if (revert) finalStatistics.function_values(final_stats_ref);
2400 
2401   // Risk of zero reference is reduced relative to covariance control, but not
2402   // eliminated. Trap this and also avoid possible bogus termination from using
2403   // absolute change compared against relative conv tol.
2404   if (relativeMetric) {
2405     Real scale = std::max(Pecos::SMALL_NUMBER, std::sqrt(scale_sq));
2406     return std::sqrt(sum_sq) / scale;
2407   }
2408   else
2409     return std::sqrt(sum_sq);
2410 }
2411 */
2412 
2413 
2414 void NonDExpansion::
2415 compute_sample_increment(const RealVector& agg_var, const RealVector& cost,
2416 			 Real sum_root_var_cost, Real eps_sq_div_2,
2417 			 const SizetArray& N_l, SizetArray& delta_N_l)
2418 {
2419   // case ESTIMATOR_VARIANCE:
2420 
2421   // eps^2 / 2 target computed based on relative tolerance: total MSE = eps^2
2422   // which is equally apportioned (eps^2 / 2) among discretization MSE and
2423   // estimator variance (\Sum var_Y_l / NLev).  Since we do not know the
2424   // discretization error, we compute an initial estimator variance and then
2425   // seek to reduce it by a relative_factor <= 1.
2426 
2427   // We assume a functional dependence of estimator variance on NLev
2428   // for minimizing aggregate cost subject to an MSE error balance:
2429   //   Var(Q-hat) = sigma_Q^2 / (gamma NLev^kappa)
2430   // where Monte Carlo has gamma = kappa = 1.  To fit these parameters,
2431   // one approach is to numerically estimate the variance in the mean
2432   // estimator (alpha_0) from two sources:
2433   // > from variation across k folds for the selected CV settings
2434   // > from var decrease as NLev increases across iters
2435 
2436   // compute and accumulate variance of mean estimator from the set of
2437   // k-fold results within the selected settings from cross-validation:
2438   //Real cv_var_i = poly_approx_rep->
2439   //  cross_validation_solver().cv_metrics(MEAN_ESTIMATOR_VARIANCE);
2440   //  (need to make MultipleSolutionLinearModelCrossValidationIterator
2441   //   cv_iterator class scope)
2442   // To validate this approach, the actual estimator variance can be
2443   // computed and compared with the CV variance approximation (as for
2444   // traditional CV error plots, but predicting estimator variance
2445   // instead of L2 fit error).
2446 
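  // Illustrative derivation (a sketch, not taken verbatim from the source; it
  // assumes sum_root_var_cost aggregates (var_l * cost_l^kappa)^(1/(kappa+1))
  // over levels -- see the caller for its actual definition):
  //   minimize    Sum_l N_l cost_l
  //   subject to  Sum_l var_l / (gamma N_l^kappa) = eps^2 / 2.
  // Lagrangian stationarity gives N_l = (var_l / cost_l)^(1/(kappa+1)) * fact
  // with fact = [ sum_root_var_cost / (gamma eps^2/2) ]^(1/kappa), as coded
  // below; for Monte Carlo (gamma = kappa = 1) this reduces to the familiar
  //   N_l = (2/eps^2) sqrt(var_l/cost_l) Sum_k sqrt(var_k cost_k).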
2447   // update targets based on variance estimates
2448   Real new_N_l; size_t lev, num_lev = N_l.size();
2449   Real fact = std::pow(sum_root_var_cost / eps_sq_div_2 / gammaEstimatorScale,
2450 		       1. / kappaEstimatorRate);
2451   for (lev=0; lev<num_lev; ++lev) {
2452     new_N_l = std::pow(agg_var[lev] / sequence_cost(lev, cost),
2453 		       1. / (kappaEstimatorRate+1.)) * fact;
2454     delta_N_l[lev] = one_sided_delta(N_l[lev], new_N_l);
2455   }
2456 }
2457 
2458 
2459 void NonDExpansion::aggregate_variance(Real& agg_var_l)
2460 {
2461   // case ESTIMATOR_VARIANCE:
2462   // statsMetricMode remains as Pecos::ACTIVE_EXPANSION_STATS
2463 
2464   // control ML using aggregated variance across the vector of QoI
2465   // (alternate approach: target QoI with largest variance)
2466   agg_var_l = 0.;  Real var_l;
2467   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
2468   for (size_t qoi=0; qoi<numFunctions; ++qoi) {
2469     var_l = poly_approxs[qoi].variance(); // for active level
2470     agg_var_l += var_l;
2471     if (outputLevel >= DEBUG_OUTPUT)
2472       Cout << "Variance(qoi " << qoi+1 << ") = " << var_l << '\n';
2473   }
2474 }
2475 
2476 
2477 /** Default implementation redefined by Multilevel derived classes. */
2478 void NonDExpansion::infer_pilot_sample(/*Real ratio, */SizetArray& delta_N_l)
2479 {
2480   Cerr << "Error: no default implementation for infer_pilot_sample() used by "
2481        << "multilevel expansions." << std::endl;
2482   abort_handler(METHOD_ERROR);
2483 }
2484 
2485 
2486 void NonDExpansion::assign_specification_sequence()
2487 {
2488   Cerr << "Error: no default implementation for assign_specification_"
2489        << "sequence() used by multifidelity expansions." << std::endl;
2490   abort_handler(METHOD_ERROR);
2491 }
2492 
2493 
2494 /** Default implementation redefined by Multilevel derived classes. */
2495 void NonDExpansion::increment_specification_sequence()
2496 {
2497   Cerr << "Error: no default implementation for increment_specification_"
2498        << "sequence() used by multifidelity expansions." << std::endl;
2499   abort_handler(METHOD_ERROR);
2500 }
2501 
2502 
2503 void NonDExpansion::
2504 increment_sample_sequence(size_t new_samp, size_t total_samp, size_t step)
2505 {
2506   Cerr << "Error: no default implementation for increment_sample_sequence() "
2507        << "defined for multilevel_regression()." << std::endl;
2508   abort_handler(METHOD_ERROR);
2509 }
2510 
2511 
2512 void NonDExpansion::sample_allocation_metric(Real& metric, Real power)
2513 {
2514   Cerr << "Error: no default implementation for sample_allocation_metric() "
2515        << "required for multilevel_regression()." << std::endl;
2516   abort_handler(METHOD_ERROR);
2517 }
2518 
2519 
2520 void NonDExpansion::
2521 compute_sample_increment(const RealVector& lev_metrics, const SizetArray& N_l,
2522 			 SizetArray& delta_N_l)
2523 {
2524   Cerr << "Error: no default implementation for compute_sample_increment() "
2525        << "defined for multilevel_regression()." << std::endl;
2526   abort_handler(METHOD_ERROR);
2527 }
2528 
2529 
2530 void NonDExpansion::reduce_total_sobol_sets(RealVector& avg_sobol)
2531 {
2532   // anisotropy based on total Sobol indices (univariate effects only) averaged
2533   // over the response fn set.  [Addition of interaction effects based on
2534   // individual Sobol indices would require a nonlinear index set constraint
2535   // within anisotropic sparse grids.]
2536 
2537   if (numFunctions > 1) {
2538     if (avg_sobol.empty()) avg_sobol.size(numContinuousVars); // init to 0
2539     else                   avg_sobol = 0.;
2540   }
2541 
2542   size_t i;
2543   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
2544   for (i=0; i<numFunctions; ++i) {
2545     Approximation& approx_i = poly_approxs[i];
2546     if (vbdOrderLimit) // prevent print/export of uninitialized memory
2547       approx_i.clear_component_effects();
2548     else // no order limit --> component used within total
2549       approx_i.compute_component_effects();
2550     approx_i.compute_total_effects(); // from scratch or using component
2551 
2552     // Note: response functions for which negligible variance is detected have
2553     // their totalSobolIndices assigned to zero.  This avoids corrupting the
2554     // aggregation, although the scaling that follows could be improved to
2555     // divide by the number of nonzero contributions (avg_sobol is currently
2556     // used only in a relative sense, so this is low priority).
2557     if (numFunctions > 1) avg_sobol += approx_i.total_sobol_indices();
2558     else                  avg_sobol  = approx_i.total_sobol_indices();
2559   }
2560   if (numFunctions > 1)
2561     avg_sobol.scale(1./(Real)numFunctions);
2562 
2563   // manage small values that are not 0 (SGMGA has special handling for 0)
2564   Real pref_tol = 1.e-2; // TO DO
2565   for (i=0; i<numContinuousVars; ++i)
2566     if (std::abs(avg_sobol[i]) < pref_tol)
2567       avg_sobol[i] = 0.;
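  // e.g. (illustrative): with pref_tol = 1e-2, an averaged vector such as
  // (0.62, 0.005, 0.37) is snapped to (0.62, 0., 0.37) before being used to
  // update the grid anisotropy.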
2568 
2569   if (outputLevel >= NORMAL_OUTPUT)
2570     Cout << "\nUpdating anisotropy from average of total Sobol indices:\n"
2571 	 << avg_sobol << std::endl;
2572 }
2573 
2574 
2575 void NonDExpansion::reduce_decay_rate_sets(RealVector& min_decay)
2576 {
2577   // anisotropy based on linear approximation to coefficient decay rates for
2578   // each dimension as measured from univariate PCE terms.  In this case,
2579   // averaging tends to wash out the interesting anisotropy, especially if
2580   // some functions converge quickly with high rates.  Thus, it is more
2581   // appropriate to extract the minimum decay rates over the response fn set.
2582 
2583   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
2584   // This context can be specific to PCE via Pecos
2585   std::shared_ptr<PecosApproximation> poly_approx_rep =
2586     std::static_pointer_cast<PecosApproximation>(poly_approxs[0].approx_rep());
2587   min_decay = poly_approx_rep->dimension_decay_rates();
2588   size_t i, j;
2589   for (i=1; i<numFunctions; ++i) {
2590     poly_approx_rep = std::static_pointer_cast<PecosApproximation>(
2591       poly_approxs[i].approx_rep());
2592     const RealVector& decay_i = poly_approx_rep->dimension_decay_rates();
2593     for (j=0; j<numContinuousVars; ++j)
2594       if (decay_i[j] < min_decay[j])
2595 	min_decay[j] = decay_i[j];
2596   }
2597   // enforce a lower bound on minimum decay (disallow negative/zero decay rates)
2598   Real decay_tol = 1.e-2; // TO DO
2599   for (j=0; j<numContinuousVars; ++j)
2600     if (min_decay[j] < decay_tol)
2601       min_decay[j] = decay_tol;
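  // e.g. (illustrative): with decay_tol = 1e-2, minimum rates such as
  // (2.4, 0.003, -0.1) are floored to (2.4, 0.01, 0.01), consistent with the
  // disallowed negative/zero rates noted above.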
2602 
2603   if (outputLevel >= NORMAL_OUTPUT)
2604     Cout << "\nUpdating anisotropy from minimum decay rates:\n" << min_decay
2605 	 << std::endl;
2606 }
2607 
2608 
2609 void NonDExpansion::compute_active_diagonal_variance()
2610 {
2611   bool warn_flag = false;
2612   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
2613   for (size_t i=0; i<numFunctions; ++i) {
2614     Approximation& approx_i = poly_approxs[i];
2615     Real& var_i = (covarianceControl == DIAGONAL_COVARIANCE)
2616                 ? respVariance[i] : respCovariance(i,i);
2617     if (approx_i.expansion_coefficient_flag())
2618       var_i = (allVars) ? approx_i.variance(initialPtU) : approx_i.variance();
2619     else
2620       { warn_flag = true; var_i = 0.; }
2621   }
2622   if (warn_flag)
2623     Cerr << "Warning: expansion coefficients unavailable in NonDExpansion::"
2624 	 << "compute_covariance().\n         Zeroing affected variance terms."
2625 	 << std::endl;
2626 }
2627 
2628 
2629 void NonDExpansion::compute_active_off_diagonal_covariance()
2630 {
2631   size_t i, j;
2632   bool warn_flag = false;
2633   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
2634   for (i=0; i<numFunctions; ++i) {
2635     Approximation& approx_i = poly_approxs[i];
2636     if (approx_i.expansion_coefficient_flag())
2637       for (j=0; j<i; ++j) {
2638 	Approximation& approx_j = poly_approxs[j];
2639 	if (approx_j.expansion_coefficient_flag())
2640 	  respCovariance(i,j) = (allVars) ?
2641 	    approx_i.covariance(initialPtU, approx_j) :
2642 	    approx_i.covariance(approx_j);
2643 	else
2644 	  { warn_flag = true; respCovariance(i,j) = 0.; }
2645       }
2646     else {
2647       warn_flag = true;
2648       for (j=0; j<i; ++j)
2649 	respCovariance(i,j) = 0.;
2650     }
2651   }
2652   if (warn_flag)
2653     Cerr << "Warning: expansion coefficients unavailable in NonDExpansion::"
2654 	 << "compute_off_diagonal_covariance().\n         Zeroing affected "
2655 	 << "covariance terms." << std::endl;
2656 }
2657 
2658 
2659 void NonDExpansion::compute_combined_diagonal_variance()
2660 {
2661   bool warn_flag = false;
2662   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
2663   for (size_t i=0; i<numFunctions; ++i) {
2664     Approximation& approx_i = poly_approxs[i];
2665     Real& var_i = (covarianceControl == DIAGONAL_COVARIANCE)
2666                 ? respVariance[i] : respCovariance(i,i);
2667     if (approx_i.expansion_coefficient_flag())
2668       var_i = (allVars) ? approx_i.combined_covariance(initialPtU, approx_i)
2669 	                : approx_i.combined_covariance(approx_i);
2670     else
2671       { warn_flag = true; var_i = 0.; }
2672   }
2673 
2674   if (warn_flag)
2675     Cerr << "Warning: expansion coefficients unavailable in NonDExpansion::"
2676 	 << "compute_combined_covariance().\n         Zeroing affected "
2677 	 << "covariance terms." << std::endl;
2678 }
2679 
2680 
2681 void NonDExpansion::compute_combined_off_diagonal_covariance()
2682 {
2683   size_t i, j;
2684   bool warn_flag = false;
2685   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
2686   for (i=0; i<numFunctions; ++i) {
2687     Approximation& approx_i = poly_approxs[i];
2688     if (approx_i.expansion_coefficient_flag())
2689       for (j=0; j<i; ++j) {
2690 	Approximation& approx_j = poly_approxs[j];
2691 	if (approx_j.expansion_coefficient_flag())
2692 	  respCovariance(i,j) = (allVars) ?
2693 	    approx_i.combined_covariance(initialPtU, approx_j) :
2694 	    approx_i.combined_covariance(approx_j);
2695 	else
2696 	  { warn_flag = true; respCovariance(i,j) = 0.; }
2697       }
2698     else {
2699       warn_flag = true;
2700       for (j=0; j<=i; ++j)
2701 	respCovariance(i,j) = 0.;
2702     }
2703   }
2704 
2705   if (warn_flag)
2706     Cerr << "Warning: expansion coefficients unavailable in NonDExpansion::"
2707 	 << "compute_off_diagonal_combined_covariance().\n         Zeroing "
2708 	 << "affected covariance terms." << std::endl;
2709 }
2710 
2711 
2712 /** Calculate analytic and numerical statistics from the expansion and
2713     log results within final_stats for use in OUU. */
2714 void NonDExpansion::compute_statistics(short results_state)
2715 {
2716   // restore variable settings following build/refine: supports local
2717   // sensitivities, expansion/importance sampling for all vars mode
2718   // (uses ALEATORY_UNCERTAIN sampling mode), and external uses of the
2719   // emulator model (emulator-based inference).
2720   //uSpaceModel.continuous_variables(initialPtU);
2721 
2722   switch (results_state) {
2723   case REFINEMENT_RESULTS:
2724     // compute_{covariance,level_mapping,final_statistics}_metric() performs
2725     // the necessary computations for resolving delta.norm()
2726 
2727     // mirror requirements for additional diagnostics in print_results()
2728     //switch (refineControl) {
2729     //case Pecos::DIMENSION_ADAPTIVE_CONTROL_GENERALIZED:
2730       // print_smolyak_multi_index() requires nothing additional
2731       //break;
2732     //case Pecos::DIMENSION_ADAPTIVE_CONTROL_DECAY:
2733       // dimension_decay_rates() output only requires coefficients + basis norms
2734       //break;
2735     //case Pecos::DIMENSION_ADAPTIVE_CONTROL_SOBOL:
2736       // print_sobol_indices() requires compute_sobol_indices(), but
2737       // increment_grid() --> reduce_total_sobol_sets() takes care of this
2738       //compute_sobol_indices();
2739       //break;
2740     //}
2741     break;
2742   case INTERMEDIATE_RESULTS:
2743     switch (refineMetric) {
2744     case Pecos::NO_METRIC: // possible for multifidelity_expansion()
2745       compute_moments();
2746       if (totalLevelRequests) {
2747 	if (allVars) uSpaceModel.continuous_variables(initialPtU); // see top
2748 	compute_level_mappings();
2749       }
2750       break;
2751     case Pecos::COVARIANCE_METRIC:
2752       compute_moments(); // no additional cost (mean,variance reused)
2753       if (covarianceControl == FULL_COVARIANCE)
2754 	compute_off_diagonal_covariance();
2755       break;
2756     case Pecos::MIXED_STATS_METRIC:
2757       if (allVars) uSpaceModel.continuous_variables(initialPtU); // see top
2758       compute_moments(); compute_level_mappings();
2759       break;
2760     case Pecos::LEVEL_STATS_METRIC:
2761       if (allVars) uSpaceModel.continuous_variables(initialPtU); // see top
2762       compute_level_mappings();
2763       break;
2764     }
2765     break;
2766   case FINAL_RESULTS:
2767     uSpaceModel.continuous_variables(initialPtU); // see top comment
2768     // -----------------------------
2769     // Calculate analytic statistics: includes derivs + finalStats updating
2770     // -----------------------------
2771     compute_analytic_statistics(); // stats derived from exp coeffs
2772     // ------------------------------
2773     // Calculate numerical statistics: with finalStats updating (no derivs)
2774     // ------------------------------
2775     compute_numerical_statistics(); // stats from sampling on expansion
2776 
2777     // update finalStatistics and archive results
2778     update_final_statistics(); // augments updates embedded above
2779     if (resultsDB.active()) { // archive the active variables with the results
2780       resultsDB.insert(run_identifier(), resultsNames.cv_labels,
2781 		       iteratedModel.continuous_variable_labels());
2782       resultsDB.insert(run_identifier(), resultsNames.fn_labels,
2783 		       iteratedModel.response_labels());
2784     }
2785     archive_moments();
2786     archive_coefficients();
2787     if (vbdFlag) archive_sobol_indices();
2788     break;
2789   }
2790 }
2791 
2792 
2793 void NonDExpansion::compute_level_mappings()
2794 {
2795   // for use with incremental results states (combines code from
2796   // compute_analytic_statistics() and compute_numerical_statistics(),
2797   // which support the final results state)
2798 
2799   // start with numerical, then overlay analytic below
2800   compute_numerical_level_mappings();
2801 
2802   // flags for limiting unneeded computation (matched in print_results())
2803   bool combined_stats = (statsMetricMode == Pecos::COMBINED_EXPANSION_STATS),
2804        z_to_beta = (respLevelTarget == RELIABILITIES);
2805 
2806   // loop over response fns and compute/store analytic stats/stat grads
2807   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
2808   const ShortArray& final_asv = finalStatistics.active_set_request_vector();
2809   Real mu, var, sigma, z_bar, beta_bar;
2810   size_t i, j, rl_len, pl_len, bl_len, cntr = 0,
2811     moment_offset = (finalMomentsType) ? 2 : 0;
2812 
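  // cntr traverses the finalStatistics ASV, which is laid out per response
  // function as: [mean, std dev/variance (when finalMomentsType)],
  // [response levels], [probability levels], [reliability levels],
  // [generalized reliability levels].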
2813   for (i=0; i<numFunctions; ++i) {
2814     Approximation& approx_i = poly_approxs[i];
2815 
2816     rl_len = requestedRespLevels[i].length();
2817     pl_len = requestedProbLevels[i].length();
2818     bl_len = requestedRelLevels[i].length();
2819 
2820     cntr += moment_offset;
2821 
2822     // Note: corresponding logic in NonDExpansion::compute_expansion() defines
2823     // expansionCoeffFlag as needed to support final data requirements.
2824     // If not full stats, suppress secondary moment calculations.
2825     bool moments_flag = false;
2826     if (z_to_beta) {
2827       size_t offset = cntr;
2828       for (j=0; j<rl_len; ++j)
2829 	if (final_asv[j+offset] & 1)
2830 	  { moments_flag = true; break; }
2831     }
2832     if (!moments_flag) {
2833       size_t offset = cntr + rl_len + pl_len;
2834       for (j=0; j<bl_len; ++j)
2835 	if (final_asv[j+offset] & 1)
2836 	  { moments_flag = true; break; }
2837     }
2838     if (moments_flag) {
2839       if (allVars)
2840 	approx_i.compute_moments(initialPtU, false, combined_stats);
2841       else
2842 	approx_i.compute_moments(false, combined_stats);
2843 
2844       const RealVector& moments = approx_i.moments(); // virtual
2845       mu = moments[0]; var = moments[1]; // Pecos provides central moments
2846 
2847       if (var >= 0.)
2848 	sigma = std::sqrt(var);
2849       else { // negative variance can happen with SC on sparse grids
2850 	Cerr << "Warning: stochastic expansion variance is negative in "
2851 	     << "computation of std deviation.\n         Setting std "
2852 	     << "deviation to zero." << std::endl;
2853 	sigma = 0.;
2854       }
2855     }
2856 
2857     if (z_to_beta) {
2858       for (j=0; j<rl_len; ++j, ++cntr)
2859 	if (final_asv[cntr] & 1) {
2860 	  z_bar = requestedRespLevels[i][j];
2861 	  if (sigma > Pecos::SMALL_NUMBER)
2862 	    computedRelLevels[i][j] = (cdfFlag) ?
2863 	      (mu - z_bar)/sigma : (z_bar - mu)/sigma;
2864 	  else
2865 	    computedRelLevels[i][j] =
2866 	      ( (cdfFlag && mu <= z_bar) || (!cdfFlag && mu > z_bar) ) ?
2867 	      -Pecos::LARGE_NUMBER : Pecos::LARGE_NUMBER;
2868 	}
2869     }
2870     else
2871       cntr += rl_len;
2872 
2873     cntr += pl_len;
2874 
2875     for (j=0; j<bl_len; ++j, ++cntr)
2876       if (final_asv[cntr] & 1) {
2877         beta_bar = requestedRelLevels[i][j];
2878 	computedRespLevels[i][j+pl_len] = (cdfFlag) ?
2879 	  mu - beta_bar * sigma : mu + beta_bar * sigma;
2880       }
2881 
2882     cntr += requestedGenRelLevels[i].length();
2883   }
2884 }
2885 
2886 
2887 void NonDExpansion::compute_numerical_level_mappings()
2888 {
2889   // for use with incremental results states (combines code from
2890   // compute_analytic_statistics() and compute_numerical_statistics(),
2891   // which support the final results state)
2892 
2893   // perform sampling on expansion for any numerical level mappings
2894   RealVector  exp_sampler_stats;  RealVectorArray imp_sampler_stats;
2895   RealRealPairArray min_max_fns;  ShortArray            sampler_asv;
2896   define_sampler_asv(sampler_asv);
2897   if (non_zero(sampler_asv)) {
2898     run_sampler(sampler_asv, exp_sampler_stats);
2899     refine_sampler(imp_sampler_stats, min_max_fns);
2900   }
2901   std::shared_ptr<NonDSampling> exp_sampler_rep =
2902     std::static_pointer_cast<NonDSampling>(expansionSampler.iterator_rep());
2903 
2904   // flags for limiting unneeded computation (matched in print_results())
2905   bool z_to_beta = (respLevelTarget == RELIABILITIES),
2906        imp_sampling = !importanceSampler.is_null();
2907 
2908   // loop over response fns and compute/store analytic stats/stat grads
2909   const ShortArray& final_asv = finalStatistics.active_set_request_vector();
2910   size_t i, j, rl_len, pl_len, bl_len, gl_len, cntr = 0, sampler_cntr = 0,
2911     moment_offset = (finalMomentsType) ? 2 : 0, sampler_moment_offset = 0;
2912   if (exp_sampler_rep && exp_sampler_rep->final_moments_type())
2913     sampler_moment_offset = 2;
2914 
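  // Note: sampler_cntr indexes exp_sampler_stats, which holds only the
  // numerically estimated mappings (plus sampler moments when provided);
  // analytic z->beta mappings are absent, so cntr and sampler_cntr advance
  // at different rates below.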
2915   for (i=0; i<numFunctions; ++i) {
2916     rl_len = requestedRespLevels[i].length();
2917     pl_len = requestedProbLevels[i].length();
2918     bl_len = requestedRelLevels[i].length();
2919     gl_len = requestedGenRelLevels[i].length();
2920 
2921     cntr  += moment_offset;  sampler_cntr += sampler_moment_offset;
2922 
2923     if (z_to_beta)
2924       cntr += rl_len; // don't increment sampler_cntr
2925     else {
2926       for (j=0; j<rl_len; ++j, ++cntr, ++sampler_cntr) {
2927 	if (final_asv[cntr] & 1) {
2928 	  Real p = (imp_sampling) ? imp_sampler_stats[i][j]
2929 	                          : exp_sampler_stats[sampler_cntr];
2930 	  if (respLevelTarget == PROBABILITIES)
2931 	    computedProbLevels[i][j] = p;
2932 	  else if (respLevelTarget == GEN_RELIABILITIES)
2933 	    computedGenRelLevels[i][j]
2934 	      = -Pecos::NormalRandomVariable::inverse_std_cdf(p);
2935 	}
2936       }
2937     }
2938 
2939     for (j=0; j<pl_len; ++j, ++cntr, ++sampler_cntr)
2940       if (final_asv[cntr] & 1)
2941 	computedRespLevels[i][j] = exp_sampler_stats[sampler_cntr];
2942 
2943     cntr += bl_len; // don't increment sampler_cntr
2944 
2945     for (j=0; j<gl_len; ++j, ++cntr, ++sampler_cntr)
2946       if (final_asv[cntr] & 1)
2947 	computedRespLevels[i][j+pl_len+bl_len]
2948 	  = exp_sampler_stats[sampler_cntr];
2949   }
2950 }
2951 
2952 
2953 void NonDExpansion::compute_moments()
2954 {
2955   // for use with incremental results states
2956 
2957   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
2958   bool combined_stats = (statsMetricMode == Pecos::COMBINED_EXPANSION_STATS);
2959   for (size_t i=0; i<numFunctions; ++i) {
2960     Approximation& approx_i = poly_approxs[i];
2961     if (approx_i.expansion_coefficient_flag()) {
2962       if (allVars)
2963 	approx_i.compute_moments(initialPtU, false, combined_stats);
2964       else
2965 	approx_i.compute_moments(false, combined_stats);
2966 
2967       // extract variance (Pecos provides central moments)
2968       Real var = (combined_stats) ?
2969 	approx_i.combined_moment(1) : approx_i.moment(1);
2970       if (covarianceControl == DIAGONAL_COVARIANCE)  respVariance[i]     = var;
2971       else if (covarianceControl == FULL_COVARIANCE) respCovariance(i,i) = var;
2972     }
2973   }
2974 }
2975 
2976 
2977 void NonDExpansion::compute_sobol_indices()
2978 {
2979   // for use with incremental results states
2980 
2981   if (!vbdFlag) return;
2982 
2983   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
2984   for (size_t i=0; i<numFunctions; ++i) {
2985     Approximation& approx_i = poly_approxs[i];
2986     if (approx_i.expansion_coefficient_flag()) {
2987       approx_i.compute_component_effects(); // main or main+interaction
2988       approx_i.compute_total_effects();     // total
2989     }
2990   }
2991 }
2992 
2993 
2994 void NonDExpansion::compute_analytic_statistics()
2995 {
2996   // for use with FINAL_RESULTS state
2997 
2998   const ShortArray& final_asv = finalStatistics.active_set_request_vector();
2999   const SizetArray& final_dvv = finalStatistics.active_set_derivative_vector();
3000   size_t i, j, k, rl_len, pl_len, bl_len, gl_len, cntr = 0,
3001     num_final_grad_vars = final_dvv.size(), total_offset,
3002     moment_offset = (finalMomentsType) ? 2 : 0;
3003 
3004   // flags for limiting unneeded computation (matched in print_results())
3005   bool combined_stats = (statsMetricMode == Pecos::COMBINED_EXPANSION_STATS),
3006      local_grad_stats = (!subIteratorFlag && outputLevel >= NORMAL_OUTPUT);
3007 
3008   if (local_grad_stats && expGradsMeanX.empty())
3009     expGradsMeanX.shapeUninitialized(numContinuousVars, numFunctions);
3010 
3011   // loop over response fns and compute/store analytic stats/stat grads
3012   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
3013   Real mu, var, sigma, beta, z;
3014   RealVector mu_grad, sigma_grad, final_stat_grad;
3015   for (i=0; i<numFunctions; ++i) {
3016     if (totalLevelRequests) {
3017       rl_len = requestedRespLevels[i].length();
3018       pl_len = requestedProbLevels[i].length();
3019       bl_len = requestedRelLevels[i].length();
3020       gl_len = requestedGenRelLevels[i].length();
3021     }
3022     else
3023       rl_len = pl_len = bl_len = gl_len = 0;
3024 
3025     Approximation& approx_i = poly_approxs[i];
3026 
3027     // Note: corresponding logic in NonDExpansion::compute_expansion() defines
3028     // expansionCoeffFlag as needed to support final data requirements.
3029     // If not full stats, suppress secondary moment calculations.
3030     if (approx_i.expansion_coefficient_flag()) {
3031       if (allVars)
3032 	approx_i.compute_moments(initialPtU, true, combined_stats);
3033       else
3034 	approx_i.compute_moments(true, combined_stats);
3035 
3036       const RealVector& moments = approx_i.moments(); // virtual
3037       mu = moments[0]; var = moments[1]; // Pecos provides central moments
3038 
3039       if (covarianceControl ==  DIAGONAL_COVARIANCE) respVariance[i]     = var;
3040       else if (covarianceControl == FULL_COVARIANCE) respCovariance(i,i) = var;
3041 
3042       if (var >= 0.)
3043 	sigma = std::sqrt(var);
3044       else { // negative variance can happen with SC on sparse grids
3045 	Cerr << "Warning: stochastic expansion variance is negative in "
3046 	     << "computation of std deviation.\n         Setting std "
3047 	     << "deviation to zero." << std::endl;
3048 	sigma = 0.;
3049       }
3050     }
3051 
3052     // compute moment gradients if needed for beta mappings
3053     bool moment_grad_mapping_flag = false;
3054     if (respLevelTarget == RELIABILITIES) {
3055       total_offset = cntr + moment_offset;
3056       for (j=0; j<rl_len; ++j) // dbeta/ds requires mu, sigma, dmu/ds, dsigma/ds
3057 	if (final_asv[total_offset+j] & 2)
3058 	  { moment_grad_mapping_flag = true; break; }
3059     }
3060     if (!moment_grad_mapping_flag) {
3061       total_offset = cntr + moment_offset + rl_len + pl_len;
3062       for (j=0; j<bl_len; ++j) // dz/ds requires dmu/ds, dsigma/ds
3063 	if (final_asv[total_offset+j] & 2)
3064 	  { moment_grad_mapping_flag = true; break; }
3065     }
3066 
3067     bool final_mom1_flag = false, final_mom1_grad_flag = false;
3068     if (finalMomentsType) {
3069       final_mom1_flag      = (final_asv[cntr] & 1);
3070       final_mom1_grad_flag = (final_asv[cntr] & 2);
3071     }
3072     bool mom1_grad_flag = (final_mom1_grad_flag || moment_grad_mapping_flag);
3073     // *** mean (Note: computation above not based on final ASV)
3074     if (final_mom1_flag)
3075       finalStatistics.function_value(mu, cntr);
3076     // *** mean gradient
3077     if (mom1_grad_flag) {
3078       const RealVector& grad = (allVars) ?
3079 	approx_i.mean_gradient(initialPtU, final_dvv) :
3080 	approx_i.mean_gradient();
3081       if (final_mom1_grad_flag)
3082 	finalStatistics.function_gradient(grad, cntr);
3083       if (moment_grad_mapping_flag)
3084 	mu_grad = grad; // transfer to code below
3085     }
3086     if (finalMomentsType)
3087       ++cntr;
3088 
3089     bool final_mom2_flag = false, final_mom2_grad_flag = false;
3090     if (finalMomentsType) {
3091       final_mom2_flag      = (final_asv[cntr] & 1);
3092       final_mom2_grad_flag = (final_asv[cntr] & 2);
3093     }
3094     bool mom2_grad_flag = (final_mom2_grad_flag || moment_grad_mapping_flag);
3095     bool std_moments    = (finalMomentsType == STANDARD_MOMENTS);
3096     // *** std dev / variance (Note: computation above not based on final ASV)
3097     if (final_mom2_flag) {
3098       if (std_moments) finalStatistics.function_value(sigma, cntr);
3099       else             finalStatistics.function_value(var,   cntr);
3100     }
3101     // *** std deviation / variance gradient
3102     if (mom2_grad_flag) {
3103       const RealVector& grad = (allVars) ?
3104 	approx_i.variance_gradient(initialPtU, final_dvv) :
3105 	approx_i.variance_gradient();
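      // for sigma = sqrt(var), the chain rule gives dsigma/ds = (dvar/ds)/(2 sigma)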
3106       if (std_moments || moment_grad_mapping_flag) {
3107 	if (sigma_grad.empty())
3108 	  sigma_grad.sizeUninitialized(num_final_grad_vars);
3109 	if (sigma > 0.)
3110 	  for (j=0; j<num_final_grad_vars; ++j)
3111 	    sigma_grad[j] = grad[j] / (2.*sigma);
3112 	else {
3113 	  Cerr << "Warning: stochastic expansion std deviation is zero in "
3114 	       << "computation of std deviation gradient.\n         Setting "
3115 	       << "gradient to zero." << std::endl;
3116 	  sigma_grad = 0.;
3117 	}
3118       }
3119       if (final_mom2_grad_flag) {
3120 	if (std_moments) finalStatistics.function_gradient(sigma_grad, cntr);
3121 	else             finalStatistics.function_gradient(grad,       cntr);
3122       }
3123     }
3124     if (finalMomentsType)
3125       ++cntr;
3126 
3127     if (respLevelTarget == RELIABILITIES) {
3128       for (j=0; j<rl_len; ++j, ++cntr) {
3129 	// *** beta
3130 	if (final_asv[cntr] & 1) {
3131 	  Real z_bar = requestedRespLevels[i][j];
3132 	  if (sigma > Pecos::SMALL_NUMBER) {
3133 	    Real ratio = (mu - z_bar)/sigma;
3134 	    computedRelLevels[i][j] = beta = (cdfFlag) ? ratio : -ratio;
3135 	  }
3136 	  else
3137 	    computedRelLevels[i][j] = beta =
3138 	      ( (cdfFlag && mu <= z_bar) || (!cdfFlag && mu > z_bar) ) ?
3139 	      -Pecos::LARGE_NUMBER : Pecos::LARGE_NUMBER;
3140 	  finalStatistics.function_value(beta, cntr);
3141 	}
3142 	// *** beta gradient
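	// For beta = (mu - z_bar)/sigma, the quotient rule gives
	//   dbeta/ds = [sigma dmu/ds - (mu - z_bar) dsigma/ds] / sigma^2,
	// negated for the CCDF case (matches dratio_dx below).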
3143 	if (final_asv[cntr] & 2) {
3144 	  if (final_stat_grad.empty())
3145 	    final_stat_grad.sizeUninitialized(num_final_grad_vars);
3146 	  if (sigma > Pecos::SMALL_NUMBER) {
3147 	    Real z_bar = requestedRespLevels[i][j];
3148 	    for (k=0; k<num_final_grad_vars; ++k) {
3149 	      Real dratio_dx = (sigma*mu_grad[k] - (mu-z_bar)*sigma_grad[k])
3150 		             / std::pow(sigma, 2);
3151 	      final_stat_grad[k] = (cdfFlag) ? dratio_dx : -dratio_dx;
3152 	    }
3153 	  }
3154 	  else
3155 	    final_stat_grad = 0.;
3156 	  finalStatistics.function_gradient(final_stat_grad, cntr);
3157 	}
3158       }
3159     }
3160     else
3161       cntr += rl_len;
3162 
3163     // no analytic mappings for probability levels
3164     cntr += pl_len;
3165 
3166     for (j=0; j<bl_len; ++j, ++cntr) {
3167       Real beta_bar = requestedRelLevels[i][j];
3168       if (final_asv[cntr] & 1) {
3169 	// *** z
3170 	computedRespLevels[i][j+pl_len] = z = (cdfFlag) ?
3171 	  mu - beta_bar * sigma : mu + beta_bar * sigma;
3172 	finalStatistics.function_value(z, cntr);
3173       }
3174       if (final_asv[cntr] & 2) {
3175 	// *** z gradient
3176 	if (final_stat_grad.empty())
3177 	  final_stat_grad.sizeUninitialized(num_final_grad_vars);
3178 	for (k=0; k<num_final_grad_vars; ++k)
3179 	  final_stat_grad[k] = (cdfFlag) ?
3180 	    mu_grad[k] - beta_bar * sigma_grad[k] :
3181 	    mu_grad[k] + beta_bar * sigma_grad[k];
3182 	finalStatistics.function_gradient(final_stat_grad, cntr);
3183       }
3184     }
3185 
3186     // no analytic mappings for generalized reliability levels
3187     cntr += gl_len;
3188 
3189     // *** local sensitivities
3190     if (local_grad_stats && approx_i.expansion_coefficient_flag()) {
3191       // expansion sensitivities are defined from the coefficients and basis
3192       // polynomial derivatives.  They are computed for the means of the
3193       // uncertain variables and provide a measure of local importance (but not
3194       // scaled by input covariance as in mean value importance factors).
3195       Pecos::MultivariateDistribution& x_dist
3196 	= iteratedModel.multivariate_distribution();
3197       const RealVector& exp_grad_u
3198 	= approx_i.gradient(uSpaceModel.current_variables());
3199       RealVector
3200 	exp_grad_x(Teuchos::getCol(Teuchos::View, expGradsMeanX, (int)i));
3201       uSpaceModel.trans_grad_U_to_X(exp_grad_u, exp_grad_x, x_dist.means());
3202 
3203 #ifdef TEST_HESSIANS
3204       const RealSymMatrix& exp_hess_u
3205 	= approx_i.hessian(uSpaceModel.current_variables());
3206       //RealSymMatrix exp_hess_x;
3207       //uSpaceModel.trans_hess_U_to_X(exp_hess_u, exp_hess_x, x_dist.means());
3208       Cout << exp_hess_u; //<< exp_hess_x;
3209 #endif // TEST_HESSIANS
3210     }
3211 
3212     // *** global sensitivities:
3213     if (vbdFlag && approx_i.expansion_coefficient_flag()) {
3214       approx_i.compute_component_effects(); // main or main+interaction
3215       approx_i.compute_total_effects();     // total
3216     }
3217   }
3218 
3219   if (covarianceControl == FULL_COVARIANCE)
3220     compute_off_diagonal_covariance(); // diagonal entries were filled in above
3221 }
3222 
3223 
3224 void NonDExpansion::compute_numerical_statistics()
3225 {
3226   // for use with FINAL_RESULTS state
3227 
3228   if (expansionSampler.is_null()) return;
3229 
3230   RealVector  exp_sampler_stats;  RealVectorArray imp_sampler_stats;
3231   RealRealPairArray min_max_fns;  ShortArray            sampler_asv;
3232   define_sampler_asv(sampler_asv);
3233   run_sampler(sampler_asv, exp_sampler_stats);
3234   refine_sampler(imp_sampler_stats, min_max_fns);
3235 
3236   const ShortArray& final_asv = finalStatistics.active_set_request_vector();
3237   bool list_sampling = (expansionSampler.method_name() == LIST_SAMPLING),
3238         imp_sampling = !importanceSampler.is_null();
3239   std::shared_ptr<NonDSampling> exp_sampler_rep =
3240     std::static_pointer_cast<NonDSampling>(expansionSampler.iterator_rep());
3241   size_t i, j, cntr = 0, sampler_cntr = 0,
3242     moment_offset = (finalMomentsType) ? 2 : 0,
3243     sampler_moment_offset = (exp_sampler_rep->final_moments_type()) ? 2 : 0;
3244   Real p, gen_beta, z;
3245 
3246   // Update finalStatistics from {exp,imp}_sampler_stats.  Moment mappings
3247   // are not recomputed since empty level arrays are passed in construct_
3248   // expansion_sampler() and these levels are omitted from sampler_cntr.
3249   archive_allocate_mappings();
3250 
3251   for (i=0; i<numFunctions; ++i) {
3252     cntr += moment_offset;
3253     // sampler_cntr tracks only the numerical stats (analytic level mappings
3254     // and final moments are suppressed in construct_expansion_sampler())
3255     sampler_cntr += sampler_moment_offset;
3256 
3257     if (respLevelTarget == RELIABILITIES)
3258       cntr += requestedRespLevels[i].length(); // don't increment sampler_cntr
3259     else {
3260       size_t rl_len = requestedRespLevels[i].length();
3261       for (j=0; j<rl_len; ++j, ++cntr, ++sampler_cntr) {
3262 	if (final_asv[cntr] & 1) {
3263 	  p = (imp_sampling) ? imp_sampler_stats[i][j]
3264 	                     : exp_sampler_stats[sampler_cntr];
3265 	  if (respLevelTarget == PROBABILITIES) {
3266 	    computedProbLevels[i][j] = p;
3267 	    finalStatistics.function_value(p, cntr);
3268 	  }
3269 	  else if (respLevelTarget == GEN_RELIABILITIES) {
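	    // generalized reliability index: beta* = -Phi^{-1}(p)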
3270 	    computedGenRelLevels[i][j] = gen_beta
3271 	      = -Pecos::NormalRandomVariable::inverse_std_cdf(p);
3272 	    finalStatistics.function_value(gen_beta, cntr);
3273 	  }
3274 	}
3275 	if (final_asv[cntr] & 2) { // TO DO: sampling sensitivity analysis
3276 	  Cerr << "\nError: analytic sensitivity of response ";
3277 	  if (respLevelTarget == PROBABILITIES)
3278 	    Cerr << "probability";
3279 	  else if (respLevelTarget == GEN_RELIABILITIES)
3280 	    Cerr << "generalized reliability";
3281 	  Cerr << " not yet supported." << std::endl;
3282 	  abort_handler(METHOD_ERROR);
3283 	}
3284       }
3285     }
3286 
3287     size_t pl_len = requestedProbLevels[i].length();
3288     for (j=0; j<pl_len; ++j, ++cntr, ++sampler_cntr) {
3289       if (final_asv[cntr] & 1) {
3290 	computedRespLevels[i][j] = z = exp_sampler_stats[sampler_cntr];
3291 	finalStatistics.function_value(z, cntr);
3292       }
3293       if (final_asv[cntr] & 2) { // TO DO: p->z sampling sensitivity analysis
3294 	Cerr << "\nError: analytic sensitivity of response level not yet "
3295 	     << "supported for mapping from probability." << std::endl;
3296 	abort_handler(METHOD_ERROR);
3297       }
3298     }
3299 
3300     size_t bl_len = requestedRelLevels[i].length();
3301     cntr += bl_len; // don't increment sampler_cntr
3302 
3303     size_t gl_len = requestedGenRelLevels[i].length();
3304     for (j=0; j<gl_len; ++j, ++cntr, ++sampler_cntr) {
3305       if (final_asv[cntr] & 1) {
3306 	computedRespLevels[i][j+pl_len+bl_len] = z
3307 	  = exp_sampler_stats[sampler_cntr];
3308 	finalStatistics.function_value(z, cntr);
3309       }
3310       if (final_asv[cntr] & 2) { // TO DO: beta*->p->z sampling SA
3311 	Cerr << "\nError: analytic sensitivity of response level not yet "
3312 	     << "supported for mapping from generalized reliability."
3313 	     << std::endl;
3314 	abort_handler(METHOD_ERROR);
3315       }
3316     }
3317 
3318     // archive the mappings from/to response levels
3319     archive_from_resp(i); archive_to_resp(i);
3320   }
3321 
3322   // now that level arrays are updated, infer the PDFs.
3323   // imp_sampling flag prevents mixing of refined and unrefined level mappings.
3324   compute_densities(min_max_fns, imp_sampling);
3325   for (i=0; i<numFunctions; ++i)
3326       archive_pdf(i);
3327 }
3328 
3329 
3330 void NonDExpansion::
3331 compute_numerical_stat_refinements(RealVectorArray& imp_sampler_stats,
3332 				   RealRealPairArray& min_max_fns)
3333 {
3334   // response fn is active for z->p, z->beta*, p->z, or beta*->z
3335 
3336   const RealMatrix& exp_vars = expansionSampler.all_samples();
3337   //const IntResponseMap& exp_responses = expansionSampler.all_responses();
3338   const RealVector& exp_sampler_stats
3339     = expansionSampler.response_results().function_values();
3340   int exp_cv = exp_vars.numRows();
3341 
3342   std::shared_ptr<NonDSampling> exp_sampler_rep =
3343     std::static_pointer_cast<NonDSampling>(expansionSampler.iterator_rep());
3344   std::shared_ptr<NonDAdaptImpSampling> imp_sampler_rep =
3345     std::static_pointer_cast<NonDAdaptImpSampling>
3346     (importanceSampler.iterator_rep());
3347 
3348   size_t i, j, exp_sampler_cntr = 0;
3349   imp_sampler_stats.resize(numFunctions);
3350   bool x_data_flag = false;
3351   ParLevLIter pl_iter = methodPCIter->mi_parallel_level_iterator(miPLIndex);
3352   size_t exp_sampler_moment_offset
3353     = (exp_sampler_rep->final_moments_type()) ? 2 : 0;
3354   for (i=0; i<numFunctions; ++i) {
3355     // exp_sampler_cntr tracks only the numerical stats (analytic level mappings
3356     // and final moments are suppressed in construct_expansion_sampler())
3357     exp_sampler_cntr += exp_sampler_moment_offset;
3358     size_t rl_len = requestedRespLevels[i].length();
3359     if (rl_len && respLevelTarget != RELIABILITIES) {
3360       imp_sampler_stats[i].resize(rl_len);
3361       // initializing importance sampling with both original build
3362       // points and LHS expansion sampler points (var only, no resp)
3363       const Pecos::SurrogateData& exp_data
3364 	= uSpaceModel.approximation_data(i);
3365       size_t m, num_data_pts = exp_data.points(),
3366 	num_to_is = numSamplesOnExpansion + num_data_pts;
3367       RealVectorArray initial_points(num_to_is);
3368       const Pecos::SDVArray& sdv_array = exp_data.variables_data();
3369       for (m=0; m<num_data_pts; ++m)
3370 	initial_points[m] = sdv_array[m].continuous_variables(); // view OK
3371       for (m=0; m<numSamplesOnExpansion; ++m)
3372 	copy_data(exp_vars[m], exp_cv, initial_points[m+num_data_pts]);
3373       for (j=0; j<rl_len; ++j, ++exp_sampler_cntr) {
3374 	//Cout << "Initial estimate of p to seed "
3375 	//     << exp_sampler_stats[exp_sampler_cntr] << '\n';
3376 	imp_sampler_rep->initialize(initial_points, x_data_flag, i,
3377 	  exp_sampler_stats[exp_sampler_cntr], requestedRespLevels[i][j]);
3378 
3379 	importanceSampler.run(pl_iter);
3380 
3381 	//Real p = imp_sampler_rep->final_probability();
3382 	//Cout << "importance sampling estimate for function " << i
3383 	//     << " level " << j << " = " << p << "\n";
3384 	imp_sampler_stats[i][j] = imp_sampler_rep->final_probability();
3385       }
3386     }
3387     // exp_sampler_cntr offset by moments, rl_len for p, pl_len, and gl_len
3388     exp_sampler_cntr += requestedProbLevels[i].length()
3389                      +  requestedGenRelLevels[i].length();
3390   }
3391   // update min_max_fns for use in defining bounds for outer PDF bins
3392   if (pdfOutput)
3393     min_max_fns = imp_sampler_rep->extreme_values();
3394 }
3395 
3396 
3397 void NonDExpansion::pull_reference(RealVector& stats_ref)
3398 {
3399   if (!refineMetric) {
3400     Cerr << "Error: refineMetric definition required in NonDExpansion::"
3401 	 << "pull_reference()" << std::endl;
3402     abort_handler(METHOD_ERROR);
3403   }
3404 
3405   size_t mom_len = 0, lev_len = 0;
3406   bool full_covar = (covarianceControl == FULL_COVARIANCE);
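  // Entry count sketch: full covariance pulls numFunctions means plus the
  // numFunctions*(numFunctions+1)/2 unique lower-triangle (co)variance terms,
  // i.e. numFunctions*(numFunctions+3)/2 in total; the diagonal case pulls a
  // mean and a variance per QoI (2*numFunctions).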
3407   if (refineMetric == Pecos::COVARIANCE_METRIC ||
3408       refineMetric == Pecos::MIXED_STATS_METRIC)
3409     mom_len = (full_covar) ? (numFunctions*(numFunctions + 3))/2
3410                            : 2*numFunctions;
3411   if (refineMetric == Pecos::LEVEL_STATS_METRIC ||
3412       refineMetric == Pecos::MIXED_STATS_METRIC)
3413     lev_len = totalLevelRequests;
3414   size_t stats_len = mom_len + lev_len;
3415   if (stats_ref.length() != stats_len) stats_ref.sizeUninitialized(stats_len);
3416 
3417   switch (refineMetric) {
3418   case Pecos::COVARIANCE_METRIC:  case Pecos::MIXED_STATS_METRIC: {
3419 
3420     // pull means
3421     std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
3422     if (statsMetricMode == Pecos::COMBINED_EXPANSION_STATS)
3423       for (size_t i=0; i<numFunctions; ++i)
3424 	stats_ref[i] = poly_approxs[i].combined_moment(0);
3425     else
3426       for (size_t i=0; i<numFunctions; ++i)
3427 	stats_ref[i] = poly_approxs[i].moment(0);
3428 
3429     // pull resp{V,Cov}ariance (comb stats managed in compute_*_covariance())
3430     if (full_covar)
3431       pull_lower_triangle(respCovariance, stats_ref, numFunctions);
3432     else
3433       copy_data_partial(respVariance, stats_ref, numFunctions);
3434     break;
3435   }
3436   }
3437 
3438   switch (refineMetric) {
3439   case Pecos::LEVEL_STATS_METRIC:  case Pecos::MIXED_STATS_METRIC:
3440     pull_level_mappings(stats_ref, mom_len);  break;
3441   }
3442 
3443 #ifdef DEBUG
3444   Cout << "Pulled stats:\n" << stats_ref;
3445 #endif // DEBUG
3446 }
3447 
3448 
3449 void NonDExpansion::push_reference(const RealVector& stats_ref)
3450 {
3451   if (!refineMetric) {
3452     Cerr << "Error: refineMetric definition required in NonDExpansion::"
3453 	 << "push_reference()" << std::endl;
3454     abort_handler(METHOD_ERROR);
3455   }
3456 
3457   bool full_covar = (covarianceControl == FULL_COVARIANCE);
3458   switch (refineMetric) {
3459   case Pecos::COVARIANCE_METRIC:  case Pecos::MIXED_STATS_METRIC: {
3460 
3461     // push resp{V|Cov}ariance (extract first since reused below)
3462     if (full_covar)
3463       push_lower_triangle(stats_ref, respCovariance, numFunctions);
3464     else
3465       copy_data_partial(stats_ref, numFunctions, numFunctions, respVariance);
3466 
3467     // push Pecos::{expansion|numerical}Moments
3468     std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
3469     if (statsMetricMode == Pecos::COMBINED_EXPANSION_STATS)
3470       for (size_t i=0; i<numFunctions; ++i) {
3471 	poly_approxs[i].combined_moment(stats_ref[i], 0); // mean values
3472 	if (full_covar) poly_approxs[i].combined_moment(respCovariance(i,i), 1);
3473 	else            poly_approxs[i].combined_moment(respVariance[i],     1);
3474       }
3475     else
3476       for (size_t i=0; i<numFunctions; ++i) {
3477 	poly_approxs[i].moment(stats_ref[i], 0); // mean values
3478 	if (full_covar) poly_approxs[i].moment(respCovariance(i,i), 1);
3479 	else            poly_approxs[i].moment(respVariance[i],     1);
3480       }
3481     break;
3482   }
3483   }
3484 
3485   switch (refineMetric) {
3486   case Pecos::LEVEL_STATS_METRIC:
3487     push_level_mappings(stats_ref, 0);  break;
3488   case Pecos::MIXED_STATS_METRIC: {
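    // skip the mean + (co)variance block packed by pull_reference(); the
    // level mappings follow at this offset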
3489     size_t offset = (full_covar) ? (numFunctions*(numFunctions + 3))/2
3490                                  : 2*numFunctions;
3491     push_level_mappings(stats_ref, offset);  break;
3492   }
3493   }
3494 
3495 #ifdef DEBUG
3496   Cout << "Pushed stats:\n" << stats_ref;
3497 #endif // DEBUG
3498 }
3499 
3500 
3501 void NonDExpansion::define_sampler_asv(ShortArray& sampler_asv)
3502 {
3503   if (expansionSampler.method_name() == LIST_SAMPLING)
3504     sampler_asv.assign(numFunctions, 1);
3505   else {
3506     sampler_asv.assign(numFunctions, 0);
3507     // response fn is active for z->p, z->beta*, p->z, or beta*->z
3508     const ShortArray& final_asv = finalStatistics.active_set_request_vector();
3509     size_t i, j, cntr = 0, moment_offset = (finalMomentsType) ? 2 : 0;
3510     for (i=0; i<numFunctions; ++i) {
3511       cntr += moment_offset;
3512       size_t rl_len = requestedRespLevels[i].length();
3513       if (respLevelTarget != RELIABILITIES)
3514 	for (j=0; j<rl_len; ++j)
3515 	  if (final_asv[cntr+j] & 1)
3516 	    { sampler_asv[i] |= 1; break; }
3517       cntr += rl_len;
3518       size_t pl_len = requestedProbLevels[i].length();
3519       for (j=0; j<pl_len; ++j)
3520 	if (final_asv[cntr+j] & 1)
3521 	  { sampler_asv[i] |= 1; break; }
3522       cntr += pl_len + requestedRelLevels[i].length();
3523       size_t gl_len = requestedGenRelLevels[i].length();
3524       for (j=0; j<gl_len; ++j)
3525 	if (final_asv[cntr+j] & 1)
3526 	  { sampler_asv[i] |= 1; break; }
3527       cntr += gl_len;
3528     }
3529   }
3530 }
3531 
3532 
3533 void NonDExpansion::
3534 run_sampler(const ShortArray& sampler_asv, RealVector& exp_sampler_stats)
3535 {
3536   if (expansionSampler.is_null()) return;
3537 
3538   expansionSampler.active_set_request_vector(sampler_asv);
3539 
3540   ParLevLIter pl_iter = methodPCIter->mi_parallel_level_iterator(miPLIndex);
3541   expansionSampler.run(pl_iter);
3542 
3543   std::shared_ptr<NonDSampling> exp_sampler_rep =
3544     std::static_pointer_cast<NonDSampling>(expansionSampler.iterator_rep());
3545   if (expansionSampler.method_name() == LIST_SAMPLING)
3546     // full set of numerical statistics, including PDFs
3547     exp_sampler_rep->compute_statistics(expansionSampler.all_samples(),
3548 					expansionSampler.all_responses());
3549   else { // augment moment-based with sampling-based mappings (PDFs suppressed)
3550     exp_sampler_rep->compute_level_mappings(expansionSampler.all_responses());
3551     exp_sampler_rep->update_final_statistics();
3552   }
3553   exp_sampler_stats = expansionSampler.response_results().function_values();
3554 }
3555 
3556 
3557 void NonDExpansion::
3558 refine_sampler(RealVectorArray& imp_sampler_stats,
3559 	       RealRealPairArray& min_max_fns)
3560 {
3561   // Update probability estimates with importance sampling, if requested.
3562   if (!importanceSampler.is_null())
3563     compute_numerical_stat_refinements(imp_sampler_stats, min_max_fns);
3564   else if (pdfOutput && !expansionSampler.is_null()) {
3565     // NonDSampling::extremeValues not avail (pdfOutput off)
3566     std::shared_ptr<NonDSampling> exp_sampler_rep =
3567       std::static_pointer_cast<NonDSampling>(expansionSampler.iterator_rep());
3568     exp_sampler_rep->compute_intervals(min_max_fns);
3569   }
3570 }
3571 
3572 
3573 void NonDExpansion::archive_moments()
3574 {
3575   if (!resultsDB.active())
3576     return;
3577 
3578   // for now, archive only central moments to avoid duplicating all above logic
3579   // insert NaN for missing data
3580   bool exp_active = false, num_active = false;
3581   RealMatrix exp_matrix(4, numFunctions), num_matrix(4, numFunctions);
3582   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
3583   for (size_t i=0; i<numFunctions; ++i) {
3584     Approximation& approx_i = poly_approxs[i];
3585     if (approx_i.expansion_coefficient_flag()) {
3586       // Pecos provides central moments
3587       const RealVector& exp_moments = approx_i.expansion_moments();
3588       const RealVector& num_int_moments
3589 	= approx_i.numerical_integration_moments();
3590       size_t exp_mom = exp_moments.length(),
3591 	num_int_mom  = num_int_moments.length();
3592       if (exp_mom)  exp_active = true;
3593       if (num_int_mom)  num_active = true;
3594       for (size_t j=0; j<exp_mom; ++j)
3595         exp_matrix(j,i) = exp_moments[j];
3596       for (size_t j=exp_mom; j<4; ++j)
3597         exp_matrix(j,i) = std::numeric_limits<Real>::quiet_NaN();
3598       for (size_t j=0; j<num_int_mom; ++j)
3599         num_matrix(j,i) = num_int_moments[j];
3600       for (size_t j=num_int_mom; j<4; ++j)
3601         num_matrix(j,i) = std::numeric_limits<Real>::quiet_NaN();
3602     }
3603   }
3604 
3605   // Set moment labels.
3606   std::string moment_1 = "Mean";
3607   std::string moment_2 = (finalMomentsType == CENTRAL_MOMENTS) ? "Variance" : "Standard Deviation";
3608   std::string moment_3 = (finalMomentsType == CENTRAL_MOMENTS) ? "3rd Central" : "Skewness";
3609   std::string moment_4 = (finalMomentsType == CENTRAL_MOMENTS) ? "4th Central" : "Kurtosis";
3610 
3611   std::string moment_1_lower = "mean";
3612   std::string moment_2_lower = (finalMomentsType == CENTRAL_MOMENTS) ? "variance" : "std_deviation";
3613   std::string moment_3_lower = (finalMomentsType == CENTRAL_MOMENTS) ? "third_central" : "skewness";
3614   std::string moment_4_lower = (finalMomentsType == CENTRAL_MOMENTS) ? "fourth_central" : "kurtosis";
3615 
3616   if (exp_active || num_active) {
3617     MetaDataType md_moments;
3618     md_moments["Row Labels"]
3619       = make_metadatavalue(moment_1, moment_2, moment_3, moment_4);
3620     md_moments["Column Labels"]
3621       = make_metadatavalue(iteratedModel.response_labels());
3622 
3623     if (exp_active) {
3624       resultsDB.insert(run_identifier(), resultsNames.moments_central_exp, exp_matrix, md_moments);
3625       for (int i = 0; i < numFunctions; ++i) {
3626         DimScaleMap scales;
3627         scales.emplace(0, StringScale("moments",
3628                              {moment_1_lower, moment_2_lower, moment_3_lower, moment_4_lower},
3629                              ScaleScope::SHARED));
3630         // extract this response's column of expansion moments
3631         resultsDB.insert(run_identifier(), {String("expansion_moments"),
3632             iteratedModel.response_labels()[i]},
3633             Teuchos::getCol<int,double>(Teuchos::View, *const_cast<RealMatrix*>(&exp_matrix), i),
3634             scales);
3635       }
3636     }
3637     if (num_active) {
3638       resultsDB.insert(run_identifier(), resultsNames.moments_central_num,
3639           num_matrix, md_moments);
3640 
3641       for (int i = 0; i < numFunctions; ++i) {
3642         DimScaleMap scales;
3643         scales.emplace(0,
3644             StringScale("moments",
3645             {moment_1_lower, moment_2_lower, moment_3_lower, moment_4_lower},
3646             ScaleScope::SHARED));
3647         // extract this response's column of numerical integration moments
3648         resultsDB.insert(run_identifier(), {String("integration_moments"),
3649             iteratedModel.response_labels()[i]},
3650             Teuchos::getCol<int,double>(Teuchos::View,
3651 					*const_cast<RealMatrix*>(&num_matrix),
3652 					i), scales);
3653       }
3654     }
3655   }
3656 }
3657 
3658 
3659 void NonDExpansion::archive_sobol_indices() {
3660 
3661   // effects are computed per resp fn within compute_statistics()
3662   // if vbdFlag and expansion_coefficient_flag
3663 
3664   // this fn is called if vbdFlag and archives per resp fn if
3665   // expansion_coefficient_flag and non-negligible variance
3666   if(!resultsDB.active()) return;
3667 
3668   const StringArray& fn_labels = iteratedModel.response_labels();
3669   StringMultiArrayConstView cv_labels
3670     = iteratedModel.continuous_variable_labels();
3671 
3672   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
3673   // Map from index to variable labels
3674   std::map<int, std::vector<const char *> > sobol_labels;
3675 
3676   // Main, total, and each order of interactions are separately inserted
3677   // into resultsDB. orders maps the index of the Sobol' index to its order
3678   // to facilitate insertion.
3679   std::map<int, int> orders;
3680 
3681   // collect variable descriptors for interactions for (aggregated) sobol index
3682   // map. These will be used as dimension scales.
3683   size_t i, j, num_indices;
3684   if (vbdOrderLimit != 1) { // unlimited (0) or includes interactions (>1)
3685     // create aggregate interaction labels (once for all response fns)
3686     std::shared_ptr<SharedApproxData> shared_data_rep
3687       = uSpaceModel.shared_approximation().data_rep();
3688     const Pecos::BitArrayULongMap& sobol_map
3689       = shared_data_rep->sobol_index_map();
3690     for (Pecos::BAULMCIter map_cit=sobol_map.begin();
3691 	 map_cit!=sobol_map.end(); ++map_cit) { // loop in key sorted order
3692       const BitArray& set = map_cit->first;
3693       unsigned long index = map_cit->second; // 0 is the 0-way term, 1..n are 1-way, > n are interactions
3694       if (index > numContinuousVars) {       // an interaction
3695         if( sobol_labels.find(index) == sobol_labels.end())
3696           sobol_labels[index] = std::vector<const char *>();
3697         orders[index] = set.count();
3698 	for (j=0; j<numContinuousVars; ++j) {
3699 	  if (set[j])
3700 	    sobol_labels[index].push_back(cv_labels[j].c_str());
3701         }
3702       }
3703     }
3704   }
3705 
3706   // archive sobol indices per response function
3707   for (i=0; i<numFunctions; ++i) {
3708     Approximation& approx_i = poly_approxs[i];
3709     if (approx_i.expansion_coefficient_flag()) {
3710       // Note: vbdFlag can be defined for covarianceControl == NO_COVARIANCE.
3711       // In this case, we cannot screen effectively at this level.
3712       bool well_posed = ( ( covarianceControl   == DIAGONAL_COVARIANCE &&
3713 			    respVariance[i]     <= Pecos::SMALL_NUMBER ) ||
3714 			  ( covarianceControl   == FULL_COVARIANCE &&
3715 			    respCovariance(i,i) <= Pecos::SMALL_NUMBER ) )
3716 	              ? false : true;
3717       if (well_posed) {
3718 	const RealVector& total_indices = approx_i.total_sobol_indices();
3719 	const RealVector& sobol_indices = approx_i.sobol_indices();
3720         Pecos::ULongULongMap sparse_sobol_map
3721 	  = approx_i.sparse_sobol_index_map();
3722 	bool dense = sparse_sobol_map.empty();
3723 	Real sobol; size_t main_cntr = 0;
3724 	// Store main effects and total effects
3725         RealArray main_effects, total_effects;
3726         StringArray scale_labels;
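	// sobol_indices stores the 0-way term first, so the main effect for
	// variable j lives at full index j+1; under compressed sensing the
	// sparse map translates that full index to its compressed position.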
3727 	for (j=0; j<numContinuousVars; ++j) {
3728 	  if (dense) // no compressive sensing
3729 	    sobol = sobol_indices[j+1];
3730 	  else { // for the compressive sensing case, storage is indirect
3731 	    Pecos::ULULMIter it = sparse_sobol_map.find(j+1);
3732 	    if (it == sparse_sobol_map.end())
3733 	      sobol = 0.;
3734 	    else
3735 	      { sobol = sobol_indices[it->second]; ++main_cntr; }
3736 	  }
3737 	  if (std::abs(sobol) > vbdDropTol || std::abs(total_indices[j]) > vbdDropTol) {
3738             main_effects.push_back(sobol);
3739             total_effects.push_back(total_indices[j]);
3740             scale_labels.push_back(cv_labels[j]);
3741           }
3742 	}
3743         if(!main_effects.empty()) {
3744           DimScaleMap scales;
3745           scales.emplace(0, StringScale("variables", scale_labels, ScaleScope::UNSHARED));
3746           resultsDB.insert(run_identifier(), {String("main_effects"), fn_labels[i]}, main_effects, scales);
3747           resultsDB.insert(run_identifier(), {String("total_effects"), fn_labels[i]}, total_effects, scales);
3748         }
3749 
3750 	// Archive interaction effects (grouped by interaction order)
3751 	if (vbdOrderLimit != 1) { // unlimited (0) or includes interactions (>1)
3752           RealArray int_effects;
3753           std::vector<std::vector<const char *> > int_scale;
3754 	  num_indices = sobol_indices.length();
3755           // Results file insertions are performed separately for each order. old_order is used
3756           // to detect when the order switches to trigger an insertion.
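          // For example (a sketch assuming three variables and no order
          // limit): dense indices 4..6 hold the 2-way interactions and
          // index 7 the single 3-way term, so the accumulated order-2 block
          // is flushed as "order_2_interactions" when orders[j] first
          // reports an order of 3.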
3757           int old_order;
3758 	  if (dense) { // no compressive sensing
3759             j = numContinuousVars+1;
3760             // make sure index j exists before attempting to set old_order
3761             old_order = (j < num_indices) ? orders[j] : 0;
3762 	    for (; j<num_indices; ++j) {
3763               if(orders[j] != old_order && !int_effects.empty() ) {
3764                 String result_name = String("order_") + std::to_string(old_order) + String("_interactions");
3765                 DimScaleMap int_scales;
3766                 int_scales.emplace(0, StringScale("variables", int_scale, ScaleScope::UNSHARED));
3767                 resultsDB.insert(run_identifier(), {result_name, fn_labels[i]}, int_effects, int_scales);
3768                 int_scale.clear();
3769                 int_effects.clear();
3770               }
3771               old_order = orders[j];
3772 	      if (std::abs(sobol_indices[j]) > vbdDropTol) {// print interaction
3773                 int_effects.push_back(sobol_indices[j]);
3774                 int_scale.push_back(sobol_labels[j]);
3775               }
3776             }
3777 	  }
3778 	  else { // for the compressive sensing case, storage is indirect
3779 	    Pecos::ULULMIter it = ++sparse_sobol_map.begin(); // skip 0-way
3780 	    std::advance(it, main_cntr);               // advance past 1-way
3781             // make sure it->first exists before attempting to set old_order
3782             old_order = (it != sparse_sobol_map.end()) ? orders[it->first] : 0;
3783 	    for (; it!=sparse_sobol_map.end(); ++it) { // 2-way and above
3784               if(orders[it->first] != old_order && !int_effects.empty()) {
3785                 String result_name = String("order_") + std::to_string(old_order) + String("_interactions");
3786                 DimScaleMap int_scales;
3787                 int_scales.emplace(0, StringScale("variables", int_scale, ScaleScope::UNSHARED));
3788                 resultsDB.insert(run_identifier(), {result_name, fn_labels[i]}, int_effects, int_scales);
3789                 int_scale.clear();
3790                 int_effects.clear();
3791               }
3792               old_order = orders[it->first];
3793 	      sobol = sobol_indices[it->second];
3794 	      if (std::abs(sobol) > vbdDropTol) { // print interaction
3795                 int_effects.push_back(sobol);
3796                 int_scale.push_back(sobol_labels[it->first]);
3797               }
3798 	    }
3799 	  }
3800           if(!int_effects.empty()) {  // Insert the remaining contents of int_effects, if any
3801             String result_name = String("order_") + std::to_string(old_order) + String("_interactions");
3802             DimScaleMap int_scales;
3803             int_scales.emplace(0, StringScale("variables", int_scale, ScaleScope::UNSHARED));
3804             resultsDB.insert(run_identifier(), {result_name, fn_labels[i]}, int_effects, int_scales);
3805             int_scale.clear();
3806             int_effects.clear();
3807           }
3808 	}
3809       }
3810     }
3811   }
3812 }
3813 
3814 
3815 void NonDExpansion::update_final_statistics_gradients()
3816 {
3817   if (finalStatistics.function_gradients().empty())
3818     return;
3819 
3820   // Augmented design vars:
3821   // > All vars: transform dg/du to dg/dx -> provides desired dg/ds for x = s
3822   // > Distinct vars: PCE/SC approximations for dg/ds are formed
3823   // Inserted design vars:
3824   // > All and Distinct views for the subIterator are equivalent
3825   // > PCE/SC approximations for dg/ds are formed
3826   // > Alternative: All view could force an artificial cdv augmentation
3827   // Mixed augmented/inserted design vars:
3828   // > All vars: bookkeep the two dg/ds approaches
3829   // > Distinct vars: PCE/SC approximations for dg/ds are formed
3830 
3831   // for all_variables, finalStatistics design grads are in u-space
3832   // -> transform to the original design space
3833   if (allVars) {
3834     // this approach is more efficient but less general.  If we can assume
3835     // that the DVV only contains design/state vars, then we know they are
3836     // uncorrelated and the jacobian matrix is diagonal with terms 2./range.
3837     const SharedVariablesData& svd
3838       = iteratedModel.current_variables().shared_data();
3839     SizetMultiArrayConstView cv_ids = iteratedModel.continuous_variable_ids();
3840     const SizetArray& final_dvv
3841       = finalStatistics.active_set_derivative_vector();
3842     const std::vector<Pecos::RandomVariable>& x_ran_vars
3843       = iteratedModel.multivariate_distribution().random_variables();
3844     Pecos::ProbabilityTransformation& nataf
3845       = uSpaceModel.probability_transformation();
3846 
3847     RealVector init_x;  nataf.trans_U_to_X(initialPtU, init_x);
3848     RealMatrix final_stat_grads = finalStatistics.function_gradients_view();
3849     int num_final_stats = final_stat_grads.numCols();  Real z, x, factor;
3850     size_t num_final_grad_vars = final_dvv.size(), i, j, rv_index, deriv_j,
3851       end_cauv = startCAUV + numCAUV;
3852     for (j=0; j<num_final_grad_vars; ++j) {
3853       deriv_j = find_index(cv_ids, final_dvv[j]); //final_dvv[j]-1;
3854       if ( deriv_j < startCAUV || deriv_j >= end_cauv ) {
3855 	// design/epistemic/state: factor = 2/range (see jacobian_dZ_dX())
3856 	rv_index = svd.cv_index_to_all_index(deriv_j);
3857 	z = initialPtU[deriv_j];  x = init_x[deriv_j];
3858 	//nataf_rep->trans_Z_to_X(z, x, deriv_j); // private
3859 	factor = x_ran_vars[rv_index].pdf(x)
3860 	       / Pecos::UniformRandomVariable::std_pdf(z);
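	// Chain-rule sketch behind this factor: u and x are related by CDF
	// matching, F_U(u) = F_X(x), so du/dx = f_X(x)/f_U(u) and
	// dg/dx = dg/du * f_X(x)/f_U(u); for a bounded uniform design or
	// state variable this reduces to the 2/range term noted above.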
3861 	for (i=0; i<num_final_stats; ++i)
3862 	  final_stat_grads(j,i) *= factor;
3863       }
3864       // else inserted design variable sensitivity: no scaling required
3865     }
3866 
3867     // This approach is more general, but is overkill for this purpose
3868     // and incurs additional copying overhead.
3869     /*
3870     Pecos::ProbabilityTransformation& nataf
3871       = uSpaceModel.probability_transformation();
3872     RealVector initial_pt_x_pv, fn_grad_u, fn_grad_x;
3873     copy_data(initial_pt_x, initial_pt_x_pv);
3874     RealMatrix jacobian_ux;
3875     nataf.jacobian_dU_dX(initial_pt_x_pv, jacobian_ux);
3876     RealBaseVector final_stat_grad;
3877     for (i=0; i<num_final_stats; ++i) {
3878       copy_data(finalStatistics.function_gradient_view(i), fn_grad_u);
3879       nataf.trans_grad_U_to_X(fn_grad_u, fn_grad_x, jacobian_ux, final_dvv);
3880       copy_data(fn_grad_x, final_stat_grad);
3881       finalStatistics.function_gradient(final_stat_grad, i);
3882     }
3883     */
3884   }
3885 
3886   // For distinct vars, nothing additional is needed since u_space_sampler
3887   // has been configured to compute dg/ds at each of the sample points (these
3888   // are already in user-space and are not part of the variable transform).
3889   // uSpaceModel.build_approximation() -> PecosApproximation::build()
3890   // then constructs PCE/SC approximations of these gradients, and
3891   // PecosApproximation::<mean,variance>_gradient()
3892   // are used above to generate dmu/ds, dsigma/ds, and dbeta/ds.
3893 }
3894 
3895 
3896 void NonDExpansion::print_moments(std::ostream& s)
3897 {
3898   s << std::scientific << std::setprecision(write_precision);
3899 
3900   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
3901   const StringArray& fn_labels = iteratedModel.response_labels();
3902   size_t i, j, width = write_precision+7;
3903 
3904   s << "\nMoment statistics for each response function:\n";
3905 
3906   // Handle cases of both expansion/numerical moments or only one or the other:
3907   //   both exp/num: SC and PCE with numerical integration
3908   //   exp only:     PCE with unstructured grids (regression, exp sampling)
3909   // Also handle numerical exception of negative variance in either exp or num
3910   size_t exp_mom, num_int_mom;
3911   bool exception = false, curr_exception, prev_exception = false,
3912     combined_stats = (statsMetricMode == Pecos::COMBINED_EXPANSION_STATS);
3913   RealVector std_exp_moments, std_num_int_moments, empty_moments;
3914   for (i=0; i<numFunctions; ++i) {
3915     Approximation& approx_i = poly_approxs[i];
3916     if (!approx_i.expansion_coefficient_flag()) continue;
3917     // Pecos provides central moments.  Note: combined moments could be
3918     // expansion or numerical integration, but what is important for header
3919     // output is that there is only one primary type with no secondary.
3920     const RealVector& exp_moments     = (combined_stats) ?
3921       approx_i.combined_moments() : approx_i.expansion_moments();
3922     const RealVector& num_int_moments = (combined_stats) ? empty_moments :
3923       approx_i.numerical_integration_moments();
3924     exp_mom = exp_moments.length(); num_int_mom = num_int_moments.length();
3925     curr_exception = ( (exp_mom     == 2 && exp_moments[1]     <  0.) ||
3926 		       (num_int_mom == 2 && num_int_moments[1] <  0.) ||
3927 		       (exp_mom     >  2 && exp_moments[1]     <= 0.) ||
3928 		       (num_int_mom >  2 && num_int_moments[1] <= 0.) );
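    // Rationale for the bounds above: a variance of exactly zero is
    // tolerable when only mean/variance are reported (moment count == 2),
    // but skewness/kurtosis require strictly positive variance for
    // standardization, hence the <= test for more than two moments.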
3929     if (curr_exception || finalMomentsType == CENTRAL_MOMENTS) {
3930       if (i==0 || !prev_exception)
3931 	s << std::setw(width+15) << "Mean" << std::setw(width+1) << "Variance"
3932 	  << std::setw(width+1)  << "3rdCentral" << std::setw(width+2)
3933 	  << "4thCentral\n";
3934       if (exp_mom && num_int_mom) s << fn_labels[i];
3935       else                        s << std::setw(14) << fn_labels[i];
3936       if (exp_mom) {
3937 	if (num_int_mom) s << '\n' << std::setw(14) << "expansion:  ";
3938 	for (j=0; j<exp_mom; ++j)
3939 	  s << ' ' << std::setw(width) << exp_moments[j];
3940       }
3941       if (num_int_mom) {
3942 	if (exp_mom)     s << '\n' << std::setw(14) << "integration:";
3943 	for (j=0; j<num_int_mom; ++j)
3944 	  s << ' ' << std::setw(width) << num_int_moments[j];
3945       }
3946       prev_exception = curr_exception;
3947       if (curr_exception && finalMomentsType == STANDARD_MOMENTS)
3948 	exception = true;
3949     }
3950     else {
3951       if (i==0 || prev_exception)
3952 	s << std::setw(width+15) << "Mean" << std::setw(width+1) << "Std Dev"
3953 	  << std::setw(width+1)  << "Skewness" << std::setw(width+2)
3954 	  << "Kurtosis\n";
3955       if (exp_mom && num_int_mom) s << fn_labels[i];
3956       else                        s << std::setw(14) << fn_labels[i];
3957       if (exp_mom) {
3958 	Pecos::PolynomialApproximation::
3959 	  standardize_moments(exp_moments, std_exp_moments);
3960 	if (num_int_mom) s << '\n' << std::setw(14) << "expansion:  ";
3961 	for (j=0; j<exp_mom; ++j)
3962 	  s << ' ' << std::setw(width) << std_exp_moments[j];
3963       }
3964       if (num_int_mom) {
3965 	Pecos::PolynomialApproximation::
3966 	  standardize_moments(num_int_moments, std_num_int_moments);
3967 	if (exp_mom)     s << '\n' << std::setw(14) << "integration:";
3968 	for (j=0; j<num_int_mom; ++j)
3969 	  s << ' ' << std::setw(width) << std_num_int_moments[j];
3970       }
3971       prev_exception = false;
3972     }
3973     s << '\n';
3974 
3975     /* COV has been removed:
3976     if (std::abs(mean) > Pecos::SMALL_NUMBER)
3977       s << "  " << std::setw(width)   << std_dev/mean << '\n';
3978     else
3979       s << "  " << std::setw(width+1) << "Undefined\n";
3980     */
3981   }
3982   if (exception)
3983     s << "\nNote: due to non-positive variance (resulting from under-resolved "
3984       << "numerical integration),\n      standardized moments have been "
3985       << "replaced with central moments for at least one response.\n";
3986 }
3987 
3988 
3989 void NonDExpansion::print_covariance(std::ostream& s)
3990 {
3991   switch (covarianceControl) {
3992   case DIAGONAL_COVARIANCE: print_variance(s,   respVariance);   break;
3993   case FULL_COVARIANCE:     print_covariance(s, respCovariance); break;
3994   }
3995 }
3996 
3997 
3998 void NonDExpansion::
3999 print_variance(std::ostream& s, const RealVector& resp_var,
4000 	       const String& prepend)
4001 {
4002   if (!resp_var.empty()) {
4003     if (prepend.empty())   s << "\nVariance vector for response functions:\n";
4004     else s << '\n' << prepend << " variance vector for response functions:\n";
4005     write_col_vector_trans(s, 0, resp_var);
4006   }
4007 }
4008 
4009 
4010 void NonDExpansion::
4011 print_covariance(std::ostream& s, const RealSymMatrix& resp_covar,
4012 		 const String& prepend)
4013 {
4014   if (!resp_covar.empty()) {
4015     if (prepend.empty())   s << "\nCovariance matrix for response functions:\n";
4016     else s << '\n' << prepend << " covariance matrix for response functions:\n";
4017     s << resp_covar;
4018   }
4019 }
4020 
4021 
4022 void NonDExpansion::print_sobol_indices(std::ostream& s)
4023 {
4024   // effects are computed per resp fn within compute_statistics()
4025   // if vbdFlag and expansion_coefficient_flag
4026 
4027   // this fn called if vbdFlag and prints per resp fn if
4028   // expansion_coefficient_flag and non-negligible variance
4029 
4030   s << "\nGlobal sensitivity indices for each response function:\n";
4031 
4032   const StringArray& fn_labels = iteratedModel.response_labels();
4033   StringMultiArrayConstView cv_labels
4034     = iteratedModel.continuous_variable_labels();
4035 
4036   // construct labels corresponding to (aggregated) sobol index map
4037   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
4038   StringArray sobol_labels;  size_t i, j, num_indices;
4039   if (vbdOrderLimit != 1) { // unlimited (0) or includes interactions (>1)
4040     // create aggregate interaction labels (once for all response fns)
4041     std::shared_ptr<SharedApproxData> shared_data_rep
4042       = uSpaceModel.shared_approximation().data_rep();
4043     const Pecos::BitArrayULongMap& sobol_map
4044       = shared_data_rep->sobol_index_map();
4045     sobol_labels.resize(sobol_map.size());
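    // Labels are stored densely in full (uncompressed) index order so both
    // the dense and sparse printing paths below can look them up directly
    // by aggregated Sobol' index position.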
4046     for (Pecos::BAULMCIter map_cit=sobol_map.begin();
4047 	 map_cit!=sobol_map.end(); ++map_cit) { // loop in key sorted order
4048       const BitArray& set = map_cit->first;
4049       unsigned long index = map_cit->second; // 0: 0-way; 1..n: 1-way; >n: interaction
4050       if (index > numContinuousVars) {       // an interaction
4051 	String& label = sobol_labels[index]; // store in index order
4052 	for (j=0; j<numContinuousVars; ++j)
4053 	  if (set[j])
4054 	    label += cv_labels[j] + " ";
4055       }
4056     }
4057   }
4058 
4059   // print sobol indices per response function
4060   for (i=0; i<numFunctions; ++i) {
4061     Approximation& approx_i = poly_approxs[i];
4062     if (approx_i.expansion_coefficient_flag()) {
4063       // Note: vbdFlag can be defined for covarianceControl == NO_COVARIANCE.
4064       // In this case, we cannot screen effectively at this level.
4065       bool well_posed = ( ( covarianceControl   == DIAGONAL_COVARIANCE &&
4066 			    respVariance[i]     <= Pecos::SMALL_NUMBER ) ||
4067 			  ( covarianceControl   == FULL_COVARIANCE &&
4068 			    respCovariance(i,i) <= Pecos::SMALL_NUMBER ) )
4069 	              ? false : true;
4070       if (well_posed) {
4071 	const RealVector& total_indices = approx_i.total_sobol_indices();
4072 	const RealVector& sobol_indices = approx_i.sobol_indices();
4073         Pecos::ULongULongMap sparse_sobol_map
4074 	  = approx_i.sparse_sobol_index_map();
4075 	bool dense = sparse_sobol_map.empty();
4076 	Real sobol; size_t main_cntr = 0;
4077 	// Print Main and Total effects
4078 	s << fn_labels[i] << " Sobol' indices:\n" << std::setw(38) << "Main"
4079 	  << std::setw(19) << "Total\n";
4080 	for (j=0; j<numContinuousVars; ++j) {
4081 	  if (dense)
4082 	    sobol = sobol_indices[j+1];
4083 	  else {
4084 	    Pecos::ULULMIter it = sparse_sobol_map.find(j+1);
4085 	    if (it == sparse_sobol_map.end())
4086 	      sobol = 0.;
4087 	    else
4088 	      { sobol = sobol_indices[it->second]; ++main_cntr; }
4089 	  }
4090 	  if (std::abs(sobol)            > vbdDropTol ||
4091 	      std::abs(total_indices[j]) > vbdDropTol) // print main / total
4092 	    s << "                     " << std::setw(write_precision+7)
4093 	      << sobol << ' ' << std::setw(write_precision+7)
4094 	      << total_indices[j] << ' ' << cv_labels[j] << '\n';
4095 	}
4096 	// Print Interaction effects
4097 	if (vbdOrderLimit != 1) { // unlimited (0) or includes interactions (>1)
4098 	  num_indices = sobol_indices.length();
4099 	  s << std::setw(39) << "Interaction\n";
4100 	  if (dense) {
4101 	    for (j=numContinuousVars+1; j<num_indices; ++j)
4102 	      if (std::abs(sobol_indices[j]) > vbdDropTol) // print interaction
4103 		s << "                     " << std::setw(write_precision+7)
4104 		  << sobol_indices[j] << ' ' << sobol_labels[j] << '\n';
4105 	  }
4106 	  else {
4107 	    Pecos::ULULMIter it = ++sparse_sobol_map.begin(); // skip 0-way
4108 	    std::advance(it, main_cntr);               // advance past 1-way
4109 	    for (; it!=sparse_sobol_map.end(); ++it) { // 2-way and above
4110 	      sobol = sobol_indices[it->second];
4111 	      if (std::abs(sobol) > vbdDropTol) // print interaction
4112 		s << "                     " << std::setw(write_precision+7)
4113 		  << sobol << ' ' << sobol_labels[it->first] << '\n';
4114 	    }
4115 	  }
4116 	}
4117       }
4118       else
4119 	s << fn_labels[i] << " Sobol' indices not available due to negligible "
4120 	  << "variance\n";
4121     }
4122     else
4123       s << fn_labels[i] << " Sobol' indices not available due to expansion "
4124 	<< "coefficient mode\n";
4125   }
4126 }
4127 
4128 
4129 void NonDExpansion::print_local_sensitivity(std::ostream& s)
4130 {
4131   const StringArray& fn_labels = iteratedModel.response_labels();
4132   s << "\nLocal sensitivities for each response function evaluated at "
4133     << "uncertain variable means:\n";
4134   std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
4135   for (size_t i=0; i<numFunctions; ++i)
4136     if (poly_approxs[i].expansion_coefficient_flag()) {
4137       s << fn_labels[i] << ":\n";
4138       write_col_vector_trans(s, (int)i, expGradsMeanX);
4139     }
4140 }
4141 
4142 
4143 void NonDExpansion::print_refinement_diagnostics(std::ostream& s)
4144 {
4145   // Output of the relevant refinement metrics occurs in
4146   // compute_{covariance,level_mapping,final_statistics}_metric();
4147   // additional refinement control diagnostics are output here:
4148   switch (refineControl) {
4149   case Pecos::DIMENSION_ADAPTIVE_CONTROL_GENERALIZED:
4150     if (outputLevel >= DEBUG_OUTPUT) {
4151       // output fine-grained data on generalized index sets
4152       std::shared_ptr<NonDSparseGrid> nond_sparse =
4153 	std::static_pointer_cast<NonDSparseGrid>
4154 	(uSpaceModel.subordinate_iterator().iterator_rep());
4155       nond_sparse->print_smolyak_multi_index();
4156     }
4157     break;
4158   /*
4159   // These calls induce redundant solves (also performed in NonDExpansion::
4160   // reduce_decay_rate_sets()), so, while there is no lag as in the Sobol' case,
4161   // suppress per-QoI diagnostics for now to avoid redundant computation.
4162   case Pecos::DIMENSION_ADAPTIVE_CONTROL_DECAY:
4163     if (outputLevel >= NORMAL_OUTPUT) {
4164       // output spectral data for ML-MF UQ (for finer grain data, activate
4165       // DECAY_DEBUG in packages/pecos/src/OrthogPolyApproximation.cpp).
4166       std::vector<Approximation>& poly_approxs = uSpaceModel.approximations();
4167       std::shared_ptr<PecosApproximation> poly_approx_rep;
4168       for (size_t i=0; i<numFunctions; ++i) {
4169 	poly_approx_rep = std::static_pointer_cast<PecosApproximation>
4170 	  (poly_approxs[i].approx_rep());
4171 	s << "Variable decay rates for response function " << i+1 << ":\n"
4172 	  << poly_approx_rep->dimension_decay_rates();
4173       }
4174     }
4175     break;
4176   // Sobol indices are lagging an iteration since computing these indices is
4177   // triggered by increment_grid_{preference,weights} on subsequent iter.
4178   // Better to output them (as derived quantities) from reduce_*() once
4179   // available downstream, rather than enforcing their compute/print as
4180   // standard Sobol' output upstream.
4181   case Pecos::DIMENSION_ADAPTIVE_CONTROL_SOBOL:
4182     if (outputLevel >= NORMAL_OUTPUT)
4183       print_sobol_indices(s); // from reduce_total_sobol_sets()
4184     break;
4185   */
4186   }
4187 }
4188 
4189 
4190 void NonDExpansion::print_results(std::ostream& s, short results_state)
4191 {
4192   switch (results_state) {
4193   case REFINEMENT_RESULTS: {
4194     // augment refinement output from compute_*_metric() [print_metric=true]
4195     if (outputLevel == DEBUG_OUTPUT) {
4196       //iteratedModel.print_evaluation_summary(s);
4197       switch (refineMetric) {
4198     //case Pecos::NO_METRIC: // not an option for refinement
4199       case Pecos::COVARIANCE_METRIC:
4200       case Pecos::MIXED_STATS_METRIC:
4201     //case Pecos::LEVEL_STATS_METRIC: // moments only computed if beta mappings
4202 	print_moments(s); break;
4203       }
4204     }
4205 
4206     print_refinement_diagnostics(s);
4207     break;
4208   }
4209   case INTERMEDIATE_RESULTS: {
4210     switch (refineMetric) {
4211     case Pecos::NO_METRIC:
4212       print_moments(s); if (totalLevelRequests) print_level_mappings(s); break;
4213     case Pecos::COVARIANCE_METRIC:
4214       print_moments(s); print_covariance(s);                             break;
4215     case Pecos::MIXED_STATS_METRIC:
4216       print_moments(s); print_level_mappings(s);                         break;
4217     case Pecos::LEVEL_STATS_METRIC:
4218       print_level_mappings(s);                                           break;
4219     }
4220 
4221     //print_refinement_diagnostics(s);
4222     break;
4223   }
4224   case FINAL_RESULTS: {
4225     s << "---------------------------------------------------------------------"
4226       << "--------\nStatistics derived analytically from polynomial expansion:"
4227       << '\n';
4228     print_moments(s);
4229     print_covariance(s);
4230     if (!subIteratorFlag && outputLevel >= NORMAL_OUTPUT)
4231       print_local_sensitivity(s);
4232     if (vbdFlag) print_sobol_indices(s);
4233 
4234     // Print level mapping statistics (typically from sampling on expansion)
4235     std::shared_ptr<NonDSampling> exp_sampler_rep =
4236       std::static_pointer_cast<NonDSampling>(expansionSampler.iterator_rep());
4237     bool exp_sampling = (exp_sampler_rep != nullptr),
4238         list_sampling = (exp_sampling &&
4239 			 exp_sampler_rep->method_name() == LIST_SAMPLING);
4240     if (list_sampling) {
4241       // all stats delegated since local moments are not relevant in this case
4242       s << "-------------------------------------------------------------------"
4243 	<< "----------\nStatistics based on " << numSamplesOnExpansion
4244 	<< " imported samples performed on polynomial expansion:\n";
4245       exp_sampler_rep->print_statistics(s);
4246     }
4247     else if (totalLevelRequests) {
4248       s << "-------------------------------------------------------------------"
4249 	<< "----------\nStatistics based on ";
4250       if (exp_sampling)
4251 	s << numSamplesOnExpansion << " samples performed on polynomial "
4252 	  << "expansion:\n";
4253       else
4254 	s << "projection of analytic moments:\n";
4255       // no stats are delegated since we mix numerical stats with local analytic
4256       // moment-based projections (more accurate than sample moment-based
4257       // projections).  In addition, we may have local probability refinements.
4258       print_level_mappings(s);
4259       print_system_mappings(s); // works off of finalStatistics -> no delegation
4260     }
4261     s << "---------------------------------------------------------------------"
4262       << "--------" << std::endl;
4263     break;
4264   }
4265   }
4266 }
4267 
4268 } // namespace Dakota
4269