1 /*  _______________________________________________________________________
2 
3     DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
4     Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
5     This software is distributed under the GNU Lesser General Public License.
6     For more information, see the README file in the top Dakota directory.
7     _______________________________________________________________________ */
8 
9 //- Class:       Analyzer
10 //- Description: Implementation code for the Analyzer class
11 //- Owner:       Mike Eldred
12 //- Checked by:
13 
14 #include <stdexcept>
15 #include "dakota_system_defs.hpp"
16 #include "dakota_data_io.hpp"
17 #include "dakota_tabular_io.hpp"
18 #include "DakotaModel.hpp"
19 #include "DakotaAnalyzer.hpp"
20 #include "ProblemDescDB.hpp"
21 #include "ParallelLibrary.hpp"
22 #include "IteratorScheduler.hpp"
23 #include "PRPMultiIndex.hpp"
24 
25 static const char rcsId[]="@(#) $Id: DakotaAnalyzer.cpp 7035 2010-10-22 21:45:39Z mseldre $";
26 
27 //#define DEBUG
28 
29 namespace Dakota {
30 
31 extern PRPCache data_pairs;
32 
33 
Analyzer(ProblemDescDB & problem_db,Model & model)34 Analyzer::Analyzer(ProblemDescDB& problem_db, Model& model):
35   Iterator(BaseConstructor(), problem_db), compactMode(true),
36   numObjFns(0), numLSqTerms(0), // default: no best data tracking
37   writePrecision(problem_db.get_int("environment.output_precision"))
38 {
39   // set_db_list_nodes() is set by a higher context
40   iteratedModel = model;
41   update_from_model(iteratedModel); // variable/response counts & checks
42 
43   // historical default convergence tolerance
44   if (convergenceTol < 0.0) convergenceTol = 1.0e-4;
45 
46   if (model.primary_fn_type() == OBJECTIVE_FNS)
47     numObjFns = model.num_primary_fns();
48   else if (model.primary_fn_type() == CALIB_TERMS)
49     numLSqTerms = model.num_primary_fns();
50   else if (model.primary_fn_type() != GENERIC_FNS) {
51     Cerr << "\nError: Unknown primary function type in Analyzer." << std::endl;
52     abort_handler(METHOD_ERROR);
53   }
54 
55   if (probDescDB.get_bool("method.variance_based_decomp"))
56     vbdDropTol = probDescDB.get_real("method.vbd_drop_tolerance");
57 
58   if (!numFinalSolutions)  // default is zero
59     numFinalSolutions = 1; // iterator-specific default assignment
60 }
61 
62 
/// Alternate constructor for on-the-fly (no ProblemDescDB) instantiations
/// with a user-provided model; counts/checks still come from the model.
Analyzer::Analyzer(unsigned short method_name, Model& model):
  Iterator(NoDBBaseConstructor(), method_name, model), compactMode(true),
  numObjFns(0), numLSqTerms(0), // default: no best data tracking
  writePrecision(0)
{
  update_from_model(iteratedModel); // variable/response counts & checks
}
70 
71 
/// Alternate constructor for on-the-fly instantiations without a model;
/// iteratedModel remains uninitialized (guarded elsewhere via is_null()).
Analyzer::Analyzer(unsigned short method_name):
  Iterator(NoDBBaseConstructor(), method_name), compactMode(true),
  numObjFns(0), numLSqTerms(0), // default: no best data tracking
  writePrecision(0)
{ }
77 
78 
resize()79 bool Analyzer::resize()
80 {
81   bool parent_reinit_comms = Iterator::resize();
82 
83   numContinuousVars     = iteratedModel.cv();
84   numDiscreteIntVars    = iteratedModel.div();
85   numDiscreteStringVars = iteratedModel.dsv();
86   numDiscreteRealVars   = iteratedModel.drv();
87   numFunctions          = iteratedModel.response_size();
88 
89   return parent_reinit_comms;
90 }
91 
update_from_model(const Model & model)92 void Analyzer::update_from_model(const Model& model)
93 {
94   Iterator::update_from_model(model);
95 
96   numContinuousVars     = model.cv();  numDiscreteIntVars  = model.div();
97   numDiscreteStringVars = model.dsv(); numDiscreteRealVars = model.drv();
98   numFunctions          = model.response_size();
99 
100   bool err_flag = false;
101   // Check for correct bit associated within methodName
102   if ( !(methodName & ANALYZER_BIT) ) {
103     Cerr << "\nError: analyzer bit not activated for method instantiation "
104 	 << "(case " << methodName << ") within Analyzer branch." << std::endl;
105     err_flag = true;
106   }
107   // Check for active design variables and discrete variable support
108   if (methodName == CENTERED_PARAMETER_STUDY ||
109       methodName == LIST_PARAMETER_STUDY     ||
110       methodName == MULTIDIM_PARAMETER_STUDY ||
111       methodName == VECTOR_PARAMETER_STUDY   || methodName == RANDOM_SAMPLING ||
112       methodName == GLOBAL_INTERVAL_EST      || methodName == GLOBAL_EVIDENCE ||
113       methodName == ADAPTIVE_SAMPLING ) {
114     if (!numContinuousVars && !numDiscreteIntVars && !numDiscreteStringVars &&
115 	!numDiscreteRealVars) {
116       Cerr << "\nError: " << method_enum_to_string(methodName)
117 	   << " requires active variables." << std::endl;
118       err_flag = true;
119     }
120   }
121   else { // methods supporting only continuous design variables
122     if (!numContinuousVars) {
123       Cerr << "\nError: " << method_enum_to_string(methodName)
124 	   << " requires active continuous variables." << std::endl;
125       err_flag = true;
126     }
127     if (numDiscreteIntVars || numDiscreteStringVars || numDiscreteRealVars)
128       Cerr << "\nWarning: active discrete variables ignored by "
129 	   << method_enum_to_string(methodName) << std::endl;
130   }
131   // Check for response functions
132   if ( numFunctions <= 0 ) {
133     Cerr << "\nError: number of response functions must be greater than zero."
134 	 << std::endl;
135     err_flag = true;
136   }
137 
138   if (err_flag)
139     abort_handler(METHOD_ERROR);
140 }
141 
142 
initialize_run()143 void Analyzer::initialize_run()
144 {
145   // Verify that iteratedModel is not null (default ctor and some
146   // NoDBBaseConstructor ctors leave iteratedModel uninitialized).
147   if (!iteratedModel.is_null()) {
148     // update context data that is outside scope of local DB specifications.
149     // This is needed for reused objects.
150     //iteratedModel.db_scope_reset(); // TO DO: need better name?
151 
152     // This is to catch un-initialized models used by local iterators that
153     // are not called through IteratorScheduler::run_iterator().  Within a
154     // recursion, it will correspond to the first initialize_run() with an
155     // uninitialized mapping, such as the outer-iterator on the first pass
156     // of a recursion.  On subsequent passes, it may correspond to the inner
157     // iterator.  The Iterator scope should not matter for the iteratedModel
158     // mapping initialize/finalize.
159     if (!iteratedModel.mapping_initialized()) {
160       ParLevLIter pl_iter = methodPCIter->mi_parallel_level_iterator();
161       bool var_size_changed = iteratedModel.initialize_mapping(pl_iter);
162       if (var_size_changed)
163         /*bool reinit_comms =*/ resize(); // Ignore return value
164     }
165 
166     // Do not reset the evaluation reference for sub-iterators
167     // (previously managed via presence/absence of ostream)
168     //if (!subIteratorFlag)
169     if (summaryOutputFlag)
170       iteratedModel.set_evaluation_reference();
171   }
172 }
173 
174 
pre_run()175 void Analyzer::pre_run()
176 { bestVarsRespMap.clear(); }
177 
178 
post_run(std::ostream & s)179 void Analyzer::post_run(std::ostream& s)
180 {
181   if (summaryOutputFlag) {
182     // Print the function evaluation summary for all Iterators
183     if (!iteratedModel.is_null())
184       iteratedModel.print_evaluation_summary(s); // full hdr, relative counts
185 
186     // The remaining final results output varies by iterator branch
187     print_results(s);
188   }
189 }
190 
191 
finalize_run()192 void Analyzer::finalize_run()
193 {
194   // Finalize an initialized mapping.  This will correspond to the first
195   // finalize_run() with an uninitialized mapping, such as the inner-iterator
196   // in a recursion.
197   if (iteratedModel.mapping_initialized()) {
198     bool var_size_changed = iteratedModel.finalize_mapping();
199     if (var_size_changed)
200       /*bool reinit_comms =*/ resize(); // Ignore return value
201   }
202 
203   Iterator::finalize_run(); // included for completeness
204 }
205 
206 
/** Convenience function for derived classes with sets of function
    evaluations to perform (e.g., NonDSampling, DDACEDesignCompExp,
    FSUDesignCompExp, ParamStudy).
    log_resp_flag: collect each response into allResponses (keyed by eval id).
    log_best_flag: track best points in bestVarsRespMap via update_best(). */
void Analyzer::
evaluate_parameter_sets(Model& model, bool log_resp_flag, bool log_best_flag)
{
  // This function does not need an iteratorRep fwd because it is a
  // protected fn only called by letter classes.

  // allVariables or allSamples defines the set of fn evals to be performed
  size_t i, num_evals
    = (compactMode) ? allSamples.numCols() : allVariables.size();
  bool header_flag = (allHeaders.size() == num_evals);
  bool asynch_flag = model.asynch_flag();

  // synchronous case rebuilds allResponses incrementally below; the
  // asynchronous case overwrites it wholesale after synchronize()
  if (!asynch_flag && log_resp_flag) allResponses.clear();

  // Loop over parameter sets and compute responses.  Collect data
  // and track best evaluations based on flags.
  for (i=0; i<num_evals; i++) {
    // output the evaluation header (if present)
    if (header_flag)
      Cout << allHeaders[i];

    // push the i-th parameter set: matrix column (compactMode) or
    // Variables object into the model's active variables
    if (compactMode)
      update_model_from_sample(model, allSamples[i]);
    else
      update_model_from_variables(model, allVariables[i]);

    // compute the response
    if (asynch_flag)
      model.evaluate_nowait(activeSet);
    else {
      model.evaluate(activeSet);
      const Response& resp = model.current_response();
      int eval_id = model.evaluation_id();
      if (log_best_flag) // update best variables/response
        update_best(model.current_variables(), eval_id, resp);
      if (log_resp_flag) // log response data
        allResponses[eval_id] = resp.copy();
      archive_model_response(resp, i);
    }

    archive_model_variables(model, i);
  }
  // synchronize asynchronous evaluations
  if (asynch_flag) {
    const IntResponseMap& resp_map = model.synchronize();
    if (log_resp_flag) // log response data
      allResponses = resp_map;
    if (log_best_flag) { // update best variables/response
      // NOTE(review): pairing the i-th sample/variables with the i-th map
      // entry assumes synchronize() returns responses ordered consistently
      // with submission order -- TODO confirm
      IntRespMCIter r_cit;
      if (compactMode)
        for (i=0, r_cit=resp_map.begin(); r_cit!=resp_map.end(); ++i, ++r_cit)
          update_best(allSamples[i], r_cit->first, r_cit->second);
      else
        for (i=0, r_cit=resp_map.begin(); r_cit!=resp_map.end(); ++i, ++r_cit)
          update_best(allVariables[i], r_cit->first, r_cit->second);
    }
    if (resultsDB.active()) {
      IntRespMCIter r_cit;
      for(r_cit=resp_map.begin(); r_cit!=resp_map.end(); ++r_cit)
        archive_model_response(r_cit->second,
			       std::distance(resp_map.begin(), r_cit));
    }
  }
}
274 
275 
/// Push an entire Variables object into the model's active variables.
void Analyzer::update_model_from_variables(Model& model, const Variables& vars)
{
  // default implementation is sufficient in current uses, but could
  // be overridden in future cases where a view discrepancy can exist
  // between model and vars.
  model.active_variables(vars);
}
283 
284 // ***************************************************
285 // MSE TO DO: generalize for all active variable types
// NonDSampling still overrides in the case of samplingVarsMode != active view
287 // ***************************************************
288 
update_model_from_sample(Model & model,const Real * sample_vars)289 void Analyzer::update_model_from_sample(Model& model, const Real* sample_vars)
290 {
291   // default implementation is sufficient for FSUDesignCompExp and
292   // NonD{Quadrature,SparseGrid,Cubature}, but NonDSampling overrides.
293   size_t i, num_cv = model.cv();
294   for (i=0; i<num_cv; ++i)
295     model.continuous_variable(sample_vars[i], i);
296 }
297 
298 
299 /** Default mapping that maps into continuous part of Variables only */
300 void Analyzer::
sample_to_variables(const Real * sample_c_vars,Variables & vars)301 sample_to_variables(const Real* sample_c_vars, Variables& vars)
302 {
303   // pack sample_matrix into vars_array
304   const Variables& model_vars = iteratedModel.current_variables();
305   if (vars.is_null()) // use minimal data ctor
306     vars = Variables(model_vars.shared_data());
307   for (size_t i=0; i<numContinuousVars; ++i)
308     vars.continuous_variable(sample_c_vars[i], i); // ith row
309   // BMA: this may be needed if vars wasn't initialized off the model
310   vars.inactive_continuous_variables(
311     model_vars.inactive_continuous_variables());
312   // preserve any active discrete vars (unsupported by sample_matrix)
313   size_t num_adiv = model_vars.adiv(), num_adrv = model_vars.adrv();
314   if (num_adiv)
315     vars.all_discrete_int_variables(model_vars.all_discrete_int_variables());
316   if (num_adrv)
317     vars.all_discrete_real_variables(model_vars.all_discrete_real_variables());
318 }
319 
320 
321 void Analyzer::
samples_to_variables_array(const RealMatrix & sample_matrix,VariablesArray & vars_array)322 samples_to_variables_array(const RealMatrix& sample_matrix,
323 			   VariablesArray& vars_array)
324 {
325   // pack sample_matrix into vars_array
326   size_t i, num_samples = sample_matrix.numCols(); // #vars by #samples
327   if (vars_array.size() != num_samples)
328     vars_array.resize(num_samples);
329   for (i=0; i<num_samples; ++i)
330     sample_to_variables(sample_matrix[i], vars_array[i]);
331 }
332 
333 
334 /** Default implementation maps active continuous variables only */
335 void Analyzer::
variables_to_sample(const Variables & vars,Real * sample_c_vars)336 variables_to_sample(const Variables& vars, Real* sample_c_vars)
337 {
338   const RealVector& c_vars = vars.continuous_variables();
339   for (size_t i=0; i<numContinuousVars; ++i)
340     sample_c_vars[i] = c_vars[i]; // ith row of samples_matrix
341 }
342 
343 
344 void Analyzer::
variables_array_to_samples(const VariablesArray & vars_array,RealMatrix & sample_matrix)345 variables_array_to_samples(const VariablesArray& vars_array,
346 			   RealMatrix& sample_matrix)
347 {
348   // pack vars_array into sample_matrix
349   size_t i, num_samples = vars_array.size();
350   if (sample_matrix.numRows() != numContinuousVars ||
351       sample_matrix.numCols() != num_samples)
352     sample_matrix.reshape(numContinuousVars, num_samples); // #vars by #samples
353   // populate each colum of the sample matrix (one col per sample)
354   for (i=0; i<num_samples; ++i)
355     variables_to_sample(vars_array[i], sample_matrix[i]);
356 }
357 
358 
359 
360 /** Generate (numvars + 2)*num_samples replicate sets for VBD,
361     populating allSamples( numvars, (numvars + 2)*num_samples ) */
get_vbd_parameter_sets(Model & model,int num_samples)362 void Analyzer::get_vbd_parameter_sets(Model& model, int num_samples)
363 {
364   if (!compactMode) {
365     Cerr << "\nError: get_vbd_parameter_sets requires compactMode.\n";
366     abort_handler(METHOD_ERROR);
367   }
368 
369   // BMA TODO: This may not be right for all LHS active/inactive
370   // sampling modes, but is equivalent to previous code.
371   size_t num_vars = numContinuousVars + numDiscreteIntVars +
372     numDiscreteStringVars + numDiscreteRealVars;
373   size_t num_replicates = num_vars + 2;
374 
375   allSamples.shape(num_vars, (num_vars+2)*num_samples);
376 
377   // run derived sampling routine generate two initial matrices
378   vary_pattern(true);
379 
380   // populate the first num_samples cols of allSamples
381   RealMatrix sample_1(Teuchos::View, allSamples, num_vars, num_samples, 0, 0);
382   get_parameter_sets(model, num_samples, sample_1);
383   if (sample_1.numCols() != num_samples) {
384     Cerr << "\nError in Analyzer::variance_based_decomp(): Expected "
385 	 << num_samples << " variable samples; received "
386 	 << sample_1.numCols() << std::endl;
387     abort_handler(METHOD_ERROR);
388   }
389 
390   // populate the second num_samples cols of allSamples
391   RealMatrix sample_2(Teuchos::View, allSamples, num_vars, num_samples, 0,
392 		     num_samples);
393   get_parameter_sets(model, num_samples, sample_2);
394   if (sample_2.numCols() != num_samples) {
395     Cerr << "\nError in Analyzer::variance_based_decomp(): Expected "
396 	 << num_samples << " variable samples; received "
397 	 << sample_2.numCols() << std::endl;
398     abort_handler(METHOD_ERROR);
399   }
400 
401   // one additional replicate per variable
402   for (int i=0; i<num_vars; ++i) {
403     int replicate_index = i+2;
404     RealMatrix sample_r(Teuchos::View, allSamples, num_vars, num_samples, 0,
405 			replicate_index * num_samples);
406     // initialize the replicate to the second sample
407     sample_r.assign(sample_2);
408     // now swap in a row from the first sample
409     for (int j=0; j<num_samples; ++j)
410       sample_r(i, j) = sample_1(i, j);
411   }
412 }
413 
414 
/** Calculation of sensitivity indices obtained by variance based
    decomposition.  These indices are obtained by the Saltelli version
    of the Sobol VBD which uses (K+2)*N function evaluations, where K
    is the number of dimensions (uncertain vars) and N is the number
    of samples.  Results populate the S4 (first-order) and T4 (total)
    index arrays, one RealVector of length K per response function. */
void Analyzer::compute_vbd_stats(const int num_samples,
				 const IntResponseMap& resp_samples)
{
  using boost::multi_array;
  using boost::extents;

  // BMA TODO: This may not be right for all LHS active/inactive
  // sampling modes, but is equivalent to previous code.
  size_t i, j, k, num_vars = numContinuousVars + numDiscreteIntVars +
    numDiscreteStringVars + numDiscreteRealVars;

  // sanity check: one response per column of the (K+2)*N allSamples matrix
  if (resp_samples.size() != num_samples*(num_vars+2)) {
    Cerr << "\nError in Analyzer::compute_vbd_stats: expected "
	 << num_samples << " responses; received " << resp_samples.size()
	 << std::endl;
    abort_handler(METHOD_ERROR);
  }

  // BMA: for now copy the data to previous data structure
  //      total_fn_vals[respFn][replicate][sample]
  // This is making the assumption that the responses are ordered as allSamples
  // BMA TODO: compute statistics on finite samples only
  // replicate 0 = matrix A, replicate 1 = matrix B, replicates 2..K+1 are
  // the per-variable hybrid matrices (see get_vbd_parameter_sets)
  multi_array<Real,3>
    total_fn_vals(extents[numFunctions][num_vars+2][num_samples]);
  IntRespMCIter r_it = resp_samples.begin();
  for (i=0; i<(num_vars+2); ++i)
    for (j=0; j<num_samples; ++r_it, ++j)
      for (k=0; k<numFunctions; ++k)
	total_fn_vals[k][i][j] = r_it->second.function_value(k);

#ifdef DEBUG
    Cout << "allSamples:\n" << allSamples << '\n';
    for (k=0; k<numFunctions; ++k)
      for (i=0; i<num_vars+2; ++i)
	for (j=0; j<num_samples; ++j)
	  Cout << "Response " << k << " for replicate " << i << ", sample " << j
	       << ": " << total_fn_vals[k][i][j] << '\n';
#endif

  // There are four versions of the indices being calculated.
  // S1 is a corrected version from Saltelli's 2004 "Sensitivity
  // Analysis in Practice" book.  S1 does not have scaled output Y,
  // but S2 and S3 do (where scaled refers to subtracting the overall mean
  // from all of the output samples).  S2 and S3 have different ways of
  // calculating the overal variance.  S4 uses the scaled Saltelli
  // formulas from the following paper:  Saltelli, A., Annoni, P., Azzini, I.,
  // Campolongo, F., Ratto, M., Tarantola, S.. Variance based sensitivity
  // analysis of model output.  Design and estimator for the total sensitivity
  // index. Comp Physics Comm 2010;181:259--270.  We decided to use formulas S4
  // and T4 based on testing with a shock physics problem that had significant
  // nonlinearities, interactions, and several response functions.
  // The results are documented in a paper by Weirs et al. that will
  // be forthcoming in RESS in 2011. For now we are leaving the different
  // implementations in if further testing is needed.

  //RealVectorArray S1(numFunctions), T1(numFunctions);
  //RealVectorArray S2(numFunctions), T2(numFunctions);
  //RealVectorArray S3(numFunctions), T3(numFunctions);
  S4.resize(numFunctions, RealVector(num_vars));
  T4.resize(numFunctions, RealVector(num_vars));

  // mean-centered copy of the response data, built per response below
  multi_array<Real,3>
    total_norm_vals(extents[numFunctions][num_vars+2][num_samples]);

  // Loop over number of responses to obtain sensitivity indices for each
  for (k=0; k<numFunctions; ++k) {

    // calculate expected value of Y
    Real mean_hatY = 0., var_hatYS = 0., var_hatYT = 0.,
      mean_sq1=0., mean_sq2 = 0., var_hatYnom = 0.,
      overall_mean = 0., mean_hatB=0., var_hatYC= 0., mean_C=0.,
      mean_A_norm = 0., mean_B_norm = 0., var_A_norm = 0., var_B_norm=0.,
      var_AB_norm = 0.;
    // mean estimate of Y (possibly average over matrices later)
    for (j=0; j<num_samples; j++)
      mean_hatY += total_fn_vals[k][0][j];
    mean_hatY /= (Real)num_samples;
    mean_sq1 = mean_hatY*mean_hatY;
    // mean over matrix B; mean_sq2 is the A-mean times B-mean cross term
    for (j=0; j<num_samples; j++)
      mean_hatB += total_fn_vals[k][1][j];
    mean_hatB /= (Real)(num_samples);
    mean_sq2 = mean_hatY*mean_hatB;
    //mean_sq2 += total_fn_vals[k][0][j]*total_fn_vals[k][1][j];
    //mean_sq2 /= (Real)(num_samples);
    // variance estimate of Y for S indices
    for (j=0; j<num_samples; j++)
      var_hatYS += std::pow(total_fn_vals[k][0][j], 2.);
    var_hatYnom = var_hatYS/(Real)(num_samples) - mean_sq1;
    var_hatYS = var_hatYS/(Real)(num_samples) - mean_sq2;
    // variance estimate of Y for T indices
    for (j=0; j<num_samples; j++)
      var_hatYT += std::pow(total_fn_vals[k][1][j], 2.);
    var_hatYT = var_hatYT/(Real)(num_samples) - (mean_hatB*mean_hatB);
    // copy all replicates and accumulate the overall (pooled) mean
    for (j=0; j<num_samples; j++){
      for(i=0; i<(num_vars+2); i++){
        total_norm_vals[k][i][j]=total_fn_vals[k][i][j];
        overall_mean += total_norm_vals[k][i][j];
      }
    }

    // center the normalized copy by the pooled mean over all replicates
    overall_mean /= Real((num_samples)* (num_vars+2));
    for (j=0; j<num_samples; j++)
      for(i=0; i<(num_vars+2); i++)
        total_norm_vals[k][i][j]-=overall_mean;
    // pooled A+B mean and variance used by the S4/T4 denominators
    mean_C=mean_hatB*(Real)(num_samples)+mean_hatY*(Real)(num_samples);
    mean_C=mean_C/(2*(Real)(num_samples));
    for (j=0; j<num_samples; j++)
      var_hatYC += std::pow(total_fn_vals[k][0][j],2);
    for (j=0; j<num_samples; j++)
      var_hatYC += std::pow(total_fn_vals[k][1][j],2);
    var_hatYC = var_hatYC/((Real)(num_samples)*2)-mean_C*mean_C;

  //  for (j=0; j<num_samples; j++)
  //    mean_A_norm += total_norm_vals[k][0][j];
  //  mean_A_norm /= (Real)num_samples;
  //  for (j=0; j<num_samples; j++)
  //    mean_B_norm += total_norm_vals[k][1][j];
  //  mean_B_norm /= (Real)(num_samples);
  //  for (j=0; j<num_samples; j++)
  //    var_A_norm += std::pow(total_norm_vals[k][0][j], 2.);
  //  var_AB_norm = var_A_norm/(Real)(num_samples) - (mean_A_norm*mean_B_norm);
  //  var_A_norm = var_A_norm/(Real)(num_samples) - (mean_A_norm*mean_A_norm);
  // variance estimate of Y for T indices
  //  for (j=0; j<num_samples; j++)
  //    var_B_norm += std::pow(total_norm_vals[k][1][j], 2.);
  //  var_B_norm = var_B_norm/(Real)(num_samples) - (mean_B_norm*mean_B_norm);


#ifdef DEBUG
    Cout << "\nVariance of Yhat for S " << var_hatYS
	 << "\nVariance of Yhat for T " << var_hatYT
	 << "\nMeanSq1 " << mean_sq1 << "\nMeanSq2 " << mean_sq2 << '\n';
#endif

    // calculate first order sensitivity indices and first order total indices
    for (i=0; i<num_vars; i++) {
      Real sum_S = 0., sum_T = 0., sum_J = 0., sum_J2 = 0., sum_3 = 0., sum_32=0.,sum_5=0, sum_6=0;
      // sum_J2: Jansen-style squared difference between B and hybrid i
      //         (T4 numerator); sum_3: Saltelli-2010 first-order numerator
      for (j=0; j<num_samples; j++) {
	//sum_S += total_fn_vals[k][0][j]*total_fn_vals[k][i+2][j];
	//sum_T += total_fn_vals[k][1][j]*total_fn_vals[k][i+2][j];
        //sum_5 += total_norm_vals[k][0][j]*total_norm_vals[k][i+2][j];
	//sum_6 += total_norm_vals[k][1][j]*total_norm_vals[k][i+2][j];
	//sum_J += std::pow((total_fn_vals[k][0][j]-total_fn_vals[k][i+2][j]),2.);
	sum_J2 += std::pow((total_norm_vals[k][1][j]-total_norm_vals[k][i+2][j]),2.);
	sum_3 += total_norm_vals[k][0][j]*(total_norm_vals[k][i+2][j]-total_norm_vals[k][1][j]);
	//sum_32 += total_fn_vals[k][1][j]*(total_fn_vals[k][1][j]-total_fn_vals[k][i+2][j]);
      }

      //S1[k][i] = (sum_S/(Real)(num_samples-1) - mean_sq2)/var_hatYS;
      //S1[k][i] = (sum_S/(Real)(num_samples) - mean_sq2)/var_hatYS;
      //T1[k][i] = 1. - (sum_T/(Real)(num_samples-1) - mean_sq2)/var_hatYS;
      //T1[k][i] = 1. - (sum_T/(Real)(num_samples) - (mean_hatB*mean_hatB))/var_hatYT;
      //S2[k][i] = (sum_S/(Real)(num_samples) - mean_sq1)/var_hatYnom;
      //T2[k][i] = 1. - (sum_T/(Real)(num_samples) - mean_sq1)/var_hatYnom;
      //S2[k][i] = (sum_5/(Real)(num_samples) - (mean_A_norm*mean_B_norm))/var_AB_norm;
      //T2[k][i] = 1. - (sum_6/(Real)(num_samples) - (mean_B_norm*mean_B_norm))/var_B_norm;
      //S3[k][i] = ((sum_5/(Real)(num_samples)) - (mean_A_norm*mean_A_norm))/var_A_norm;
      //T3[k][i] = 1. - (sum_6/(Real)(num_samples) - (mean_A_norm*mean_B_norm))/var_AB_norm;
      //S3[k][i] = (sum_3/(Real)(num_samples))/var_hatYC;
      //T3[k][i] = (sum_32/(Real)(num_samples))/var_hatYnom;
      //S4[k][i] = (var_hatYnom - (sum_J/(Real)(2*num_samples)))/var_hatYnom;
      S4[k][i] = (sum_3/(Real)(num_samples))/var_hatYC;
      T4[k][i] = (sum_J2/(Real)(2*num_samples))/var_hatYC;
    }
  }
}
586 
587 
588 /** Generate tabular output with active variables (compactMode) or all
589     variables with their labels and response labels, with no data.
590     Variables are sequenced {cv, div, drv} */
pre_output()591 void Analyzer::pre_output()
592 {
593   // distinguish between defaulted pre-run and user-specified
594   if (!parallelLib.command_line_user_modes())
595     return;
596 
597   const String& filename = parallelLib.command_line_pre_run_output();
598   if (filename.empty()) {
599     if (outputLevel > QUIET_OUTPUT)
600       Cout << "\nPre-run phase complete: no output requested.\n" << std::endl;
601     return;
602   }
603 
604   size_t num_evals = compactMode ? allSamples.numCols() : allVariables.size();
605   if (num_evals == 0) {
606     if (outputLevel > QUIET_OUTPUT)
607       Cout << "\nPre-run phase complete: no variables to output.\n"
608 	   << std::endl;
609     return;
610   }
611 
612   std::ofstream tabular_file;
613   TabularIO::open_file(tabular_file, filename, "pre-run output");
614 
615   // try to mitigate errors resulting from lack of precision in output
616   // the full 17 digits might surprise users, but will reduce
617   // numerical errors between pre/post phases
618   // TODO: consider passing precision to helper functions instead of using global
619   int save_precision;
620   if (writePrecision == 0) {
621     save_precision = write_precision;
622     write_precision = 17;
623   }
624 
625   // Write all variables in input spec ordering; always annotated.
626   // When in compactMode, get the inactive variables off the Model and
627   // use sample_to_variables to set the discrete variables not treated
628   // by allSamples.
629   unsigned short tabular_format =
630     parallelLib.program_options().pre_run_output_format();
631   TabularIO::write_header_tabular(tabular_file,
632 				  iteratedModel.current_variables(),
633 				  iteratedModel.current_response(),
634 				  "eval_id",
635 				  tabular_format);
636 
637   tabular_file << std::setprecision(write_precision)
638 	       << std::resetiosflags(std::ios::floatfield);
639 
640   Variables vars = iteratedModel.current_variables().copy();
641   for (size_t eval_index = 0; eval_index < num_evals; eval_index++) {
642 
643     TabularIO::write_leading_columns(tabular_file, eval_index+1,
644 				     iteratedModel.interface_id(),
645 				     tabular_format);
646     if (compactMode) {
647       // allSamples num_vars x num_evals, so each col becomes tabular file row
648       // populate the active discrete variables that aren't in sample_matrix
649       size_t num_vars = allSamples.numRows();
650       sample_to_variables(allSamples[eval_index], vars);
651       vars.write_tabular(tabular_file);
652     }
653     else
654       allVariables[eval_index].write_tabular(tabular_file);
655     // no response data, so terminate the record
656     tabular_file << '\n';
657   }
658 
659   tabular_file.flush();
660   tabular_file.close();
661 
662   if (writePrecision == 0)
663     write_precision = save_precision;
664   if (outputLevel > QUIET_OUTPUT)
665     Cout << "\nPre-run phase complete: variables written to tabular file "
666 	 << filename << ".\n" << std::endl;
667 }
668 
669 
/// Read num_evals variables/responses from the post-run input tabular file,
/// populating allSamples (compactMode) or allVariables, plus allResponses;
/// imported points are mirrored into the restart file and evaluation cache,
/// and best-point tracking is updated when objective/calibration data exist.
void Analyzer::read_variables_responses(int num_evals, size_t num_vars)
{
  // distinguish between defaulted post-run and user-specified
  if (!parallelLib.command_line_user_modes())
    return;

  const String& filename = parallelLib.command_line_post_run_input();
  if (filename.empty()) {
    if (outputLevel > QUIET_OUTPUT)
      Cout << "\nPost-run phase initialized: no input requested.\n"
	   << std::endl;
    return;
  }

  if (num_evals == 0) {
    if (outputLevel > QUIET_OUTPUT)
      Cout << "\nPost-run phase initialized: zero samples specified.\n"
	   << std::endl;
    return;
  }

  // pre/post only supports annotated; could detect
  unsigned short tabular_format =
    parallelLib.program_options().post_run_input_format();

  // Define modelList and recastFlags to support any recastings within
  // a model recursion
  bool map_to_iter_space = iteratedModel.manage_data_recastings();

  // TO DO: validate/accommodate incoming num_vars since it may be defined
  // from a local sampling mode (see NonDSampling) that differs from active;
  // support for active discrete also varies across the post-run Iterators.
  bool active_only = true; // consistent with PStudyDACE use cases
  Variables vars(iteratedModel.current_variables().copy());
  Response  resp(iteratedModel.current_response().copy());

  PRPList import_prp_list;
  bool verbose = (outputLevel > NORMAL_OUTPUT);
  // This reader will either get the eval ID from the file, or number
  // the IDs starting from 1
  TabularIO::read_data_tabular(filename, "post-run input", vars, resp,
			       import_prp_list, tabular_format, verbose,
			       active_only);
  size_t num_imported = import_prp_list.size();
  // require at least num_evals records; extras beyond num_evals are ignored
  if (num_imported < num_evals) {
    Cerr << "Error: number of imported evaluations (" << num_imported
	 << ") less than expected (" << num_evals << ")." << std::endl;
    abort_handler(METHOD_ERROR);
  }
  else if (verbose) {
    Cout << "\nRead " << num_imported << " samples from file " << filename;
    if (num_imported > num_evals)
      Cout << " of which " << num_evals << " will be used." << std::endl;
    else Cout << std::endl;
  }

  if (compactMode) allSamples.shapeUninitialized(num_vars, num_evals);
  else             allVariables.resize(num_evals);

  size_t i; PRPLIter prp_it;
  bool cache = iteratedModel.evaluation_cache(), // recurse_flag = true
     restart = iteratedModel.restart_file();     // recurse_flag = true
  Variables iter_vars; Response iter_resp;
  for (i=0, prp_it=import_prp_list.begin(); i<num_evals; ++i, ++prp_it) {

    ParamResponsePair& pr = *prp_it;

    // insert imported user-space data into evaluation cache (for consistency)
    // and restart (more likely to be useful).  Unlike DataFitSurrModel, we
    // will preserve the incoming eval id in the post-input file import case.
    if (restart) parallelLib.write_restart(pr); // preserve eval id
    if (cache)   data_pairs.insert(pr); // duplicate ids OK for PRPCache

    // manage any model recastings to promote from user-space to iterator-space
    if (map_to_iter_space)
      iteratedModel.user_space_to_iterator_space(pr.variables(), pr.response(),
						 iter_vars, iter_resp);
    else
      { iter_vars = pr.variables(); iter_resp = pr.response(); }

    // update allVariables,allSamples
    if (compactMode) variables_to_sample(iter_vars, allSamples[i]);
    else             allVariables[i] = iter_vars;
    // update allResponses (requires unique eval IDs)
    allResponses[pr.eval_id()] = iter_resp;

    // mirror any post-processing in Analyzer::evaluate_parameter_sets()
    if (numObjFns || numLSqTerms)
      update_best(iter_vars, i+1, iter_resp);
  }

  if (outputLevel > QUIET_OUTPUT)
    Cout << "\nPost-run phase initialized: variables / responses read from "
	 << "tabular\nfile " << filename << ".\n" << std::endl;
}
766 
767 
768 /** printing of variance based decomposition indices. */
print_sobol_indices(std::ostream & s) const769 void Analyzer::print_sobol_indices(std::ostream& s) const
770 {
771   StringMultiArrayConstView cv_labels
772     = iteratedModel.continuous_variable_labels();
773   StringMultiArrayConstView div_labels
774     = iteratedModel.discrete_int_variable_labels();
775   StringMultiArrayConstView drv_labels
776     = iteratedModel.discrete_real_variable_labels();
777   const StringArray& resp_labels = iteratedModel.response_labels();
778   // output explanatory info
779   //s << "Variance Based Decomposition Sensitivity Indices\n"
780   //  << "These Sobol' indices measure the importance of the uncertain input\n"
781   //  << "variables in determining the uncertainty (variance) of the output.\n"
782   //  << "Si measures the main effect for variable i itself, while Ti\n"
783   //  << "measures the total effect (including the interaction effects\n"
784   //  << "of variable i with other uncertain variables.)\n";
785   s << std::scientific
786     << "\nGlobal sensitivity indices for each response function:\n";
787 
788   size_t i, k, offset;
789   for (k=0; k<numFunctions; ++k) {
790     s << resp_labels[k] << " Sobol' indices:\n";
791     s << std::setw(38) << "Main" << std::setw(19) << "Total\n";
792     Real main, total;
793     for (i=0; i<numContinuousVars; ++i) {
794       main = S4[k][i]; total = T4[k][i];
795       if (std::abs(main) > vbdDropTol || std::abs(total) > vbdDropTol)
796         s << "                     " << std::setw(write_precision+7) << main
797 	  << ' ' << std::setw(write_precision+7) << total << ' '
798 	  << cv_labels[i] << '\n';
799     }
800     offset = numContinuousVars;
801     for (i=0; i<numDiscreteIntVars; ++i) {
802       main = S4[k][i+offset]; total = T4[k][i+offset];
803       if (std::abs(main) > vbdDropTol || std::abs(total) > vbdDropTol)
804 	s << "                     " << std::setw(write_precision+7)
805 	  << main << ' ' << std::setw(write_precision+7)
806 	  << total << ' ' << div_labels[i] << '\n';
807     }
808     offset += numDiscreteIntVars;
809     //for (i=0; i<numDiscreteStringVars; ++i) // LPS TO DO
810     //offset += numDiscreteStringVars;
811     for (i=0; i<numDiscreteRealVars; ++i) {
812       main = S4[k][i+offset]; total = T4[k][i+offset];
813       if (std::abs(main) > vbdDropTol || std::abs(total) > vbdDropTol)
814 	s << "                     " << std::setw(write_precision+7)
815 	  << main << ' ' << std::setw(write_precision+7)
816           << total << ' ' << drv_labels[i] << '\n';
817     }
818   }
819 }
820 
821 /** printing of variance based decomposition indices. */
archive_sobol_indices() const822 void Analyzer::archive_sobol_indices() const
823 {
824   if(!resultsDB.active())
825     return;
826 
827   StringMultiArrayConstView cv_labels
828     = iteratedModel.continuous_variable_labels();
829   StringMultiArrayConstView div_labels
830     = iteratedModel.discrete_int_variable_labels();
831   StringMultiArrayConstView drv_labels
832     = iteratedModel.discrete_real_variable_labels();
833   const StringArray& resp_labels = iteratedModel.response_labels();
834 
835 
836   size_t i, k, offset;
837   for (k=0; k<numFunctions; ++k) {
838     RealArray main_effects, total_effects;
839     StringArray scale_labels;
840     for (i=0; i<numContinuousVars; ++i) {
841       Real main = S4[k][i], total = T4[k][i];
842       if (std::abs(main) > vbdDropTol || std::abs(total) > vbdDropTol) {
843         main_effects.push_back(main);
844         total_effects.push_back(total);
845         scale_labels.push_back(cv_labels[i]);
846       }
847     }
848     offset = numContinuousVars;
849     for (i=0; i<numDiscreteIntVars; ++i) {
850       Real main = S4[k][i+offset], total = T4[k][i+offset];
851       if (std::abs(main) > vbdDropTol || std::abs(total) > vbdDropTol) {
852         main_effects.push_back(main);
853         total_effects.push_back(total);
854         scale_labels.push_back(div_labels[i]);
855       }
856     }
857     offset += numDiscreteIntVars;
858     //for (i=0; i<numDiscreteStringVars; ++i) // LPS TO DO
859     //offset += numDiscreteStringVars;
860     for (i=0; i<numDiscreteRealVars; ++i) {
861       Real main = S4[k][i+offset], total = T4[k][i+offset];
862       if (std::abs(main) > vbdDropTol || std::abs(total) > vbdDropTol) {
863         main_effects.push_back(main);
864         total_effects.push_back(total);
865         scale_labels.push_back(drv_labels[i]);
866       }
867     }
868     DimScaleMap scales;
869     scales.emplace(0, StringScale("variables", scale_labels, ScaleScope::UNSHARED));
870     resultsDB.insert(run_identifier(), {String("main_effects"), resp_labels[k]},
871                      main_effects, scales);
872     resultsDB.insert(run_identifier(), {String("total_effects"), resp_labels[k]},
873                      total_effects, scales);
874   }
875 }
876 
compute_best_metrics(const Response & response,std::pair<Real,Real> & metrics)877 void Analyzer::compute_best_metrics(const Response& response,
878 				    std::pair<Real,Real>& metrics)
879 {
880   size_t i, constr_offset;
881   const RealVector& fn_vals = response.function_values();
882   const RealVector& primary_wts
883     = iteratedModel.primary_response_fn_weights();
884   Real& obj_fn = metrics.second; obj_fn = 0.0;
885   if (numObjFns) {
886     constr_offset = numObjFns;
887     if (primary_wts.empty()) {
888       for (i=0; i<numObjFns; i++)
889 	obj_fn += fn_vals[i];
890       if (numObjFns > 1)
891 	obj_fn /= (Real)numObjFns;
892     }
893     else
894       for (i=0; i<numObjFns; i++)
895 	obj_fn += primary_wts[i] * fn_vals[i];
896   }
897   else if (numLSqTerms) {
898     constr_offset = numLSqTerms;
899     if (primary_wts.empty())
900       for (i=0; i<numLSqTerms; i++)
901 	obj_fn += std::pow(fn_vals[i], 2);
902     else
903       for (i=0; i<numLSqTerms; i++)
904 	obj_fn += std::pow(primary_wts[i]*fn_vals[i], 2);
905   }
906   else // no "best" metric currently defined for generic response fns
907     return;
908   Real& constr_viol   = metrics.first; constr_viol= 0.0;
909   size_t num_nln_ineq = iteratedModel.num_nonlinear_ineq_constraints(),
910          num_nln_eq   = iteratedModel.num_nonlinear_eq_constraints();
911   const RealVector& nln_ineq_lwr_bnds
912     = iteratedModel.nonlinear_ineq_constraint_lower_bounds();
913   const RealVector& nln_ineq_upr_bnds
914     = iteratedModel.nonlinear_ineq_constraint_upper_bounds();
915   const RealVector& nln_eq_targets
916     = iteratedModel.nonlinear_eq_constraint_targets();
917   for (i=0; i<num_nln_ineq; i++) { // ineq constraint violation
918     size_t index = i + constr_offset;
919     Real ineq_con = fn_vals[index];
920     if (ineq_con > nln_ineq_upr_bnds[i])
921       constr_viol += std::pow(ineq_con - nln_ineq_upr_bnds[i],2);
922     else if (ineq_con < nln_ineq_lwr_bnds[i])
923       constr_viol += std::pow(nln_ineq_lwr_bnds[i] - ineq_con,2);
924   }
925   for (i=0; i<num_nln_eq; i++) { // eq constraint violation
926     size_t index = i + constr_offset + num_nln_ineq;
927     Real eq_con = fn_vals[index];
928     if (std::fabs(eq_con - nln_eq_targets[i]) > 0.)
929       constr_viol += std::pow(eq_con - nln_eq_targets[i], 2);
930   }
931 }
932 
933 
934 void Analyzer::
update_best(const Real * sample_c_vars,int eval_id,const Response & response)935 update_best(const Real* sample_c_vars, int eval_id, const Response& response)
936 {
937   RealRealPair metrics;
938   compute_best_metrics(response, metrics);
939 #ifdef DEBUG
940   Cout << "Best metrics: " << metrics.first << ' ' << metrics.second
941        << std::endl;
942 #endif
943 
944   size_t num_best_map = bestVarsRespMap.size();
945   if (num_best_map < numFinalSolutions) { // initialization of best map
946     Variables vars = iteratedModel.current_variables().copy();
947     sample_to_variables(sample_c_vars, vars); // copy sample only when needed
948     Response copy_resp = response.copy();
949     ParamResponsePair prp(vars, iteratedModel.interface_id(), copy_resp,
950 			  eval_id, false); // shallow copy since previous deep
951     std::pair<RealRealPair, ParamResponsePair> new_pr(metrics, prp);
952     bestVarsRespMap.insert(new_pr);
953   }
954   else {
955     RealPairPRPMultiMap::iterator it = --bestVarsRespMap.end();
956     //   Primary criterion: constraint violation must be <= stored violation
957     // Secondary criterion: for equal (or zero) constraint violation, objective
958     //                      must be < stored objective
959     if (metrics < it->first) { // new best
960       bestVarsRespMap.erase(it);
961       Variables vars = iteratedModel.current_variables().copy();
962       sample_to_variables(sample_c_vars, vars); // copy sample only when needed
963       Response copy_resp = response.copy();
964       ParamResponsePair prp(vars, iteratedModel.interface_id(), copy_resp,
965 			    eval_id, false); // shallow copy since previous deep
966       std::pair<RealRealPair, ParamResponsePair> new_pr(metrics, prp);
967       bestVarsRespMap.insert(new_pr);
968     }
969   }
970 }
971 
972 
973 void Analyzer::
update_best(const Variables & vars,int eval_id,const Response & response)974 update_best(const Variables& vars, int eval_id, const Response& response)
975 {
976   RealRealPair metrics;
977   compute_best_metrics(response, metrics);
978 #ifdef DEBUG
979   Cout << "Best metrics: " << metrics.first << ' ' << metrics.second
980        << std::endl;
981 #endif
982 
983   size_t num_best_map = bestVarsRespMap.size();
984   if (num_best_map < numFinalSolutions) { // initialization of best map
985     ParamResponsePair prp(vars, iteratedModel.interface_id(),
986 			  response, eval_id); // deep copy
987     std::pair<RealRealPair, ParamResponsePair> new_pr(metrics, prp);
988     bestVarsRespMap.insert(new_pr);
989   }
990   else {
991     RealPairPRPMultiMap::iterator it = --bestVarsRespMap.end();
992     //   Primary criterion: constraint violation must be <= stored violation
993     // Secondary criterion: for equal (or zero) constraint violation, objective
994     //                      must be < stored objective
995     if (metrics < it->first) { // new best
996       bestVarsRespMap.erase(it);
997       ParamResponsePair prp(vars, iteratedModel.interface_id(),
998 			    response, eval_id); // deep copy
999       std::pair<RealRealPair, ParamResponsePair> new_pr(metrics, prp);
1000       bestVarsRespMap.insert(new_pr);
1001     }
1002   }
1003 }
1004 
1005 
print_results(std::ostream & s,short results_state)1006 void Analyzer::print_results(std::ostream& s, short results_state)
1007 {
1008   if (!numObjFns && !numLSqTerms) {
1009     s << "<<<<< Best data metrics not defined for generic response functions\n";
1010     return;
1011   }
1012 
1013   // -------------------------------------
1014   // Single and Multipoint results summary
1015   // -------------------------------------
1016   RealPairPRPMultiMap::iterator it = bestVarsRespMap.begin();
1017   size_t i, offset, num_fns, num_best_map = bestVarsRespMap.size();
1018   for (i=1; it!=bestVarsRespMap.end(); ++i, ++it) {
1019     const ParamResponsePair& best_pr = it->second;
1020     const Variables&  best_vars = best_pr.variables();
1021     const RealVector& best_fns  = best_pr.response().function_values();
1022     s << "<<<<< Best parameters          ";
1023     if (num_best_map > 1) s << "(set " << i << ") ";
1024     s << "=\n" << best_vars;
1025     num_fns = best_fns.length(); offset = 0;
1026     if (numObjFns) {
1027       if (numObjFns > 1) s << "<<<<< Best objective functions ";
1028       else               s << "<<<<< Best objective function  ";
1029       if (num_best_map > 1) s << "(set " << i << ") "; s << "=\n";
1030       write_data_partial(s, offset, numObjFns, best_fns);
1031       offset = numObjFns;
1032     }
1033     else if (numLSqTerms) {
1034       s << "<<<<< Best residual terms      ";
1035       if (num_best_map > 1) s << "(set " << i << ") "; s << "=\n";
1036       write_data_partial(s, offset, numLSqTerms, best_fns);
1037       offset = numLSqTerms;
1038     }
1039     if (num_fns > offset) {
1040       s << "<<<<< Best constraint values   ";
1041       if (num_best_map > 1) s << "(set " << i << ") "; s << "=\n";
1042       write_data_partial(s, offset, num_fns - offset, best_fns);
1043     }
1044     s << "<<<<< Best data captured at function evaluation "
1045       << best_pr.eval_id() << std::endl;
1046   }
1047 }
1048 
1049 
vary_pattern(bool pattern_flag)1050 void Analyzer::vary_pattern(bool pattern_flag)
1051 {
1052   Cerr << "Error: Analyzer lacking redefinition of virtual vary_pattern() "
1053        << "function.\n       This analyzer does not support pattern variance."
1054        << std::endl;
1055   abort_handler(METHOD_ERROR);
1056 }
1057 
1058 
get_parameter_sets(Model & model)1059 void Analyzer::get_parameter_sets(Model& model)
1060 {
1061   Cerr << "Error: Analyzer lacking redefinition of virtual get_parameter_sets"
1062        << "(1) function.\n       This analyzer does not support parameter sets."
1063        << std::endl;
1064   abort_handler(METHOD_ERROR);
1065 }
1066 
get_parameter_sets(Model & model,const int num_samples,RealMatrix & design_matrix)1067 void Analyzer::get_parameter_sets(Model& model, const int num_samples,
1068 				  RealMatrix& design_matrix)
1069 {
1070   Cerr << "Error: Analyzer lacking redefinition of virtual get_parameter_sets"
1071        << "(3) function.\n       This analyzer does not support parameter sets."
1072        << std::endl;
1073   abort_handler(METHOD_ERROR);
1074 }
1075 
1076 } // namespace Dakota
1077