1 /*  _______________________________________________________________________
2 
3     DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
4     Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
5     This software is distributed under the GNU Lesser General Public License.
6     For more information, see the README file in the top Dakota directory.
7     _______________________________________________________________________ */
8 
9 //- Class:       ConcurrentMetaIterator
10 //- Description: Implementation code for the ConcurrentMetaIterator class
11 //- Owner:       Mike Eldred
12 //- Checked by:
13 
14 #include "ConcurrentMetaIterator.hpp"
15 #include "ProblemDescDB.hpp"
16 #include "ParallelLibrary.hpp"
17 #include "ParamResponsePair.hpp"
18 #include "NonDLHSSampling.hpp"
19 #include "EvaluationStore.hpp"
20 
21 static const char rcsId[]="@(#) $Id: ConcurrentMetaIterator.cpp 7018 2010-10-12 02:25:22Z mseldre $";
22 
23 
24 namespace Dakota {
25 
/** Standard constructor.  Pulls the concurrent-iteration specification
    (parameter sets, random job count, seed) from problem_db, activates the
    sub-method/sub-model DB nodes, instantiates the iterated Model, and
    sizes the concurrent job queue (maxIteratorConcurrency).  The DB nodes
    are restored to their incoming state before returning. */
ConcurrentMetaIterator::ConcurrentMetaIterator(ProblemDescDB& problem_db):
  MetaIterator(problem_db),
  numRandomJobs(probDescDB.get_int("method.concurrent.random_jobs")),
  randomSeed(probDescDB.get_int("method.random_seed"))
{
  // ***************************************************************************
  // TO DO: support concurrent meta-iteration for both Minimizer & Analyzer:
  // one step within general purpose module for sequencing with concurrency.
  //   Pareto set:  Minimizer only with obj/lsq weight sets
  //   Multi-start: currently Minimizer; examples of multi-start Analyzer?
  // ***************************************************************************

  // ***************************************************************************
  // TO DO: once NestedModel has been updated to use IteratorScheduler, consider
  // design using NestedModel lightweight ctor for Iterator concurrency.
  // Iterators define available I/O and the meta-iterator checks compatibility.
  // ***************************************************************************

  // pull these from the DB before any resetting of DB nodes
  const RealVector& raw_param_sets
    = problem_db.get_rv("method.concurrent.parameter_sets");

  const String& sub_meth_ptr
    = problem_db.get_string("method.sub_method_pointer");
  const String& sub_meth_name = problem_db.get_string("method.sub_method_name");
  const String& sub_model_ptr
    = problem_db.get_string("method.sub_model_pointer");

  // store/restore the method/model indices separately (the current state of the
  // iterator/model DB nodes may not be synched due to Model ctor recursions in
  // process)
  size_t method_index, model_index; // Note: _NPOS is a valid restoration value
  bool restore_method = false, restore_model = false;
  bool print_rank = (parallelLib.world_rank() == 0); // prior to lead_rank()
  if (!sub_meth_ptr.empty()) {
    // sub-method pointer given: activate both the method and model DB nodes
    restore_method = restore_model = true;
    method_index = problem_db.get_db_method_node(); // for restoration
    model_index  = problem_db.get_db_model_node();  // for restoration
    problem_db.set_db_list_nodes(sub_meth_ptr);
  }
  else if (!sub_meth_name.empty()) {
    // if sub_model_ptr is defined, activate this model spec;
    // if sub_model_ptr is empty, identify default model using empty string.
    // Note: inheriting a prev model spec could be desirable in some contexts,
    //       but not this ctor since a top-level/non-subordinate MetaIterator.
    restore_model = true;
    model_index   = problem_db.get_db_model_node(); // for restoration
    problem_db.set_db_model_nodes(sub_model_ptr);
  }
  else {
    // neither a sub-method pointer nor a sub-method name was specified
    if (print_rank)
      Cerr << "Error: insufficient method identification in "
	   << "ConcurrentMetaIterator." << std::endl;
    abort_handler(-1);
  }

  // Instantiate the model on all processors, even a dedicated master
  iteratedModel = problem_db.get_model();
  initialize_model();

  // user-specified jobs
  copy_data(raw_param_sets, parameterSets, 0, paramSetLen);

  // estimation of paramSetLen is dependent on the iteratedModel
  // --> concurrent iterator partitioning is pushed downstream a bit
  maxIteratorConcurrency = iterSched.numIteratorJobs
    = parameterSets.size() + numRandomJobs;
  if (!maxIteratorConcurrency) { // verify at least 1 job has been specified
    if (print_rank)
      Cerr << "Error: concurrent meta-iterator must have at least 1 job.  "
	   << "Please specify either a\n       list of parameter sets or a "
	   << "number of random jobs." << std::endl;
    abort_handler(-1);
  }

  // restore list nodes
  if (restore_method) problem_db.set_db_method_node(method_index);
  if (restore_model)  problem_db.set_db_model_nodes(model_index);
}
105 
106 
/** Alternate constructor for an incoming Model.  The model's DB node is
    activated (after consistency checking against any method/model pointers
    via check_model()) for the DB lookups in initialize_model(), then
    restored before returning. */
ConcurrentMetaIterator::
ConcurrentMetaIterator(ProblemDescDB& problem_db, Model& model):
  MetaIterator(problem_db, model),
  numRandomJobs(probDescDB.get_int("method.concurrent.random_jobs")),
  randomSeed(probDescDB.get_int("method.random_seed"))
{
  // pull this from the DB before any resetting of DB nodes
  const RealVector& raw_param_sets
    = problem_db.get_rv("method.concurrent.parameter_sets");

  // ensure consistency between model and any method/model pointers
  check_model(problem_db.get_string("method.sub_method_pointer"),
	      problem_db.get_string("method.sub_model_pointer"));
  // For this ctor with an incoming model, we can simplify DB node assignment
  // and mirror logic in check_model()
  size_t model_index = problem_db.get_db_model_node();  // for restoration
  problem_db.set_db_model_nodes(iteratedModel.model_id());

  initialize_model(); // uses DB lookup for model data (number of obj fns)

  // user-specified jobs
  copy_data(raw_param_sets, parameterSets, 0, paramSetLen);

  // total concurrency = user-specified parameter sets + random jobs
  maxIteratorConcurrency = iterSched.numIteratorJobs
    = parameterSets.size() + numRandomJobs;
  if (!maxIteratorConcurrency) { // verify at least 1 job has been specified
    if (parallelLib.world_rank() == 0) // prior to lead_rank()
      Cerr << "Error: concurrent meta-iterator must have at least 1 job.  "
	   << "Please specify either a\n       list of parameter sets or a "
	   << "number of random jobs." << std::endl;
    abort_handler(-1);
  }

  // restore list nodes
  problem_db.set_db_model_nodes(model_index);
}
142 
143 
declare_sources()144 void ConcurrentMetaIterator::declare_sources() {
145   evaluationsDB.declare_source(method_id(),
146                                "iterator",
147                                selectedIterator.method_id(),
148                                "iterator");
149 }
150 
~ConcurrentMetaIterator()151 ConcurrentMetaIterator::~ConcurrentMetaIterator()
152 { }
153 
154 
/** Configure and partition the meta-iterator (mi_pl) parallelism level and
    instantiate the sub-iterator on active servers.  The sub-method/model DB
    nodes are temporarily activated for iterator construction and restored
    before returning.  Note the circular dependency resolved here: the
    iterator must be configured (to estimate evaluation concurrency) before
    the mi_pl partition that will host it can be formed. */
void ConcurrentMetaIterator::derived_init_communicators(ParLevLIter pl_iter)
{
  const String& sub_meth_ptr
    = probDescDB.get_string("method.sub_method_pointer");
  const String& sub_meth_name = probDescDB.get_string("method.sub_method_name");
  //const String& sub_model_ptr
  //  = probDescDB.get_string("method.sub_model_pointer");

  // Model recursions may update method or model nodes and restoration may not
  // occur until the recursion completes, so don't assume that method and
  // model indices are in sync.
  size_t method_index, model_index; // Note: _NPOS is a valid restoration value
  bool restore_method = false, restore_model = false,
    lightwt_ctor = sub_meth_ptr.empty();
  if (lightwt_ctor) {
    // lightweight construction by name: only the model node is activated
    restore_model = true;
    model_index = probDescDB.get_db_model_node(); // for restoration
    probDescDB.set_db_model_nodes(iteratedModel.model_id());
  }
  else {
    // construction by method pointer: activate both method and model nodes
    restore_method = restore_model = true;
    method_index = probDescDB.get_db_method_node(); // for restoration
    model_index  = probDescDB.get_db_model_node();  // for restoration
    probDescDB.set_db_list_nodes(sub_meth_ptr);
  }

  iterSched.update(methodPCIter);

  // It is not practical to estimate the evaluation concurrency without
  // instantiating the iterator (see, e.g., NonDPolynomialChaos), and here we
  // have a circular dependency: we need the evaluation concurrency from the
  // iterator to estimate the max_ppi for input to the mi_pl partition, but
  // we want to segregate iterator construction based on the mi_pl partition.
  // To resolve this dependency, we instantiate the iterator based on the
  // previous parallel level and then augment it below based on the mi_pl level.
  // We avoid repeated instantiations by the check on iterator.is_null() as well
  // as through the lookup in problem_db_get_iterator() (method_ptr case); this
  // requires that no calls to init_comms occur at construct time, since the
  // mi_pl basis for this is not yet available.
  IntIntPair ppi_pr = (lightwt_ctor) ?
    iterSched.configure(probDescDB, sub_meth_name, selectedIterator,
			iteratedModel) :
    iterSched.configure(probDescDB, selectedIterator, iteratedModel);
  iterSched.partition(maxIteratorConcurrency, ppi_pr);
  summaryOutputFlag = iterSched.lead_rank();

  // from this point on, we can specialize logic in terms of iterator servers.
  // An idle partition need not instantiate iterators (empty selectedIterator
  // envelope is adequate) or initialize, so return now.  A dedicated
  // master processor is managed in IteratorScheduler::init_iterator().
  if (iterSched.iteratorServerId <= iterSched.numIteratorServers) {
    // Instantiate the iterator
    if (lightwt_ctor) {
      iterSched.init_iterator(probDescDB, sub_meth_name, selectedIterator,
			      iteratedModel);
      if (summaryOutputFlag && outputLevel >= VERBOSE_OUTPUT)
	Cout << "Concurrent Iterator = " << sub_meth_name << std::endl;
    }
    else {
      iterSched.init_iterator(probDescDB, selectedIterator, iteratedModel);
      if (summaryOutputFlag && outputLevel >= VERBOSE_OUTPUT)
	Cout << "Concurrent Iterator = "
	     << method_enum_to_string(probDescDB.get_ushort("method.algorithm"))
	     << std::endl;
    }
  }

  // restore list nodes
  if (restore_method) probDescDB.set_db_method_node(method_index);
  if (restore_model)  probDescDB.set_db_model_nodes(model_index);
}
226 
227 
derived_set_communicators(ParLevLIter pl_iter)228 void ConcurrentMetaIterator::derived_set_communicators(ParLevLIter pl_iter)
229 {
230   size_t mi_pl_index = methodPCIter->mi_parallel_level_index(pl_iter) + 1;
231   iterSched.update(methodPCIter, mi_pl_index);
232   if (iterSched.iteratorServerId <= iterSched.numIteratorServers) {
233     ParLevLIter si_pl_iter
234       = methodPCIter->mi_parallel_level_iterator(mi_pl_index);
235     iterSched.set_iterator(selectedIterator, si_pl_iter);
236   }
237 }
238 
239 
derived_free_communicators(ParLevLIter pl_iter)240 void ConcurrentMetaIterator::derived_free_communicators(ParLevLIter pl_iter)
241 {
242   size_t mi_pl_index = methodPCIter->mi_parallel_level_index(pl_iter) + 1;
243   iterSched.update(methodPCIter, mi_pl_index);
244   if (iterSched.iteratorServerId <= iterSched.numIteratorServers) {
245     ParLevLIter si_pl_iter
246       = methodPCIter->mi_parallel_level_iterator(mi_pl_index);
247     iterSched.free_iterator(selectedIterator, si_pl_iter);
248   }
249 
250   // deallocate the mi_pl parallelism level
251   iterSched.free_iterator_parallelism();
252 }
253 
254 
/** Assemble the full job queue prior to scheduling.  On each active
    server's rank 0: publish parameter/results message lengths to the
    scheduler, append LHS-generated random jobs to the user-specified
    parameterSets (broadcasting them to peer servers under static
    scheduling, and rescaling to unit-sum weights in the Pareto-set case),
    and size the results bookkeeping array. */
void ConcurrentMetaIterator::pre_run()
{
  // only iterator-server comm leaders of active servers participate
  if (iterSched.iteratorCommRank > 0 ||
      iterSched.iteratorServerId > iterSched.numIteratorServers)
    return;

  // initialize initialPt
  if (methodName != MULTI_START)
    copy_data(iteratedModel.continuous_variables(), initialPt); // view->copy

  // estimate params_msg_len & results_msg_len and publish to IteratorScheduler
  int params_msg_len = 0, results_msg_len; // peer sched doesn't send params
  if (iterSched.iteratorScheduling == MASTER_SCHEDULING) {
    // define params_msg_len
    RealVector rv(paramSetLen);
    MPIPackBuffer send_buffer;
    send_buffer << rv;
    params_msg_len = send_buffer.size();
    // define results_msg_len
    if (iterSched.iteratorServerId == 0) // master proc: init_comms not called
      iteratedModel.estimate_message_lengths();
  }
  results_msg_len = iteratedModel.message_lengths()[3];
  iterSched.iterator_message_lengths(params_msg_len, results_msg_len);

  // -------------------------------------------------------------------------
  // Define parameterSets from the combination of user-specified & random jobs
  // -------------------------------------------------------------------------
  if ( iterSched.iteratorServerId   == 0 ||                // master proc
       iterSched.iteratorScheduling == PEER_SCHEDULING ) { // peer server

    // random jobs
    if (numRandomJobs) { // random jobs specified
      size_t i, j;
      RealVectorArray random_jobs;
      if (iterSched.lead_rank()) {
	// set up bounds for uniform sampling
	RealVector lower_bnds, upper_bnds;
	if (methodName == MULTI_START) {
	  // multi-start: sample starting points within the variable bounds
	  lower_bnds = iteratedModel.continuous_lower_bounds(); // view OK
	  upper_bnds = iteratedModel.continuous_upper_bounds(); // view OK
	}
	else {
	  // Pareto set: sample raw weights in [0,1]; rescaled to sum 1 below
	  lower_bnds.sizeUninitialized(paramSetLen); lower_bnds = 0.;
	  upper_bnds.sizeUninitialized(paramSetLen); upper_bnds = 1.;
	}
	// invoke NonDLHSSampling as either old or new LHS is always available.
	// We don't use a dace_method_pointer spec since we aren't sampling over
	// the variables specification in all cases.  In particular, we're
	// sampling over multiobj. weight sets in the Pareto-set case.  This
	// hard-wiring currently restricts us to uniform, uncorrelated samples.
	unsigned short sample_type = SUBMETHOD_DEFAULT;
	String rng; // empty string: use default
	NonDLHSSampling lhs_sampler(sample_type, numRandomJobs, randomSeed,
				    rng, lower_bnds, upper_bnds);
	const RealMatrix& all_samples = lhs_sampler.all_samples();
	random_jobs.resize(numRandomJobs);
	for (i=0; i<numRandomJobs; ++i)
	  copy_data(all_samples[i], paramSetLen, random_jobs[i]);
      }

      if (iterSched.iteratorScheduling == PEER_SCHEDULING &&
	  iterSched.numIteratorServers > 1) {
	const ParallelLevel& mi_pl
	  = methodPCIter->mi_parallel_level(iterSched.miPLIndex);
	// For static scheduling, bcast all random jobs over mi_intra_comm (not
	// necessary for self-scheduling as jobs are assigned from the master).
	if (iterSched.lead_rank()) {
	  // lead rank packs and broadcasts: length first, then the payload
	  MPIPackBuffer send_buffer;
	  send_buffer << random_jobs;
	  int buffer_len = send_buffer.size();
	  parallelLib.bcast_hs(buffer_len, mi_pl);
	  parallelLib.bcast_hs(send_buffer, mi_pl);
	}
	else {
	  // peers receive the length, size a buffer, and unpack the jobs
	  int buffer_len;
	  parallelLib.bcast_hs(buffer_len, mi_pl);
	  MPIUnpackBuffer recv_buffer(buffer_len);
	  parallelLib.bcast_hs(recv_buffer, mi_pl);
	  recv_buffer >> random_jobs;
	}
      }

      // rescale (if needed) and append to parameterSets
      size_t cntr = parameterSets.size();
      parameterSets.resize(iterSched.numIteratorJobs);
      for (i=0; i<numRandomJobs; ++i, ++cntr) {
        if (methodName == MULTI_START)
          parameterSets[cntr] = random_jobs[i];
        else { // scale: multi-objective weights should add to 1
          // NOTE: there is a better way to do this; i.e., mixture experimental
          // design (e.g., Ch. 11 in Myers and Montgomery), but scaling an LHS
          // design is sufficient as a first cut.
          Real sum = 0.0;
          for (j=0; j<paramSetLen; j++)
            sum += random_jobs[i][j];
          parameterSets[cntr].sizeUninitialized(paramSetLen);
          for (j=0; j<paramSetLen; j++)
            parameterSets[cntr][j] = random_jobs[i][j]/sum;
        }
      }
    }

    // May also want to support an approach, at least for multistart, where the
    // best m subset out of n evaluated candidates are used for iterator starts.
    // However, this requires evaluation of the n candidates, which violates the
    // distinction between a multi-start minimizer and a sequential hybrid
    // minimizer (initiated with global sampling).
  }

  // all iterator masters bookkeep on the full results list, even if
  // only some entries are defined locally
  prpResults.resize(iterSched.numIteratorJobs);
}
369 
370 
core_run()371 void ConcurrentMetaIterator::core_run()
372 {
373   // For graphics data, limit to iterator server comm leaders; this is further
374   // segregated within initialize_graphics(): all iterator masters stream
375   // tabular data, but only iterator server 1 generates a graphics window.
376   if (iterSched.iteratorCommRank == 0) {
377     int server_id = iterSched.iteratorServerId;
378     if (server_id > 0 && server_id <= iterSched.numIteratorServers)
379       selectedIterator.initialize_graphics(server_id);
380   }
381 
382   iterSched.schedule_iterators(*this, selectedIterator);
383 }
384 
385 
print_results(std::ostream & s,short results_state)386 void ConcurrentMetaIterator::print_results(std::ostream& s, short results_state)
387 {
388   using std::setw;
389   s << "\n<<<<< Results summary:\n";
390 
391   // Table header:
392   StringMultiArrayConstView cv_labels
393     = prpResults[0].variables().continuous_variable_labels();
394   StringMultiArrayConstView div_labels
395     = prpResults[0].variables().discrete_int_variable_labels();
396   StringMultiArrayConstView dsv_labels
397     = prpResults[0].variables().discrete_string_variable_labels();
398   StringMultiArrayConstView drv_labels
399     = prpResults[0].variables().discrete_real_variable_labels();
400   const StringArray& fn_labels = prpResults[0].response().function_labels();
401   size_t i, num_cv  = cv_labels.size(),  num_div = div_labels.size(),
402     num_dsv = dsv_labels.size(), num_drv = drv_labels.size(),
403     num_fns = fn_labels.size();
404   s << "   set_id "; // matlab comment syntax
405   StringArray weight_strings; // used to store pareto_set results
406   for (i=0; i<paramSetLen; ++i) {
407     if (methodName == MULTI_START)
408       s << setw(14) << cv_labels[i].data() << ' ';
409     else {
410       char string[10];
411       std::sprintf(string, "w%i", (int)i + 1);
412       weight_strings.push_back(string);
413       s << setw(14) << string << ' ';
414     }
415   }
416   for (i=0; i<num_cv; i++) {
417     String label = (methodName == MULTI_START) ?
418       cv_labels[i] + String("*") : cv_labels[i];
419     s << setw(14) << label.data() << ' ';
420   }
421   for (i=0; i<num_div; i++) {
422     String label = (methodName == MULTI_START) ?
423       div_labels[i] + String("*") : div_labels[i];
424     s << setw(14) << label.data() << ' ';
425   }
426   for (i=0; i<num_dsv; i++) {
427     String label = (methodName == MULTI_START) ?
428       dsv_labels[i] + String("*") : dsv_labels[i];
429     s << setw(14) << label.data() << ' ';
430   }
431   for (i=0; i<num_drv; i++) {
432     String label = (methodName == MULTI_START) ?
433       drv_labels[i] + String("*") : drv_labels[i];
434     s << setw(14) << label.data() << ' ';
435   }
436   for (i=0; i<num_fns; i++)
437     s << setw(14) << fn_labels[i].data() << ' ';
438   s << '\n';
439 
440   // Table data:
441   size_t num_results = prpResults.size();
442   for (i=0; i<num_results; ++i) {
443     const ParamResponsePair& prp_result = prpResults[i];
444     s << std::setprecision(10) << std::resetiosflags(std::ios::floatfield)
445          << setw(9) << prp_result.eval_id() << ' ';
446     for (size_t j=0; j<paramSetLen; ++j)
447       s << setw(14) << parameterSets[i][j] << ' ';
448     const Variables& prp_vars = prp_result.variables();
449     //prp_vars.write_tabular(s) not used since active vars, not all vars
450     write_data_tabular(s, prp_vars.continuous_variables());
451     write_data_tabular(s, prp_vars.discrete_int_variables());
452     write_data_tabular(s, prp_vars.discrete_string_variables());
453     write_data_tabular(s, prp_vars.discrete_real_variables());
454     prp_result.response().write_tabular(s);
455   }
456   s << '\n';
457   // Write results to DB
458   if(resultsDB.active()) {
459     const auto iterator_id = run_identifier();
460     // set_ids for dimnension scales
461     IntArray set_ids(num_results);
462     std::iota(set_ids.begin(), set_ids.end(), 1);
463     // Store parameterSets, which are initial points for multi_start and weights
464     // for pareto_set.
465     DimScaleMap pset_scales;
466     StringArray pset_location;
467     pset_scales.emplace(0, IntegerScale("set_ids", set_ids));
468     if(methodName == MULTI_START) {
469       pset_scales.emplace(1, StringScale("variables", cv_labels));
470       pset_location = {String("starting_points"), String("continuous")};
471     } else {
472       pset_scales.emplace(1, StringScale("weights", weight_strings));
473       pset_location.push_back("weights");
474     }
475     resultsDB.allocate_matrix(iterator_id, pset_location,
476         Dakota::ResultsOutputType::REAL, num_results, paramSetLen, pset_scales);
477     for(int i = 0; i < num_results; ++i)
478       resultsDB.insert_into(iterator_id, pset_location, parameterSets[i],i);
479     // Store best continuous variables
480     if(num_cv) {  // should always be true
481       DimScaleMap best_parameters_scales;
482       best_parameters_scales.emplace(0, IntegerScale("set_id", set_ids));
483       best_parameters_scales.emplace(1, StringScale("variables", cv_labels));
484       StringArray location = {String("best_parameters"), String("continuous")};
485       resultsDB.allocate_matrix(iterator_id, location, Dakota::ResultsOutputType::REAL,
486           num_results, num_cv, best_parameters_scales);
487       for(int i = 0; i < num_results; ++i)
488         resultsDB.insert_into(iterator_id, location,
489             prpResults[i].variables().continuous_variables(), i);
490     }
491     // Store best discrete ints
492     if(num_div) {
493       DimScaleMap best_parameters_scales;
494       best_parameters_scales.emplace(0, IntegerScale("set_ids", set_ids));
495       best_parameters_scales.emplace(1, StringScale("variables", div_labels));
496       StringArray location = {String("best_parameters"), String("discrete_integer")};
497       resultsDB.allocate_matrix(iterator_id, location, Dakota::ResultsOutputType::INTEGER,
498           num_results, num_div, best_parameters_scales);
499       for(int i = 0; i < num_results; ++i)
500         resultsDB.insert_into(iterator_id, location,
501             prpResults[i].variables().discrete_int_variables(), i);
502     }
503     // Store best discrete strings
504     if(num_dsv) {
505       DimScaleMap best_parameters_scales;
506       best_parameters_scales.emplace(0, IntegerScale("set_ids", set_ids));
507       best_parameters_scales.emplace(1, StringScale("variables", dsv_labels));
508       StringArray location = {String("best_parameters"), String("discrete_string")};
509       resultsDB.allocate_matrix(iterator_id, location, Dakota::ResultsOutputType::STRING,
510           num_results, num_dsv, best_parameters_scales);
511       for(int i = 0; i < num_results; ++i)
512         resultsDB.insert_into(iterator_id, location,
513             prpResults[i].variables().discrete_string_variables(), i);
514     }
515     // Store best discrete reals
516     if(num_drv) {
517       DimScaleMap best_parameters_scales;
518       best_parameters_scales.emplace(0, IntegerScale("set_ids", set_ids));
519       best_parameters_scales.emplace(1, StringScale("variables", drv_labels));
520       StringArray location = {String("best_parameters"), String("discrete_real")};
521       resultsDB.allocate_matrix(iterator_id, location, Dakota::ResultsOutputType::REAL,
522           num_results, num_drv, best_parameters_scales);
523       for(int i = 0; i < num_results; ++i)
524         resultsDB.insert_into(iterator_id, location,
525             prpResults[i].variables().discrete_real_variables(), i);
526     }
527     // Store best responses
528     if(num_fns) {
529       DimScaleMap best_responses_scales;
530       best_responses_scales.emplace(0, IntegerScale("set_ids", set_ids));
531       best_responses_scales.emplace(1, StringScale("responses", fn_labels));
532       StringArray location = {String("best_responses")};
533       resultsDB.allocate_matrix(iterator_id, location, Dakota::ResultsOutputType::REAL,
534           num_results, num_fns, best_responses_scales);
535       for(int i = 0; i < num_results; ++i)
536         resultsDB.insert_into(iterator_id, location,
537             prpResults[i].response().function_values(), i);
538     }
539   }
540 }
541 
542 } // namespace Dakota
543