/*  _______________________________________________________________________

    DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
    Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
    This software is distributed under the GNU Lesser General Public License.
    For more information, see the README file in the top Dakota directory.
    _______________________________________________________________________ */

//- Class:       SeqHybridMetaIterator
//- Description: Implementation code for the SeqHybridMetaIterator class
//- Owner:       Mike Eldred
//- Checked by:

#include "SeqHybridMetaIterator.hpp"
#include "ProblemDescDB.hpp"
#include "ParallelLibrary.hpp"
#include "ParamResponsePair.hpp"
#include "dakota_data_io.hpp"
#include "EvaluationStore.hpp"

static const char rcsId[]="@(#) $Id: SeqHybridMetaIterator.cpp 6972 2010-09-17 22:18:50Z briadam $";


namespace Dakota {

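/* Example sequential hybrid specification (a sketch only; the method ids
   'PS' and 'NLP' are placeholders, and exact keywords/defaults should be
   checked against the Dakota reference manual):

     method
       hybrid sequential
         method_pointer_list = 'PS' 'NLP'
   or
         method_name_list = 'soga' 'conmin_frcg'
         model_pointer_list = 'M1'
*/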
SeqHybridMetaIterator::SeqHybridMetaIterator(ProblemDescDB& problem_db):
  MetaIterator(problem_db), singlePassedModel(false)
  //seqHybridType(problem_db.get_string("method.hybrid.type")),
  //progressThreshold(problem_db.get_real("method.hybrid.progress_threshold"))
{
  // ***************************************************************************
  // TO DO: support sequences for both Minimizer (solution points) & Analyzer
  // (global SA --> anisotropic UQ): general purpose sequencing with iterator
  // concurrency.
  // ***************************************************************************

  // ***************************************************************************
  // TO DO: once NestedModel has been updated to use IteratorScheduler, consider
  // design using NestedModel lightweight ctor for simple Iterator sequences.
  // Iterators define available I/O and the meta-iterator checks compatibility.
  // ***************************************************************************

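  // The hybrid spec identifies its sub-iterators either through method
  // pointers (each resolving to a complete method specification in the input
  // file) or through method names (lightweight construction, optionally
  // paired with model pointers).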
  const StringArray& method_ptrs
    = problem_db.get_sa("method.hybrid.method_pointers");
  const StringArray& method_names
    = problem_db.get_sa("method.hybrid.method_names");

  if (!method_ptrs.empty())
    { lightwtMethodCtor = false; methodStrings = method_ptrs;  }
  else if (!method_names.empty()) {
    lightwtMethodCtor = true;    methodStrings = method_names;
    modelStrings = problem_db.get_sa("method.hybrid.model_pointers");
    // define an array of null strings to use for set_db_model_nodes()
    if (modelStrings.empty()) modelStrings.resize(method_names.size());
    // allow input of single string
    else      Pecos::inflate_scalar(modelStrings, method_names.size());
  }
  else {
    Cerr << "Error: incomplete hybrid meta-iterator specification." << std::endl;
    abort_handler(METHOD_ERROR);
  }

  maxIteratorConcurrency = 1; // to be updated in derived_init_communicators()
}


SeqHybridMetaIterator::
SeqHybridMetaIterator(ProblemDescDB& problem_db, Model& model):
  MetaIterator(problem_db, model), singlePassedModel(true)
  //seqHybridType(problem_db.get_string("method.hybrid.type")),
  //progressThreshold(problem_db.get_real("method.hybrid.progress_threshold"))
{
  const StringArray& method_ptrs
    = problem_db.get_sa("method.hybrid.method_pointers");
  const StringArray& method_names
    = problem_db.get_sa("method.hybrid.method_names");
  const StringArray& model_ptrs
    = problem_db.get_sa("method.hybrid.model_pointers");

  // process and validate method and model strings
  size_t i, num_iterators; String empty_str;
  if (!method_ptrs.empty()) {
    lightwtMethodCtor = false;
    num_iterators = method_ptrs.size();
    for (i=0; i<num_iterators; ++i)
      check_model(method_ptrs[i], empty_str);
    methodStrings = method_ptrs;
  }
  else if (!method_names.empty()) {
    lightwtMethodCtor = true;
    methodStrings = method_names;
    num_iterators = method_names.size();
    // define an array of strings to use for set_db_model_nodes()
    if (model_ptrs.empty()) // assign array using id from iteratedModel
      modelStrings.assign(num_iterators, iteratedModel.model_id());
    else {
      size_t num_models = model_ptrs.size();
      for (i=0; i<num_models; ++i)
        check_model(empty_str, model_ptrs[i]);
      modelStrings = model_ptrs;
      Pecos::inflate_scalar(modelStrings, num_iterators); // allow single input
    }
  }
  else {
    Cerr << "Error: incomplete hybrid meta-iterator specification." << std::endl;
    abort_handler(METHOD_ERROR);
  }

  maxIteratorConcurrency = 1; // to be updated in derived_init_communicators()
}


SeqHybridMetaIterator::~SeqHybridMetaIterator()
{ }


void SeqHybridMetaIterator::derived_init_communicators(ParLevLIter pl_iter)
{
  size_t i, num_iterators = methodStrings.size();
  selectedIterators.resize(num_iterators); // all procs need for iterator sched
  if (!singlePassedModel)
    selectedModels.resize(num_iterators);

  iterSched.update(methodPCIter);

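  // One pass over the sequence: accumulate the min/max processors-per-iterator
  // bounds across all methods and track the running product of returned
  // solution points, which bounds the iterator concurrency for the next step.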
  int pl_rank = pl_iter->server_communicator_rank();
  IntIntPair ppi_pr_i, ppi_pr(INT_MAX, 0);
  String empty_str;
  size_t running_product = 1, sizet_max = std::numeric_limits<size_t>::max();
  bool sizet_max_replace = false;
  for (i=0; i<num_iterators; ++i) {
    // compute min/max processors per iterator for each method
    Iterator& the_iterator = selectedIterators[i];
    Model& the_model = (singlePassedModel) ? iteratedModel : selectedModels[i];
    ppi_pr_i = (lightwtMethodCtor) ?
      estimate_by_name(methodStrings[i], modelStrings[i], the_iterator,
                       the_model) :
      estimate_by_pointer(methodStrings[i], the_iterator, the_model);
    if (ppi_pr_i.first  < ppi_pr.first)  ppi_pr.first  = ppi_pr_i.first;
    if (ppi_pr_i.second > ppi_pr.second) ppi_pr.second = ppi_pr_i.second;

    // selectedIterators[i] now exists on pl rank 0; use it to update
    // maxIteratorConcurrency, where the iterator concurrency lags the
    // number of final solutions by one step in the sequence
    if (pl_rank == 0) {
      // manage number of points accepted per iterator instance
      if (the_iterator.accepts_multiple_points())
        running_product = 1; // reset
      // max concurrency tracking
      else if (running_product > maxIteratorConcurrency)
        maxIteratorConcurrency = running_product;
      // manage number of points generated per iterator instance
      if (the_iterator.returns_multiple_points()) {
        size_t num_final = the_iterator.num_final_solutions();
        // if unlimited final solns (e.g. MOGA), use a stand-in (e.g. pop_size)
        if (num_final == sizet_max) {
          sizet_max_replace = true;
          running_product *= the_iterator.maximum_evaluation_concurrency();
        }
        else
          running_product *= num_final;
      }
    }
  }
  // bcast the maxIteratorConcurrency result to other ranks
  if (pl_rank == 0) {
    if (pl_iter->server_communicator_size() > 1)
      parallelLib.bcast(maxIteratorConcurrency, *pl_iter);
  }
  else
    parallelLib.bcast(maxIteratorConcurrency, *pl_iter);

  // with maxIteratorConcurrency defined, initialize the concurrent
  // iterator parallelism level
  iterSched.partition(maxIteratorConcurrency, ppi_pr);
  summaryOutputFlag = iterSched.lead_rank();
  // from this point on, we can specialize logic in terms of iterator servers.
  // An idle partition need not instantiate iterators/models (empty Iterator
  // envelopes are adequate for serve_iterators()), so return now.  A dedicated
  // master processor is managed in IteratorScheduler::init_iterator().
  if (iterSched.iteratorServerId > iterSched.numIteratorServers)
    return;

  if (!num_iterators) { // verify at least one method in list
    if (summaryOutputFlag)
      Cerr << "Error: hybrid method list must have at least one entry."
           << std::endl;
    abort_handler(-1);
  }
  if (summaryOutputFlag && outputLevel >= VERBOSE_OUTPUT)
    Cout << "maxIteratorConcurrency = " << maxIteratorConcurrency << '\n';

  if (seqHybridType == "adaptive") {
    if (iterSched.messagePass) {
      // adaptive hybrid does not support iterator concurrency
      if (summaryOutputFlag)
        Cerr << "Error: adaptive Sequential Hybrid does not support concurrent "
             << "iterator parallelism." << std::endl;
      abort_handler(-1);
    }
    if (progressThreshold > 1.) {
      if (summaryOutputFlag)
        Cerr << "Warning: progress_threshold should be <= 1. Setting to 1.\n";
      progressThreshold = 1.;
    }
    else if (progressThreshold < 0.) {
      if (summaryOutputFlag)
        Cerr << "Warning: progress_threshold should be >= 0. Setting to 0.\n";
      progressThreshold = 0.;
    }
  }

  // Instantiate all Models and Iterators
  for (i=0; i<num_iterators; ++i) {
    Model& the_model = (singlePassedModel) ? iteratedModel : selectedModels[i];
    if (lightwtMethodCtor)
      allocate_by_name(methodStrings[i], modelStrings[i],
                       selectedIterators[i], the_model);
    else
      allocate_by_pointer(methodStrings[i], selectedIterators[i], the_model);
  }

  // now that parallel partitioning and iterator allocation have occurred,
  // manage acceptable values for Iterator::numFinalSolutions (needed for
  // results_msg_len estimation in run function)
  if (sizet_max_replace && iterSched.iteratorCommRank == 0)
    for (i=0; i<num_iterators; ++i) {
      Iterator& the_iterator = selectedIterators[i];
      if (the_iterator.num_final_solutions() == sizet_max)
        the_iterator.num_final_solutions(
          the_iterator.maximum_evaluation_concurrency());
    }
}


void SeqHybridMetaIterator::derived_set_communicators(ParLevLIter pl_iter)
{
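  // pl_iter identifies this meta-iterator's parallelism level; the
  // sub-iterators are scheduled one mi parallelism level below it, hence the
  // index offset of +1 below.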
  size_t mi_pl_index = methodPCIter->mi_parallel_level_index(pl_iter) + 1;
  iterSched.update(methodPCIter, mi_pl_index);
  if (iterSched.iteratorServerId <= iterSched.numIteratorServers) {
    ParLevLIter si_pl_iter
      = methodPCIter->mi_parallel_level_iterator(mi_pl_index);
    size_t i, num_iterators = methodStrings.size();
    for (i=0; i<num_iterators; ++i)
      iterSched.set_iterator(selectedIterators[i], si_pl_iter);
  }
}


void SeqHybridMetaIterator::derived_free_communicators(ParLevLIter pl_iter)
{
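  // mirrors derived_set_communicators(): free each sub-iterator's
  // communicators at the next-lower parallelism level, then release the
  // mi_pl partition.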
  size_t mi_pl_index = methodPCIter->mi_parallel_level_index(pl_iter) + 1;
  iterSched.update(methodPCIter, mi_pl_index);
  if (iterSched.iteratorServerId <= iterSched.numIteratorServers) {
    ParLevLIter si_pl_iter
      = methodPCIter->mi_parallel_level_iterator(mi_pl_index);
    size_t i, num_iterators = methodStrings.size();
    for (i=0; i<num_iterators; ++i)
      iterSched.free_iterator(selectedIterators[i], si_pl_iter);
  }

  // deallocate the mi_pl parallelism level
  iterSched.free_iterator_parallelism();
}


void SeqHybridMetaIterator::core_run()
{
  if (seqHybridType == "adaptive") run_sequential_adaptive();
  else                             run_sequential();
}


/** In the sequential nonadaptive case, there is no interference with
    the iterators.  Each runs until its own convergence criteria are
    satisfied.  Status: fully operational. */
void SeqHybridMetaIterator::run_sequential()
{
  size_t num_iterators = methodStrings.size();
  int server_id =  iterSched.iteratorServerId;
  bool    rank0 = (iterSched.iteratorCommRank == 0);

  // use methodPCIter rather than relying on ParallelLibrary::currPCIter
  const ParallelLevel& mi_pl
    = methodPCIter->mi_parallel_level(iterSched.miPLIndex);
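  // the parent level is used for messaging between the strategy master and
  // the iterator servers (see the send/recv below); at the top of the
  // recursion there is no parent level, so fall back to mi_pl itself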
  const ParallelLevel& parent_pl = (iterSched.miPLIndex) ?
    methodPCIter->mi_parallel_level(iterSched.miPLIndex - 1) : mi_pl;

  for (seqCount=0; seqCount<num_iterators; seqCount++) {

    // each of these is safe for all processors
    Iterator& curr_iterator = selectedIterators[seqCount];
    Model&    curr_model
      = (singlePassedModel) ? iteratedModel : selectedModels[seqCount];

    if (summaryOutputFlag)
      Cout << "\n>>>>> Running Sequential Hybrid with iterator "
           << methodStrings[seqCount] << ".\n";

    if (server_id <= iterSched.numIteratorServers) {

      // For graphics data, limit to iterator server comm leaders; this is
      // further segregated within initialize_graphics(): all iterator masters
      // stream tabular data, but only server 1 generates a graphics window.
      if (rank0 && server_id > 0)
        curr_iterator.initialize_graphics(server_id);

      // -------------------------------------------------------------
      // Define total number of runs for this iterator in the sequence
      // -------------------------------------------------------------
      // > run 1st iterator as is, using single default starting pt
      // > subsequent iteration may involve multipoint data flow
      // > In the future, we may support concurrent multipoint iterators, but
      //   prior to additional specification data, we either have a single
      //   multipoint iterator or concurrent single-point iterators.
      if (seqCount == 0) // initialize numIteratorJobs
        iterSched.numIteratorJobs = 1;
      else {
        bool curr_accepts_multi = curr_iterator.accepts_multiple_points();
        //bool curr_returns_multi = curr_iterator.returns_multiple_points();
        // update numIteratorJobs
        if (iterSched.iteratorScheduling == MASTER_SCHEDULING) {
          // send curr_accepts_multi from 1st iterator master to strategy master
          if (rank0 && server_id == 1) {
            int multi_flag = (int)curr_accepts_multi; // bool -> int
            parallelLib.send(multi_flag, 0, 0, parent_pl, mi_pl);
          }
          else if (server_id == 0) {
            int multi_flag; MPI_Status status;
            parallelLib.recv(multi_flag, 1, 0, status, parent_pl, mi_pl);
            curr_accepts_multi = (bool)multi_flag; // int -> bool
            iterSched.numIteratorJobs
              = (curr_accepts_multi) ? 1 : parameterSets.size();
          }
        }
        else { // static scheduling
          if (rank0)
            iterSched.numIteratorJobs
              = (curr_accepts_multi) ? 1 : parameterSets.size();
          // bcast numIteratorJobs over iteratorComm
          if (iterSched.iteratorCommSize > 1)
            parallelLib.bcast(iterSched.numIteratorJobs, mi_pl);
        }
      }
      // --------------------------
      // size prpResults (2D array)
      // --------------------------
      // The total aggregated set of results:
      // > can grow if multiple iterator jobs return multiple points or if
      //   single instance returns more than used for initialization
      // > can only shrink in the case where single instance returns fewer
      //   than used for initialization
      if (rank0)
        prpResults.resize(iterSched.numIteratorJobs);

      // -----------------------------------------
      // Define buffer lengths for message passing
      // -----------------------------------------
      if (iterSched.messagePass && rank0) {
        int params_msg_len, results_msg_len;
        // define params_msg_len
        if (iterSched.iteratorScheduling == MASTER_SCHEDULING) {
          MPIPackBuffer params_buffer;
          pack_parameters_buffer(params_buffer, 0);
          params_msg_len = params_buffer.size();
        }
        // define results_msg_len
        MPIPackBuffer results_buffer;
        // pack_results_buffer() is not reliable for several reasons:
        // > for seqCount == 0, prpResults contains empty envelopes
        // > for seqCount >= 1, the previous state of prpResults may not
        //   accurately reflect the future state due to the presence of some
        //   multi-point iterators which do not define the results array.
        //pack_results_buffer(results_buffer, 0);
        // The following may be conservative in some cases (e.g., if the results
        // arrays will be empty), but should be reliable.
        ParamResponsePair prp_star(curr_iterator.variables_results(),
          curr_model.interface_id(), curr_iterator.response_results()); // shallow
        // Note: max size_t removed from Iterator::numFinalSolutions in ctor
        size_t prp_return_size = curr_iterator.num_final_solutions();
        results_buffer << prp_return_size;
        for (size_t i=0; i<prp_return_size; ++i)
          results_buffer << prp_star;
        results_msg_len = results_buffer.size();
        // publish lengths to IteratorScheduler
        iterSched.iterator_message_lengths(params_msg_len, results_msg_len);
      }
    }

    // ---------------------------------------------------
    // Schedule the runs for this iterator in the sequence
    // ---------------------------------------------------
    iterSched.schedule_iterators(*this, curr_iterator);

    // ---------------------------------
    // Post-process the iterator results
    // ---------------------------------
    // convert prpResults to parameterSets for next iteration
    if (server_id <= iterSched.numIteratorServers && rank0 &&
        seqCount+1 < num_iterators) {
      size_t i, j, num_param_sets = 0, cntr = 0, num_prp_i;
      for (i=0; i<iterSched.numIteratorJobs; ++i)
        num_param_sets += prpResults[i].size();
      parameterSets.resize(num_param_sets);
      for (i=0; i<iterSched.numIteratorJobs; ++i) {
        const PRPArray& prp_results_i = prpResults[i];
        num_prp_i = prp_results_i.size();
        for (j=0; j<num_prp_i; ++j, ++cntr)
          parameterSets[cntr] = prp_results_i[j].variables();
      }
      // migrate results among procs as required for parallel scheduling, e.g.,
      // from multiple single-point iterators to a single multi-point iterator
      // > for dedicated master scheduling, all results data resides on the
      //   dedicated master and no additional migration is required.
      // > for peer static scheduling, the full parameterSets array needs to be
      //   propagated back to peers 2 through n (like an All-Reduce, except that
      //   IteratorScheduler::static_schedule_iterators() enforces reduction to
      //   peer 1 and the code below enforces repropagation from 1 to 2-n).
      if (iterSched.iteratorScheduling == PEER_SCHEDULING &&
          iterSched.numIteratorServers > 1) {
        if (server_id == 1) { // send complete list
          MPIPackBuffer send_buffer;
          send_buffer << parameterSets;
          int buffer_len = send_buffer.size();
          parallelLib.bcast_hs(buffer_len, mi_pl);
          parallelLib.bcast_hs(send_buffer, mi_pl);
        }
        else { // replace partial list
          int buffer_len;
          parallelLib.bcast_hs(buffer_len, mi_pl);
          MPIUnpackBuffer recv_buffer(buffer_len);
          parallelLib.bcast_hs(recv_buffer, mi_pl);
          recv_buffer >> parameterSets;
        }
      }
    }
  }
}


/** In the sequential adaptive case, there is interference with the
    iterators through the use of the ++ overloaded operator.  iterator++ runs
    the iterator for one cycle, after which a progress_metric is computed.
    This progress metric is used to dictate method switching instead of
    each iterator's internal convergence criteria.  Status: incomplete. */
void SeqHybridMetaIterator::run_sequential_adaptive()
{
  // NOTE 1: The case where the iterator's internal convergence criteria are
  // satisfied BEFORE the progress_metric must be either handled or prevented.

  // NOTE 2: Parallel iterator scheduling is not currently supported (and this
  // code will fail if non-default iterator servers or scheduling is specified).

  size_t num_iterators = methodStrings.size();
  int server_id =  iterSched.iteratorServerId;
  bool    rank0 = (iterSched.iteratorCommRank == 0);
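  // start the metric at 1.0, the upper bound of the clamped threshold range,
  // so that each iterator in the sequence executes at least one cycle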
  Real progress_metric = 1.0;
  for (seqCount=0; seqCount<num_iterators; seqCount++) {

    // TO DO: don't run on ded master (see NOTE 2 above)
    //if (server_id) {

    Iterator& curr_iterator = selectedIterators[seqCount];

    // For graphics data, limit to iterator server comm leaders; this is
    // further segregated within initialize_graphics(): all iterator masters
    // stream tabular data, but only server 1 generates a graphics window.
    if (rank0 && server_id > 0 && server_id <= iterSched.numIteratorServers)
      curr_iterator.initialize_graphics(server_id);

    if (summaryOutputFlag)
      Cout << "\n>>>>> Running adaptive Sequential Hybrid with iterator "
           << methodStrings[seqCount] << '\n';

    curr_iterator.initialize_run();
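    // Stub pending the iterator++ and progress-metric hooks described above
    // ("Status: incomplete"): the commented lines show the intended one-cycle
    // advance, but progress_metric is not yet updated within this loop.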
    while (progress_metric >= progressThreshold) {
      //++selectedIterators[seqCount];
      const Response& resp_star = curr_iterator.response_results();
      //progress_metric = compute_progress(resp_star);
    }
    curr_iterator.finalize_run();

    if (summaryOutputFlag)
      Cout << "\n<<<<< Iterator " << methodStrings[seqCount] << " completed."
           << "  Progress metric has fallen below threshold.\n";

    // Set the starting point for the next iterator.
    if (seqCount+1 < num_iterators) { // prevent index out of range on last pass
      // Get best pt. from completed iteration.
      Variables vars_star = curr_iterator.variables_results();
      // Set best pt. as starting point for subsequent iterator
      selectedModels[seqCount+1].active_variables(vars_star);
    }

    // Send the termination message to the servers for this iterator/model
    selectedModels[seqCount].stop_servers();
  }
}


void SeqHybridMetaIterator::
update_local_results(PRPArray& prp_results, int job_id)
{
  Iterator& curr_iterator = selectedIterators[seqCount];
  Model&    curr_model    = (selectedModels.empty()) ?
    iteratedModel : selectedModels[seqCount];
  // Analyzers do not currently support returns_multiple_points() since the
  // distinction between Hybrid sampling and Multistart sampling is that
  // the former performs fn evals and processes the data (and current
  // implementations of update_best() only log a single best point).
  if (curr_iterator.returns_multiple_points()) {
    const VariablesArray& vars_results
      = curr_iterator.variables_array_results();
    const ResponseArray& resp_results = curr_iterator.response_array_results();
    // workaround: some methods define vars_results, but not resp_results
    size_t num_vars_results = vars_results.size(),
           num_resp_results = resp_results.size(),
           num_results      = std::max(num_vars_results, num_resp_results);
    prp_results.resize(num_results);
    Variables dummy_vars; Response dummy_resp;
    for (size_t i=0; i<num_results; ++i) {
      const Variables& vars = (num_vars_results) ? vars_results[i] : dummy_vars;
      const Response&  resp = (num_resp_results) ? resp_results[i] : dummy_resp;
      // need a deep copy for case where multiple instances of
      // best{Variables,Response}Array will be assimilated
      prp_results[i] = ParamResponsePair(vars, curr_model.interface_id(),
                                         resp, job_id);
    }
  }
  else {
    // need a deep copy for case where multiple instances of
    // best{Variables,Response}Array.front() will be assimilated
    prp_results.resize(1);
    prp_results[0] = ParamResponsePair(curr_iterator.variables_results(),
                                       curr_model.interface_id(),
                                       curr_iterator.response_results(), job_id);
  }
}

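/** Register each sub-iterator as a source of evaluation data for this
    meta-iterator within the evaluations database. */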
void SeqHybridMetaIterator::declare_sources() {
  for(const auto & si : selectedIterators)
    evaluationsDB.declare_source(method_id(), "metaiterator",
        si.method_id(), "iterator");
}


void SeqHybridMetaIterator::print_results(std::ostream& s, short results_state)
{
  // provide a final summary in cases where the default iterator output
  // is insufficient
  if (iterSched.messagePass) { // || numIteratorJobs > 1
    size_t i, j, cntr = 0, num_prp_res = prpResults.size(), num_prp_i;
    s << "\n<<<<< Sequential hybrid final solution sets:\n";
    for (i=0; i<num_prp_res; ++i) {
      const PRPArray& prp_i = prpResults[i];
      num_prp_i = prp_i.size();
      for (j=0; j<num_prp_i; ++j, ++cntr) {
        const Variables& vars = prp_i[j].variables();
        const Response&  resp = prp_i[j].response();
        if (!vars.is_null())
          s << "<<<<< Best parameters          (set " << cntr+1 << ") =\n"
            << vars;
        if (!resp.is_null())
          s << "<<<<< Best response functions  (set " << cntr+1 << ") =\n"
            << resp.function_values();
      }
    }
  }
}

} // namespace Dakota