1 /* _______________________________________________________________________
2
3 DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
4 Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
5 This software is distributed under the GNU Lesser General Public License.
6 For more information, see the README file in the top Dakota directory.
7 _______________________________________________________________________ */
8
9 //- Class: SeqHybridMetaIterator
10 //- Description: A hybrid meta-iterator that sequentially invokes several
11 //- methods, initializing subsequent iterators with prior results
12 //- Owner: Mike Eldred
13 //- Checked by:
14 //- Version: $Id: SeqHybridMetaIterator.hpp 7029 2010-10-22 00:17:02Z mseldre $
15
16 #ifndef SEQ_HYBRID_META_ITERATOR_H
17 #define SEQ_HYBRID_META_ITERATOR_H
18
19 #include "MetaIterator.hpp"
20
21
22 namespace Dakota {
23
24
25 /// Method for sequential hybrid iteration using multiple
26 /// optimization and nonlinear least squares methods on multiple
27 /// models of varying fidelity.
28
29 /** Sequential hybrid meta-iteration supports two approaches: (1) the
30 non-adaptive sequential hybrid runs one method to completion,
31 passes its best results as the starting point for a subsequent
32 method, and continues this succession until all methods have been
33 executed (the stopping rules are controlled internally by each
34 iterator), and (2) the adaptive sequential hybrid uses adaptive
35 stopping rules for the iterators that are controlled externally by
36 this method. Any iterator may be used so long as it defines the
37 notion of a final solution which can be passed as starting data
38 for subsequent iterators. */
39
class SeqHybridMetaIterator: public MetaIterator
{
  //
  //- Heading: Friends
  //

  /// protect scheduler callback functions from general access
  friend class IteratorScheduler;

public:

  //
  //- Heading: Constructors and destructor
  //

  /// standard constructor
  SeqHybridMetaIterator(ProblemDescDB& problem_db);
  /// alternate constructor
  SeqHybridMetaIterator(ProblemDescDB& problem_db, Model& model);
  /// destructor
  ~SeqHybridMetaIterator();

protected:

  //
  //- Heading: Virtual function redefinitions
  //

  /// Performs the hybrid iteration by executing a sequence of iterators,
  /// using a similar sequence of models that may vary in fidelity
  void core_run();
  /// print the final solution(s) from the hybrid sequence
  void print_results(std::ostream& s, short results_state = FINAL_RESULTS);

  /// allocate parallel configurations for the sub-iterator sequence
  void derived_init_communicators(ParLevLIter pl_iter);
  /// set the parallel configurations for the sub-iterator sequence
  void derived_set_communicators(ParLevLIter pl_iter);
  /// deallocate the parallel configurations for the sub-iterator sequence
  void derived_free_communicators(ParLevLIter pl_iter);

  /// estimate min/max processor counts over all sub-iterators in the sequence
  IntIntPair estimate_partition_bounds();

  /// return the final solution from selectedIterators (variables)
  const Variables& variables_results() const;
  /// return the final solution from selectedIterators (response)
  const Response& response_results() const;

  /// initialize the active iterator/model for the specified scheduler job
  void initialize_iterator(int job_index);
  /// pack a job's variable starting points for transmission to a server
  void pack_parameters_buffer(MPIPackBuffer& send_buffer, int job_index);
  /// unpack a job's starting points and initialize the active iterator/model
  void unpack_parameters_initialize(MPIUnpackBuffer& recv_buffer,
				    int job_index);
  /// pack a job's results (an entry from prpResults) for return to the master
  void pack_results_buffer(MPIPackBuffer& send_buffer, int job_index);
  /// unpack a job's results into the corresponding prpResults entry
  void unpack_results_buffer(MPIUnpackBuffer& recv_buffer, int job_index);
  /// capture the final solutions from the local iterator run in prpResults
  void update_local_results(int job_index);

  /// declare sub-iterator sources for the results database
  void declare_sources();

private:

  //
  //- Heading: Convenience member functions
  //

  void run_sequential();          ///< run a sequential hybrid
  void run_sequential_adaptive(); ///< run a sequential adaptive hybrid

  /// convert num_sets and job_index into a start_index and job_size for
  /// extraction from parameterSets
  void partition_sets(size_t num_sets, int job_index, size_t& start_index,
		      size_t& job_size);
  /// extract partial_param_sets from parameterSets based on job_index
  void extract_parameter_sets(int job_index,VariablesArray& partial_param_sets);
  /// update the partial set of final results from the local iterator execution
  void update_local_results(PRPArray& prp_results, int job_id);
  /// called by unpack_parameters_initialize(MPIUnpackBuffer) and
  /// initialize_iterator(int) to update the active Model and Iterator
  void initialize_iterator(const VariablesArray& param_sets);

  //
  //- Heading: Data members
  //

  String seqHybridType; ///< empty (default) or "adaptive"

  /// the list of method pointer or method name identifiers
  StringArray methodStrings;
  /// the list of model pointer identifiers for method identification by name
  StringArray modelStrings;

  /// use of lightweight Iterator construction by name
  bool lightwtMethodCtor;
  /// use of constructor that enforces use of a single passed Model
  bool singlePassedModel;

  /// the set of iterators, one for each entry in methodStrings
  IteratorArray selectedIterators;
  /// the set of models, one for each iterator (if not lightweight construction)
  ModelArray selectedModels;

  /// hybrid sequence counter: 0 to numIterators-1
  size_t seqCount;

  /// when the progress metric falls below this threshold, the
  /// sequential adaptive hybrid switches to the next method
  Real progressThreshold;

  /// 2-D array of results corresponding to numIteratorJobs, one set
  /// of results per job (iterators may return multiple final solutions)
  PRP2DArray prpResults;
  /// 1-D array of variable starting points for the iterator jobs
  VariablesArray parameterSets;
};
148
149
estimate_partition_bounds()150 inline IntIntPair SeqHybridMetaIterator::estimate_partition_bounds()
151 {
152 int min_procs = INT_MAX, max_procs = 0; IntIntPair min_max;
153 size_t i, num_meth = selectedIterators.size(); String empty_str;
154 for (i=0; i<num_meth; ++i) {
155 Model& model = (singlePassedModel) ? iteratedModel : selectedModels[i];
156 if (lightwtMethodCtor)
157 iterSched.construct_sub_iterator(probDescDB, selectedIterators[i], model,
158 empty_str, methodStrings[i], // ptr, name
159 modelStrings[i]); // ptr
160 else
161 iterSched.construct_sub_iterator(probDescDB, selectedIterators[i], model,
162 methodStrings[i], empty_str, empty_str);
163
164 min_max = selectedIterators[i].estimate_partition_bounds();
165 if (min_max.first < min_procs) min_procs = min_max.first;
166 if (min_max.second > max_procs) max_procs = min_max.second;
167 }
168
169 // now apply scheduling data for this level (recursion is complete)
170 min_max.first = ProblemDescDB::min_procs_per_level(min_procs,
171 iterSched.procsPerIterator, iterSched.numIteratorServers);
172 min_max.second = ProblemDescDB::max_procs_per_level(max_procs,
173 iterSched.procsPerIterator, iterSched.numIteratorServers,
174 iterSched.iteratorScheduling, 1, false, maxIteratorConcurrency);
175 return min_max;
176 }
177
178
variables_results() const179 inline const Variables& SeqHybridMetaIterator::variables_results() const
180 { return selectedIterators[methodStrings.size()-1].variables_results(); }
181
182
response_results() const183 inline const Response& SeqHybridMetaIterator::response_results() const
184 { return selectedIterators[methodStrings.size()-1].response_results(); }
185
186
187 inline void SeqHybridMetaIterator::
partition_sets(size_t num_sets,int job_index,size_t & start_index,size_t & job_size)188 partition_sets(size_t num_sets, int job_index, size_t& start_index,
189 size_t& job_size)
190 {
191 size_t set_remainder = num_sets % iterSched.numIteratorJobs;
192 job_size = num_sets / iterSched.numIteratorJobs;
193 start_index = job_index * job_size;
194 if (set_remainder) { // allocate 1 addtnl job to first set_remainder jobs
195 if (set_remainder > job_index) { // this job is offset and grown
196 start_index += job_index;
197 ++job_size;
198 }
199 else // this job is only offset
200 start_index += set_remainder;
201 }
202 }
203
204
205 /** This convenience function is executed on an iterator master
206 (static scheduling) or a meta-iterator master (self scheduling) at
207 run initialization time and has access to the full parameterSets
208 array (this is All-Reduced for all peers at the completion of each
209 cycle in run_sequential()). */
210 inline void SeqHybridMetaIterator::
extract_parameter_sets(int job_index,VariablesArray & partial_param_sets)211 extract_parameter_sets(int job_index, VariablesArray& partial_param_sets)
212 {
213 size_t start_index, job_size;
214 partition_sets(parameterSets.size(), job_index, start_index, job_size);
215 if (partial_param_sets.size() != job_size)
216 partial_param_sets.resize(job_size);
217 for (size_t i=0; i<job_size; ++i)
218 partial_param_sets[i] = parameterSets[start_index+i];
219 }
220
221
/** Thin wrapper over the PRPArray overload: records the local iterator's
    final results in prpResults[job_index], using a 1-based job id. */
inline void SeqHybridMetaIterator::update_local_results(int job_index)
{ update_local_results(prpResults[job_index], job_index+1); }
224
225
226 inline void SeqHybridMetaIterator::
initialize_iterator(const VariablesArray & param_sets)227 initialize_iterator(const VariablesArray& param_sets)
228 {
229 // BMA TODO: This mixed use of pushing data at the Iterator
230 // vs. Models likely indicates we should standardize on pushing to
231 // the sub-iterators instead of maintaining a handle to the Model.
232 // The Iterator would then have to manage any recursive updates of
233 // its underlying models. In particular, this could break if the
234 // Models associated with two Iterators have difference scaling.
235
236 // Note: in current usage, we update an iterator with either:
237 // > 1 set from parameterSets (numIteratorJobs == parameterSets.size())
238 // > all of parameterSets (numIteratorJobs == 1)
239 size_t num_param_sets = param_sets.size();
240 if (num_param_sets == 1)
241 selectedModels[seqCount].active_variables(param_sets[0]);
242 else if (selectedIterators[seqCount].accepts_multiple_points())
243 selectedIterators[seqCount].initial_points(param_sets);
244 else {
245 std::cerr << "Error: bad parameter sets array in SeqHybridMetaIterator::"
246 << "initialize_iterator()" << std::endl;
247 abort_handler(-1);
248 }
249 }
250
251
initialize_iterator(int job_index)252 inline void SeqHybridMetaIterator::initialize_iterator(int job_index)
253 {
254 if (seqCount) { // else default initialization is used
255 VariablesArray partial_param_sets;
256 extract_parameter_sets(job_index, partial_param_sets);
257 initialize_iterator(partial_param_sets);
258 }
259 }
260
261
262 inline void SeqHybridMetaIterator::
pack_parameters_buffer(MPIPackBuffer & send_buffer,int job_index)263 pack_parameters_buffer(MPIPackBuffer& send_buffer, int job_index)
264 {
265 if (seqCount) { // else default initialization is used
266 VariablesArray partial_param_sets;
267 extract_parameter_sets(job_index, partial_param_sets);
268 send_buffer << partial_param_sets;
269 }
270 }
271
272
273 inline void SeqHybridMetaIterator::
unpack_parameters_initialize(MPIUnpackBuffer & recv_buffer,int job_index)274 unpack_parameters_initialize(MPIUnpackBuffer& recv_buffer, int job_index)
275 {
276 if (seqCount) { // else default initialization is used
277 VariablesArray param_sets;
278 recv_buffer >> param_sets; // job_index can be ignored
279 initialize_iterator(param_sets);
280 }
281 }
282
283
/** Serialize this job's results set for return to the scheduling master. */
inline void SeqHybridMetaIterator::
pack_results_buffer(MPIPackBuffer& send_buffer, int job_index)
{ send_buffer << prpResults[job_index]; }
287
288
/** Deserialize a server job's results into the corresponding
    prpResults entry. */
inline void SeqHybridMetaIterator::
unpack_results_buffer(MPIUnpackBuffer& recv_buffer, int job_index)
{ recv_buffer >> prpResults[job_index]; }
292
293 } // namespace Dakota
294
295 #endif
296