1 /* _______________________________________________________________________
2
3 DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
4 Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
5 This software is distributed under the GNU Lesser General Public License.
6 For more information, see the README file in the top Dakota directory.
7 _______________________________________________________________________ */
8
9 //- Class: CONMINOptimizer
10 //- Description: Implementation code for the CONMINOptimizer class
11 //- Owner: Tony Giunta
12 //- Checked by:
13
14 #include "DakotaModel.hpp"
15 #include "DakotaResponse.hpp"
16 #include "CONMINOptimizer.hpp"
17 #include "ProblemDescDB.hpp"
18
19 static const char rcsId[]="@(#) $Id: CONMINOptimizer.cpp 7029 2010-10-22 00:17:02Z mseldre $";
20
21 // Prototype for a direct call to the F77 CONMIN code
22 #define CONMIN_F77 F77_FUNC(conmin,CONMIN)
23 extern "C" void CONMIN_F77(double*, double*, double*, double*, double*, double*,
24 double*, double*, double*, double*, double*, double*,
25 int*, int*, int*, int&, int&, int&, int&, int&,
26 double&, double&, double&, double&, double&, double&,
27 double&, double&, double&, double&, double&, double&,
28 int&, int&, int&, int&, int&, int&, int&, int&,
29 int&, int&, int&, int&, int&, int&, int&);
30
31 using namespace std;
32
33 namespace Dakota {
34
35
CONMINOptimizer(ProblemDescDB & problem_db,Model & model)36 CONMINOptimizer::CONMINOptimizer(ProblemDescDB& problem_db, Model& model):
37 Optimizer(problem_db, model, std::shared_ptr<TraitsBase>(new CONMINTraits()))
38 {
39 // If speculativeFlag is set with vendor numerical_gradients, output a warning
40 if (speculativeFlag && vendorNumericalGradFlag)
41 Cerr << "\nWarning: speculative method specification is ignored for"
42 << "\n vendor numerical gradients.\n\n";
43
44 initialize(); // convenience fn for shared ctor code
45 }
46
47
CONMINOptimizer(const String & method_string,Model & model)48 CONMINOptimizer::CONMINOptimizer(const String& method_string, Model& model):
49 Optimizer(method_string_to_enum(method_string), model, std::shared_ptr<TraitsBase>(new CONMINTraits()))
50 { initialize(); } // convenience fn for shared ctor code
51
52
initialize()53 void CONMINOptimizer::initialize()
54 {
55 // Prevent nesting of an instance of a Fortran iterator within another
56 // instance of the same iterator (which would result in data clashes since
57 // Fortran does not support object independence). Recurse through all
58 // sub-models and test each sub-iterator for CONMIN presence.
59 // Note: This check is performed for DOT, CONMIN, and SOLBase, but not
60 // for LHS since it is only active in pre-processing.
61 Iterator sub_iterator = iteratedModel.subordinate_iterator();
62 if (!sub_iterator.is_null() &&
63 ( sub_iterator.method_name() == CONMIN_FRCG ||
64 sub_iterator.method_name() == CONMIN_MFD ||
65 sub_iterator.uses_method() == CONMIN_FRCG ||
66 sub_iterator.uses_method() == CONMIN_MFD ) )
67 sub_iterator.method_recourse();
68 ModelList& sub_models = iteratedModel.subordinate_models();
69 for (ModelLIter ml_iter = sub_models.begin();
70 ml_iter != sub_models.end(); ml_iter++) {
71 sub_iterator = ml_iter->subordinate_iterator();
72 if (!sub_iterator.is_null() &&
73 ( sub_iterator.method_name() == CONMIN_FRCG ||
74 sub_iterator.method_name() == CONMIN_MFD ||
75 sub_iterator.uses_method() == CONMIN_FRCG ||
76 sub_iterator.uses_method() == CONMIN_MFD ) )
77 sub_iterator.method_recourse();
78 }
79
80 // Initialize CONMIN specific data
81 NFDG = 0; // default finite difference flag
82 IPRINT = 1; // default flag to control amount of output info
83 ITMAX = 100; // default max number of iterations
84 FDCH = 1.0e-5; // default relative finite difference step size
85 FDCHM = 1.0e-5; // default absolute finite difference step size
86 CT = -0.1; // default constraint thickness tolerance
87 // (for determining active/inactive constraint status)
88 CTMIN = 0.001; // default absolute constraint tolerance
89 // (note: the CONMIN manual default is 0.004)
90 CTL = -0.01; // default side constraint thickness tolerance (see CT)
91 CTLMIN = 0.001; // default absolute side constraint tolerance
92 DELFUN = 1.0e-7; // default minimum relative change in the objective
93 // function needed for convergence
94 DABFUN = 1.0e-7; // default minimum absolute change in the objective
95 // function needed for convergence
96
97 conminInfo = 0; // Must be set to 0 before calling CONMIN
98 ITMAX = maxIterations;
99
100 // Set the print control flag
101 if (outputLevel > NORMAL_OUTPUT) {
102 IPRINT = printControl = 4;
103 Cout << "CONMIN print control = " << printControl << endl;
104 }
105 else
106 IPRINT = printControl = 2;
107
108 // assigns a nondefault constraint tolerance if a valid value has been
109 // set in dakota.in; otherwise use CONMIN default.
110 if (constraintTol > 0.0) {
111 CTMIN = CTLMIN = constraintTol;
112 if (outputLevel > QUIET_OUTPUT)
113 Cout << "constraint violation tolerance = " << constraintTol << '\n';
114 }
115
116 // convergenceTol is an optional parameter in dakota.input.nspec, but
117 // defining our own default (in the DataMethod constructor) and
118 // always assigning it applies some consistency across methods.
119 // Therefore, the CONMIN default is not used.
120 DELFUN = DABFUN = convergenceTol; // needed in CONMIN
121
122 // Default CONMIN gradients = numerical;forward;vendor setting.
123 const String& grad_type = iteratedModel.gradient_type();
124 const String& method_src = iteratedModel.method_source();
125 const String& interval_type = iteratedModel.interval_type();
126 if ( grad_type == "analytic" || grad_type == "mixed" ||
127 ( grad_type == "numerical" && method_src == "dakota" ) )
128 // Set NFDG=1 before calling CONMIN. This invokes the
129 // user-supplied gradient mode which DAKOTA uses for analytic,
130 // dakota numerical, or mixed analytic/dakota numerical gradients.
131 NFDG=1;
132 else if (grad_type == "none") {
133 Cerr << "\nError: gradient type = none is invalid with CONMIN.\n"
134 << "Please select numerical, analytic, or mixed gradients." << endl;
135 abort_handler(-1);
136 }
137 else if (interval_type == "central") {
138 Cerr << "\nFinite Difference Type = 'central' is invalid with CONMIN.\n"
139 << "Forward difference is only available internal to CONMIN." << endl;
140 abort_handler(-1);
141 }
142 else { // Vendor numerical gradients with forward differences
143 NFDG = 0; // use CONMIN's default internal forward difference method
144
145 Real fd_grad_ss = iteratedModel.fd_gradient_step_size()[0];
146
147 // CONMIN's forward differencing uses fdss*X_i as one would expect
148 FDCH = fd_grad_ss;
149
150 // for FDCHM (minimum delta), use 2 orders of magnitude smaller than fdss to
151 // be consistent with Model::estimate_derivatives():
152 FDCHM = fd_grad_ss*.01;
153 }
154 }
155
156
// Destructor: nothing to do here.  The raw CONMIN workspace arrays are
// allocated in initialize_run() (via allocate_workspace()) and released by
// deallocate_workspace() at the end of core_run().
// NOTE(review): if core_run() exits abnormally after allocation (e.g., via
// abort_handler), the workspace is not freed here — confirm whether a
// defensive release in this destructor is desired.
CONMINOptimizer::~CONMINOptimizer()
{ }
159
160
// Translate DAKOTA's general constraint set (two-sided inequalities plus
// equalities, nonlinear and linear) into the 1-sided inequalities that
// CONMIN requires, and size the resulting CONMIN constraint count.
void CONMINOptimizer::allocate_constraints()
{
  // CONMIN handles all constraints as 1-sided inequalities. Compute the number
  // of 1-sided inequalities to pass to CONMIN (numConminConstr) as well as the
  // mappings (indices, multipliers, offsets) between the DAKOTA constraints and
  // the CONMIN constraints. TO DO: support for automatic constraint scaling.
  //
  // ***Note: CONMIN has difficulty when handling a nonlinear equality
  // constraint as two oppositely-signed inequality constraints (CONMIN's MFD
  // algorithm cannot handle the nonlinearity created by the two inequalities).
  // This is the reason behind the Modified MFD in DOT. See Vanderplaats' book,
  // Numer. Opt. Techniques for Eng. Design, for additional details.
  size_t
    num_nln_ineq = iteratedModel.num_nonlinear_ineq_constraints(),
    num_nln_eq   = iteratedModel.num_nonlinear_eq_constraints(),
    num_lin_ineq = iteratedModel.num_linear_ineq_constraints(),
    num_lin_eq   = iteratedModel.num_linear_eq_constraints();
  // Each nonlinear equality is split into two opposing 1-sided inequalities.
  numConminNlnConstr = 2*num_nln_eq;
  // The nonlinear inequality maps were built earlier (see comment below);
  // their current count is the size of the offset array.
  // NOTE(review): the linear branch below obtains this count from
  // configure_inequality_constraints(LINEAR) instead — confirm that
  // constraintMapOffsets holds exactly the nonlinear inequality entries at
  // this point so the two branches are in fact symmetric.
  numConminNlnConstr += (int)constraintMapOffsets.size();

  // Augment nonlinear inequality maps (set in Optimizer::configure_constraint_maps) with additional constraint info ...
  // (num_nln_ineq presumably serves as the index offset where the equality
  // entries begin — verify against the Optimizer base class.)
  configure_equality_constraints(CONSTRAINT_TYPE::NONLINEAR, num_nln_ineq);
  // Same treatment for the linear constraints: two 1-sided inequalities per
  // equality, plus the mapped linear inequalities.
  numConminLinConstr = 2*num_lin_eq;
  numConminLinConstr += configure_inequality_constraints(CONSTRAINT_TYPE::LINEAR);
  configure_equality_constraints(CONSTRAINT_TYPE::LINEAR, num_lin_ineq);

  // Total 1-sided inequality count passed to CONMIN.
  numConminConstr = numConminNlnConstr + numConminLinConstr;

  // Check method setting versus constraints: MFD needs constraints to work
  // on, and FRCG is unconstrained-only, so switch methods with a warning.
  if (methodName == CONMIN_MFD && !numConminConstr) {
    Cerr << "\nWarning: for no constraints, conmin_mfd request will be changed"
	 << "\n to conmin_frcg.\n\n";
    methodName = CONMIN_FRCG; // for output header/footer
  }
  else if (methodName == CONMIN_FRCG && numConminConstr) {
    Cerr << "\nWarning: for constrained optimization, conmin_frcg request will"
	 << "\n be changed to conmin_mfd.\n\n";
    methodName = CONMIN_MFD; // for output header/footer
  }
}
201
202
allocate_workspace()203 void CONMINOptimizer::allocate_workspace()
204 {
205 // Compute sizes of arrays and matrices needed for CONMIN.
206 // These sizes are listed in the 1978 Addendum to the CONMIN User's Manual.
207 N1 = numContinuousVars + 2;
208 N2 = numContinuousVars*2 + numConminConstr;
209 // N3 = 1 + an estimate of the max # of active constraints,
210 // including side constraints. The largest N3 can be is
211 // N3 = 1 + numConminConstr + numContinuousVars
212 // (i.e., there are, at most, numContinuousVars active side constraints)
213 // NOTE: with a bad user input of lowerBnd==upperBnd, this could break down.
214 N3 = 1 + numConminConstr + numContinuousVars;
215 N4 = (N3 >= numContinuousVars) ? N3 : numContinuousVars; // always N3
216 N5 = 2*N4;
217
218 // Declare arrays needed for CONMIN. The sizes for these arrays are
219 // listed in the 1978 Addendum to the CONMIN User's Manual
220 conminDesVars = new Real[N1];
221 conminLowerBnds = new Real[N1];
222 conminUpperBnds = new Real[N1];
223 S = new Real[N1];
224 G1 = new Real[N2];
225 G2 = new Real[N2];
226 B = new Real[N3*N3];
227 C = new Real[N4];
228 MS1 = new int[N5];
229 SCAL = new Real[N1];
230 DF = new Real[N1];
231 A = new Real[N1*N3];
232 ISC = new int[N2];
233 IC = new int[N3];
234
235 // Size the array that holds the constraint values.
236 // For CONMIN the minimum size of the vector of constraints is "N2"
237 constraintValues.resize(N2);
238 }
239
240
deallocate_workspace()241 void CONMINOptimizer::deallocate_workspace()
242 {
243 delete [] conminDesVars;
244 delete [] conminLowerBnds;
245 delete [] conminUpperBnds;
246 delete [] S;
247 delete [] G1;
248 delete [] G2;
249 delete [] B;
250 delete [] C;
251 delete [] MS1;
252 delete [] SCAL;
253 delete [] DF;
254 delete [] A;
255 delete [] ISC;
256 delete [] IC;
257 }
258
259
initialize_run()260 void CONMINOptimizer::initialize_run()
261 {
262 Optimizer::initialize_run();
263
264 // Allocate space for CONMIN arrays
265 allocate_constraints();
266 allocate_workspace();
267
268 // initialize the IC and ISC vectors
269 size_t i;
270 for (i=0; i<numConminConstr; i++)
271 IC[i] = ISC[i] = 0;
272
273 // Initialize CONMIN's local vars and bounds arrays
274 // Note: these are different than DAKOTA's local vars and bounds arrays
275 // because CONMIN uses arrays for vars and bounds that are larger than
276 // used by DAKOTA, i.e., DAKOTA's local_cdv has length numContinuousVars,
277 // and CONMIN's conminDesVars has length N1 = numContinuousVars+2
278 //
279 // copy DAKOTA arrays to CONMIN arrays and check for the existence of bounds.
280 const RealVector& local_cdv = iteratedModel.continuous_variables();
281 const RealVector& lower_bnds = iteratedModel.continuous_lower_bounds();
282 const RealVector& upper_bnds = iteratedModel.continuous_upper_bounds();
283 for (i=0; i<numContinuousVars; i++) {
284 conminDesVars[i] = local_cdv[i];
285 conminLowerBnds[i] = lower_bnds[i];
286 conminUpperBnds[i] = upper_bnds[i];
287 }
288 // Initialize array padding (N1 = numContinuousVars + 2).
289 for (i=numContinuousVars; i<N1; i++)
290 conminDesVars[i] = conminLowerBnds[i] = conminUpperBnds[i] = 0.;
291 }
292
293
// Drive the optimization.  CONMIN runs in reverse-communication mode: each
// call into the Fortran engine returns with a request for either function
// values (conminInfo == 1) or gradients (conminInfo == 2), which are
// evaluated through iteratedModel and fed back on the next call, until
// CONMIN signals completion (IGOTO == 0) or maxFunctionEvals is exhausted.
void CONMINOptimizer::core_run()
{
  size_t i, j, fn_eval_cntr;
  int num_cv = numContinuousVars;

  // Any MOO/NLS recasting is responsible for setting the scalar min/max
  // sense within the recast.
  const BoolDeque& max_sense = iteratedModel.primary_response_fn_sense();
  bool max_flag = (!max_sense.empty() && max_sense[0]);

  // Initialize variables internal to CONMIN
  int NSIDE = 0; // flag for upper/lower var bounds: 1=bounds, 0=no bounds
  // NSIDE must be set to 0 for unbounded since CONMIN cannot handle having
  // upper/lower bounds set to +/-inf. Set NSIDE to 1 if bounds arrays have
  // nondefault values (i.e., any finite bound on any variable).
  for (i=0; i<numContinuousVars; i++) {
    if (conminLowerBnds[i] > -bigRealBoundSize ||
	conminUpperBnds[i] < bigRealBoundSize) {
      NSIDE = 1;
      break;
    }
  }
  int ICNDIR = numContinuousVars+1; // conjugate direction restart parameter
  int NSCAL = 0; // (unused) scaling flag
  //int NACMX1 = N3; // estimate of 1+(max # of active constraints)
  int LINOBJ = 0; // (unused) set to 1 if obj fcn is linear
  int ITRM = 3; // number of consecutive iterations of less than DELFUN
                // or DABFUN progress before optimization terminated
  double THETA = 1.0; // mean value of "push-off factor" in mtd of feasible
                      // directions
  //double PHI = 5.0; // "participation coefficient" to force an infeasible
                      // design to be feasible
  double ALPHAX = 0.1; // 1-D search fractional change parameter
  double ABOBJ1 = 0.1; // 1-D search fractional change parameter for 1st step

  // variables for loop that calls CONMIN
  int IGOTO = 0; // CONMIN internal optimization termination flag: 0=stop
  int NAC; // number of active constraints computed in CONMIN
  int INFOG; // (unused) flag that allows for non-essential function
             // evaluations in FD gradients to be avoided
  int ITER; // CONMIN internal iteration counter

  // Initialize local vars and linear constraints
  RealVector local_cdv(numContinuousVars);
  size_t num_lin_ineq = iteratedModel.num_linear_ineq_constraints(),
    num_lin_eq = iteratedModel.num_linear_eq_constraints();
  const RealMatrix& lin_ineq_coeffs
    = iteratedModel.linear_ineq_constraint_coeffs();
  const RealMatrix& lin_eq_coeffs
    = iteratedModel.linear_eq_constraint_coeffs();
  const String& grad_type = iteratedModel.gradient_type();

  // Start FOR loop to execute calls to CONMIN.  The counter bounds the
  // number of reverse-communication round trips (i.e., model evaluations).
  for (fn_eval_cntr=1; fn_eval_cntr<=maxFunctionEvals; fn_eval_cntr++) {

    // Reverse-communication call into the Fortran engine.  On return,
    // IGOTO/conminInfo describe what CONMIN wants next; all arrays were
    // sized in allocate_workspace() per the CONMIN manual.
    CONMIN_F77(conminDesVars, conminLowerBnds, conminUpperBnds,
	       constraintValues.values(), SCAL, DF, A, S, G1, G2, B, C, ISC, IC,
	       MS1, N1, N2, N3, N4, N5, DELFUN, DABFUN, FDCH, FDCHM, CT, CTMIN,
	       CTL, CTLMIN, ALPHAX, ABOBJ1, THETA, objFnValue, num_cv,
	       numConminConstr, NSIDE, IPRINT, NFDG, NSCAL, LINOBJ, ITMAX, ITRM,
	       ICNDIR, IGOTO, NAC, conminInfo, INFOG, ITER);

    if (IGOTO == 0) break; // CONMIN's flag that optimization is complete

    // conminInfo=1: Line search/Finite differencing, eval obj & all constr fns.
    // conminInfo=2: Evaluate gradients of objective & active constraints
    if (conminInfo == 1) {
      if (outputLevel > NORMAL_OUTPUT) // output info on CONMIN request
	Cout << "\nCONMIN requests function values:";
      if (speculativeFlag && !vendorNumericalGradFlag) {
	if (outputLevel > NORMAL_OUTPUT)
	  Cout << "\nSpeculative optimization: evaluation augmented with "
	       << "speculative gradients.";
	activeSet.request_values(3); // values + gradients in one evaluation
      }
      else
	activeSet.request_values(1); // values only
    }
    else if (conminInfo == 2) {
      if (outputLevel > NORMAL_OUTPUT) { // output info on CONMIN request
	if (grad_type == "numerical")
	  Cout << "\nCONMIN requests dakota-numerical gradients:";
	else
	  Cout << "\nCONMIN requests analytic gradients:";
	if (speculativeFlag && !vendorNumericalGradFlag)
	  Cout << "\nSpeculative optimization: retrieving gradients already "
	       << "evaluated from database.";
      }
      // Request gradients (conminInfo==2) for the objective(s) only, then
      // add the active constraints below.
      activeSet.request_values(0);
      for (i=0; i<numObjectiveFns; i++)
	activeSet.request_value(conminInfo, i); // objective function(s)
      // CONMIN does not compute the number of active constraints and
      // requires the user to do so. Store this value in CONMIN's variable
      // NAC and store the indices of the constraints in array IC.
      // (CT is negative, so any constraint value at or above this slightly
      // negative threshold is treated as active.)
      NAC = 0;
      for (i=0; i<numConminConstr; i++)
	if ( constraintValues[i] >= CT )
	  IC[NAC++] = i + 1; // The +1 is needed for F77 array indexing

      // for each of the active constraints, assign a value to DAKOTA's
      // active set vector
      for (i=0; i<NAC; i++) {
	// IC contains the active constraint #'s While CONMIN returns
	// 1-based constraint id's, work in 0-based id's for indexing
	// convenience.
	size_t conmin_constr = IC[i] - 1; // (0-based)
	if (conmin_constr < numConminNlnConstr) {
	  // Only nonlinear constraints need model gradients; linear
	  // constraint gradients come from the coefficient matrices below.
	  size_t dakota_constr = constraintMapIndices[conmin_constr];
	  activeSet.request_value(conminInfo, dakota_constr + numObjectiveFns);
	  // some DAKOTA equality and 2-sided inequality constraints may
	  // have their ASV assigned multiple times depending on which of
	  // CONMIN's 1-sided inequalities are active.
	}
      }
    }

    // Push CONMIN's current design point into the model and evaluate.
    copy_data(conminDesVars, num_cv, local_cdv);
    iteratedModel.continuous_variables(local_cdv);
    iteratedModel.evaluate(activeSet);
    const Response& local_response = iteratedModel.current_response();

    // Populate proper data for input back to CONMIN through parameter list.
    // This include gradient data, along with the number and indices of
    // the active constraints.
    if (conminInfo == 2) {
      // Populate CONMIN's obj fcn gradient vector "DF"
      const RealMatrix& local_fn_grads = local_response.function_gradients();
      const int num_vars = local_fn_grads.numRows();
      for (j=0; j<num_vars; ++j) // obj. fn. grad. always needed
	DF[j] = (max_flag) ? -local_fn_grads(j, 0) : local_fn_grads(j, 0);
      // Return the gradients of the active constraints in the matrix "A".
      for (i=0; i<NAC; i++) {
	// Populate CONMIN's active constraint gradient matrix "A". For some
	// reason, CONMIN's A-matrix has a column length of N1 (and N1 =
	// numContinuousVars+2).
	size_t conmin_constr = IC[i]-1;
	size_t dakota_constr = constraintMapIndices[conmin_constr];
	if (conmin_constr < numConminNlnConstr) { // nonlinear ineq & eq
	  // gradients pick up multiplier mapping only (offsets drop out)
	  // (+1 skips the objective column in the response gradients)
	  for (j=0; j<num_vars; ++j)
	    A[i*N1 + j] = constraintMapMultipliers[conmin_constr] *
	      local_fn_grads(j,dakota_constr+1);
	}
	else if (dakota_constr < num_lin_ineq) { // linear ineq
	  for (j=0; j<num_vars; ++j)
	    A[i*N1 + j] = constraintMapMultipliers[conmin_constr] *
	      lin_ineq_coeffs(dakota_constr,j);
	}
	else { // linear eq
	  size_t dakota_leq_constr = dakota_constr - num_lin_ineq;
	  for (j=0; j<num_vars; ++j)
	    A[i*N1 + j] = constraintMapMultipliers[conmin_constr] *
	      lin_eq_coeffs(dakota_leq_constr,j);
	}
      }
    }
    else {
      // Get objective function and constraint values
      // note: no active/inactive distinction needed with constraints
      const RealVector& local_fn_vals = local_response.function_values();
      objFnValue = (max_flag) ? -local_fn_vals[0] : local_fn_vals[0];
      // CONMIN constraint requests must be mapped to DAKOTA constraints and
      // offsets/multipliers must be applied.
      size_t conmin_constr, dakota_constr;
      for (conmin_constr=0; conmin_constr<numConminConstr; conmin_constr++) {
	dakota_constr = constraintMapIndices[conmin_constr];
	if (conmin_constr < numConminNlnConstr) // nonlinear ineq & eq
	  constraintValues[conmin_constr] =
	    constraintMapOffsets[conmin_constr] +
	    constraintMapMultipliers[conmin_constr] *
	    local_fn_vals[dakota_constr+1];
	else {
	  // Linear constraints: evaluate A*x directly from the coefficient
	  // matrices rather than from the response.
	  Real Ax = 0.0;
	  if (dakota_constr < num_lin_ineq) { // linear ineq
	    for (j=0; j<numContinuousVars; j++)
	      Ax += lin_ineq_coeffs(dakota_constr,j) * local_cdv[j];
	  }
	  else { // linear eq
	    size_t dakota_leq_constr = dakota_constr - num_lin_ineq;
	    for (j=0; j<numContinuousVars; j++)
	      Ax += lin_eq_coeffs(dakota_leq_constr,j) * local_cdv[j];
	  }
	  constraintValues[conmin_constr] =
	    constraintMapOffsets[conmin_constr] +
	    constraintMapMultipliers[conmin_constr] * Ax;
	}
      }
    }

  } // end of FOR loop starting above call to CONMIN

  // If the loop ran to exhaustion (counter overshot by one), the evaluation
  // budget — not CONMIN's convergence test — stopped the optimization.
  if (fn_eval_cntr == maxFunctionEvals+1)
    Cout << "Iteration terminated: max_function_evaluations limit has been "
	 << "met.\n";

  // Set best variables and response for use by strategy level.
  // If conminInfo = 0, then optimization was complete and CONMIN
  // contains the best variables & responses data in the argument list
  // data structures. If conminInfo is not 0, then there was an abort
  // (e.g., maxFunctionEvals) and it is not clear how to return the
  // best solution. For now, return the argument list data (as if
  // conminInfo = 0) which should be the last evaluation (?).
  copy_data(conminDesVars, num_cv, local_cdv);
  bestVariablesArray.front().continuous_variables(local_cdv);
  if (!localObjectiveRecast) { // else local_objective_recast_retrieve()
                               // used in Optimizer::post_run()
    RealVector best_fns(numFunctions);
    // Undo the sign flip applied for maximization before reporting.
    best_fns[0] = (max_flag) ? -objFnValue : objFnValue;
    // NOTE: best_fn_vals[i] may be recomputed multiple times, but this
    // should be OK so long as all of constraintValues is populated
    // (no active set deletions).
    for (size_t i=0; i<numConminNlnConstr; i++) {
      size_t dakota_constr = constraintMapIndices[i];
      // back out the offset and multiplier
      best_fns[dakota_constr+1] = ( constraintValues[i] -
	constraintMapOffsets[i] ) / constraintMapMultipliers[i];
    }
    bestResponseArray.front().function_values(best_fns);
  }
  // Free the Fortran workspace; it is re-created on the next initialize_run.
  deallocate_workspace();
}
515
516 } // namespace Dakota
517