/* _______________________________________________________________________

    DAKOTA: Design Analysis Kit for Optimization and Terascale Applications
    Copyright 2014-2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
    This software is distributed under the GNU Lesser General Public License.
    For more information, see the README file in the top Dakota directory.
    _______________________________________________________________________ */

//- Class:       rSQPOptimizer
//- Description: Implementation code for the rSQPOptimizer class
//- Owner:       Roscoe Bartlett
//- Checked by:
13
14 #include "system_defs.h"
15 #include "rSQPOptimizer.H"
16 #include "ProblemDescDB.H"
17
18 #include "MoochoPack/configurations/MoochoSolver.hpp"
19 #include "DenseLinAlgPack/src/LinAlgOpPack.hpp"
20 #include "ThrowException.hpp"
21
22 static const char rcsId[]="@(#) $Id: rSQPOptimizer.C 7029 2010-10-22 00:17:02Z mseldre $";
23
24 namespace {
25
my_vec_view(Dakota::RealVector & v)26 DenseLinAlgPack::DVectorSlice my_vec_view( Dakota::RealVector& v )
27 {
28 return DenseLinAlgPack::DVectorSlice( &const_cast<Dakota::RealVector&>(v)[0], v.length() );
29 }
30
my_vec_view(const Dakota::RealVector & v)31 const DenseLinAlgPack::DVectorSlice my_vec_view( const Dakota::RealVector& v )
32 {
33 return DenseLinAlgPack::DVectorSlice( &const_cast<Dakota::RealVector&>(v)[0], v.length() );
34 }
35
36 } // end namespace
37
38 //
39 // NLPDakota
40 //
41
42 namespace NLPInterfacePack {
43
44 // Constructors / initializers
45
// Construct the NLP adapter around a Dakota Model plus the owning optimizer.
//
// NOTE(review): dakota_rsqp_opt is dereferenced in the initializer list
// (primary_response_fn_weights()) before any check is performed, so callers
// must guarantee it is non-NULL -- only `model` is checked explicitly.
NLPDakota::NLPDakota(
  Dakota::Model   *model
  ,rSQPOptimizer  *dakota_rsqp_opt
  ,size_type      num_lin_eq
  ,size_type      num_nonlin_eq
  ,size_type      num_lin_ineq
  ,size_type      num_nonlin_ineq
  )
  :model_(model)
  ,dakota_rsqp_opt_(dakota_rsqp_opt)
  ,multi_obj_weights_(dakota_rsqp_opt->primary_response_fn_weights())
  ,num_lin_eq_(num_lin_eq)
  ,num_nonlin_eq_(num_nonlin_eq)
  ,num_lin_ineq_(num_lin_ineq)
  ,num_nonlin_ineq_(num_nonlin_ineq)
  ,is_initialized_(false)
  ,n_orig_(0)
  ,f_orig_updated_(false)
  ,c_orig_updated_(false)
  ,h_orig_updated_(false)
  ,Gf_orig_updated_(false)
  ,Gc_orig_updated_(false)
  ,Gh_orig_updated_(false)
{
  THROW_EXCEPTION(
    model_ == NULL, std::logic_error
    ,"NLPDakota::NLPDakota(model): Error, model can not be NULL!"
    );
  // Get a representation for an invalid value: prefer +Inf, then quiet NaN,
  // then the largest finite value if neither is supported by value_type.
  typedef std::numeric_limits<value_type> nl_t;
  invalid_func_value_ = nl_t::has_infinity
    ? nl_t::infinity()
    : ( nl_t::has_quiet_NaN
        ? nl_t::quiet_NaN()
        : nl_t::max() // If Inf and Nan are not supported just put in a large value
      );
  // Get the dummy value Dakota reports when a simulation evaluation fails;
  // imp_calc_point() compares against this to detect failed responses.
  const Dakota::RealVector &failure_values = model_->problem_description_db().get_rdv("interface.failure_capture.recovery_fn_vals");
  if(failure_values.length()) {
    // Dakota will return failure_values in case the simulation fails
    dakota_failed_value_ = failure_values[0]; // Just watch out for the first one
  }
  else { // Dakota will not return a failed value
    dakota_failed_value_ = invalid_func_value_; // This should not be generated by accident
  }
}
92
93 // Overridden public members from NLP
94
initialize(bool test_setup)95 void NLPDakota::initialize(bool test_setup)
96 {
97
98 if(is_initialized_) {
99 NLPSerialPreprocessExplJac::initialize(test_setup);
100 return;
101 }
102
103 //
104 // Get the dimensions of the problem
105 //
106
107 const int num_responces = model_->num_functions();
108 num_objectives_ = dakota_rsqp_opt_->num_objectives();
109
110 n_orig_ = model_->cv();
111 m_orig_ = num_lin_eq_ + num_nonlin_eq_;
112 mI_orig_ = num_lin_ineq_ + num_nonlin_ineq_;
113
114 //
115 // Get the data for the problem
116 //
117
118 // xinit, xl, xu
119 xinit_orig_ = my_vec_view(model_->continuous_variables());
120 xl_orig_ = my_vec_view(model_->continuous_lower_bounds());
121 xu_orig_ = my_vec_view(model_->continuous_upper_bounds());
122
123 has_var_bounds_ = true; // We will assume this here
124
125 // hl, hu
126 hl_orig_.resize(mI_orig_);
127 hu_orig_.resize(mI_orig_);
128 if(num_lin_ineq_) {
129 hl_orig_(1,num_lin_ineq_) = my_vec_view(dakota_rsqp_opt_->lin_ineq_lb());
130 hu_orig_(1,num_lin_ineq_) = my_vec_view(dakota_rsqp_opt_->lin_ineq_ub());
131 }
132 if(num_nonlin_ineq_) {
133 hl_orig_(num_lin_ineq_+1,mI_orig_) = my_vec_view(dakota_rsqp_opt_->nonlin_ineq_lb());
134 hu_orig_(num_lin_ineq_+1,mI_orig_) = my_vec_view(dakota_rsqp_opt_->nonlin_ineq_ub());
135 }
136
137 // Gc, Gh
138 Gc_orig_nz_ = n_orig_ * m_orig_;
139 Gh_orig_nz_ = n_orig_ * mI_orig_;
140
141 // Dakota interface stuff
142 dakota_x_.reshape(n_orig_);
143 dakota_asv_.reshape(num_responces);
144 dakota_functions_.reshape(num_responces);
145
146 is_initialized_ = true;
147 NLPSerialPreprocessExplJac::initialize(test_setup);
148
149 // throw std::logic_error("NLPDakota::initialize(): End, dam it!");
150
151 }
152
// Overridden from NLP: true once initialize() has completed.
bool NLPDakota::is_initialized() const
{
  return is_initialized_;
}
157
// Maximum amount by which x may violate the variable bounds during an
// evaluation.  The value 1.0 is an admitted guess by the original author --
// TODO confirm an appropriate tolerance for Dakota models.
value_type NLPDakota::max_var_bounds_viol() const
{
  return 1.0; // I don't know what to assume?
}
162
// Toggle whether one evaluation request computes every needed quantity
// (f, c, h and their gradients) in a single Dakota response.  Declared
// const yet assigns a member, so multi_calc_ is presumably declared
// mutable -- TODO confirm in the class header.
void NLPDakota::set_multi_calc(bool multi_calc) const
{
  multi_calc_ = multi_calc;
}
167
// Current multi-calculation flag (see set_multi_calc()).
bool NLPDakota::multi_calc() const
{
  return multi_calc_;
}
172
173 // Overridden from NLPVarReductPerm
174
// Overridden from NLPVarReductPerm: this NLP has no basis to suggest.
bool NLPDakota::nlp_selects_basis() const
{
  return false; // We don't know a basis
}
179
180 // Overridden protected methods from NLPSerialPreprocess
181
// The NLP definition is fixed at construction time and never changes.
bool NLPDakota::imp_nlp_has_changed() const
{
  return false; // There is no way to modify the nlp except through the constructor
}
186
// Number of original (continuous) variables, set in initialize().
size_type NLPDakota::imp_n_orig() const
{
  return n_orig_;
}
191
// Number of equality constraints (linear + nonlinear), set in initialize().
size_type NLPDakota::imp_m_orig() const
{
  return m_orig_;
}
196
// Number of general inequality constraints (linear + nonlinear).
size_type NLPDakota::imp_mI_orig() const
{
  return mI_orig_;
}
201
// Initial point, copied from the model's continuous variables in initialize().
const DVectorSlice NLPDakota::imp_xinit_orig() const
{
  return xinit_orig_;
}
206
// Whether variable bounds exist; initialize() unconditionally assumes true.
bool NLPDakota::imp_has_var_bounds() const
{
  return has_var_bounds_;
}
211
// Lower variable bounds, copied from the model in initialize().
const DVectorSlice NLPDakota::imp_xl_orig() const
{
  return xl_orig_;
}
216
// Upper variable bounds, copied from the model in initialize().
const DVectorSlice NLPDakota::imp_xu_orig() const
{
  return xu_orig_;
}
221
// Lower bounds for general inequalities (linear first, then nonlinear).
const DVectorSlice NLPDakota::imp_hl_orig() const
{
  return hl_orig_;
}
226
// Upper bounds for general inequalities (linear first, then nonlinear).
const DVectorSlice NLPDakota::imp_hu_orig() const
{
  return hu_orig_;
}
231
imp_calc_f_orig(const DVectorSlice & x_full,bool newx,const ZeroOrderInfoSerial & zero_order_info)232 void NLPDakota::imp_calc_f_orig(
233 const DVectorSlice &x_full
234 ,bool newx
235 ,const ZeroOrderInfoSerial &zero_order_info
236 ) const
237 {
238 imp_calc_point(CALC_F,x_full,newx,&zero_order_info,NULL,NULL);
239 }
240
imp_calc_c_orig(const DVectorSlice & x_full,bool newx,const ZeroOrderInfoSerial & zero_order_info)241 void NLPDakota::imp_calc_c_orig(
242 const DVectorSlice &x_full
243 ,bool newx
244 ,const ZeroOrderInfoSerial &zero_order_info
245 ) const
246 {
247 imp_calc_point(CALC_C,x_full,newx,&zero_order_info,NULL,NULL);
248 }
249
imp_calc_h_orig(const DVectorSlice & x_full,bool newx,const ZeroOrderInfoSerial & zero_order_info)250 void NLPDakota::imp_calc_h_orig(
251 const DVectorSlice &x_full
252 ,bool newx
253 ,const ZeroOrderInfoSerial &zero_order_info
254 ) const
255 {
256 imp_calc_point(CALC_H,x_full,newx,&zero_order_info,NULL,NULL);
257 }
258
imp_calc_Gf_orig(const DVectorSlice & x_full,bool newx,const ObjGradInfoSerial & obj_grad_info)259 void NLPDakota::imp_calc_Gf_orig(
260 const DVectorSlice &x_full
261 ,bool newx
262 ,const ObjGradInfoSerial &obj_grad_info
263 ) const
264 {
265 imp_calc_point(CALC_GF,x_full,newx,NULL,&obj_grad_info,NULL);
266 }
267
// Hook for reporting the final solution back to the application.
// Intentionally a no-op: the last computed point is instead recovered by
// rSQPOptimizer::find_optimum() via dakota_x()/dakota_functions().
void NLPDakota::imp_report_orig_final_solution(
  const DVectorSlice      &x_orig
  ,const DVectorSlice     *lambda_orig
  ,const DVectorSlice     *lambdaI_orig
  ,const DVectorSlice     *nu_orig
  ,bool                   is_optimal
  ) const
{
  // I don't know where to send this stuff?  I could save it in a
  // file or something for later use.
}
279
280 // Overridden protected methods from NLPSerialPreprocessExplJac
281
// Number of nonzeros in Gc; dense n*m value computed in initialize().
size_type NLPDakota::imp_Gc_nz_orig() const
{
  return Gc_orig_nz_;
}
286
// Number of nonzeros in Gh; dense n*mI value computed in initialize().
size_type NLPDakota::imp_Gh_nz_orig() const
{
  return Gh_orig_nz_;
}
291
imp_calc_Gc_orig(const DVectorSlice & x_full,bool newx,const FirstOrderExplInfo & first_order_expl_info)292 void NLPDakota::imp_calc_Gc_orig(
293 const DVectorSlice& x_full, bool newx
294 , const FirstOrderExplInfo& first_order_expl_info
295 ) const
296 {
297 imp_calc_point(CALC_GC,x_full,newx,NULL,NULL,&first_order_expl_info);
298 }
299
imp_calc_Gh_orig(const DVectorSlice & x_full,bool newx,const FirstOrderExplInfo & first_order_expl_info)300 void NLPDakota::imp_calc_Gh_orig(
301 const DVectorSlice& x_full, bool newx
302 , const FirstOrderExplInfo& first_order_expl_info
303 ) const
304 {
305 imp_calc_point(CALC_GH,x_full,newx,NULL,NULL,&first_order_expl_info);
306 }
307
308 // Private member functions
309
// Throw std::logic_error if initialize() has not been called yet.
void NLPDakota::assert_is_initialized() const
{
  THROW_EXCEPTION(
    !is_initialized_, std::logic_error
    ,"NLPDakota::assert_is_initialized(): Error, the object has not been initialized properly!"
    );
}
317
// Shared evaluation routine behind all of the imp_calc_*() methods.
//
// Builds a Dakota active-set vector (ASV) describing which response values
// (bit 1) and gradients (bit 2) are required, runs the model once, and maps
// the Dakota response into the NLP quantities (f, c, h, Gf, Gc, Gh).
// Exactly one of zero_order_info / obj_grad_info / first_order_expl_info
// must be non-NULL and determines which outputs can be filled.
//
// Ordering conventions (consistent with initialize()):
//  - Dakota responses: objectives first, then nonlinear inequalities,
//    then nonlinear equalities.
//  - NLP constraint vectors/Jacobians: linear entries first, then nonlinear.
void NLPDakota::imp_calc_point(
  ERequiredFunc                required
  ,const DVectorSlice          &x_full
  ,bool                        newx
  ,const ZeroOrderInfoSerial   *zero_order_info
  ,const ObjGradInfoSerial     *obj_grad_info
  ,const FirstOrderExplInfo    *first_order_expl_info
  ) const
{
  using LinAlgOpPack::Vp_StV;
  typedef FirstOrderExplInfo::val_t    G_val_t;
  typedef FirstOrderExplInfo::ivect_t  G_ivect_t;
  typedef FirstOrderExplInfo::jvect_t  G_jvect_t;

  Cout << "\nEntering NLPDakota::imp_calc_point(...) ...\n";

  // throw std::logic_error("NLPDakota::imp_calc_point(): End, dam it!");

  // A new point invalidates everything cached from the previous point.
  if( newx ) {
    f_orig_updated_  = false;
    c_orig_updated_  = false;
    h_orig_updated_  = false;
    Gf_orig_updated_ = false;
    Gc_orig_updated_ = false;
    Gh_orig_updated_ = false;
    my_vec_view(dakota_functions_) = 0.0;
  }

  // Only the first n_orig_ entries of x_full are original variables
  // (the rest, if any, are slacks added by the preprocessor).
  const DVectorSlice x_orig = x_full(1,n_orig_);

  //
  // Determine which quantities to compute.  In multi_calc_ mode every
  // not-yet-updated quantity of the same (or lower) order is piggybacked
  // onto the requested one so the model is evaluated only once.
  //

  const bool calc_f  = ( required==CALC_F || multi_calc_ ) && !f_orig_updated_;
  const bool calc_c  = ( required==CALC_C || multi_calc_ ) && !c_orig_updated_;
  const bool calc_h  = ( required==CALC_H || multi_calc_ ) && !h_orig_updated_;
  const bool calc_Gf = ( required==CALC_GF || ( (int)required >= (int)CALC_GF && multi_calc_ ) ) && !Gf_orig_updated_;
  const bool calc_Gc = ( required==CALC_GC || ( (int)required >= (int)CALC_GF && multi_calc_ ) ) && !Gc_orig_updated_;
  const bool calc_Gh = ( required==CALC_GH || ( (int)required >= (int)CALC_GF && multi_calc_ ) ) && !Gh_orig_updated_;

  if( !calc_f && !calc_c && !calc_h && !calc_Gf && !calc_Gc && !calc_Gh )
    return; // Everything is already computed at this point!

  // Output targets; which ones are non-NULL depends on which info struct
  // was passed in.
  value_type  *f  = NULL;
  DVector     *c  = NULL;
  DVector     *h  = NULL;
  DVector     *Gf = NULL;
  G_val_t     *Gc_val   = NULL;
  G_ivect_t   *Gc_ivect = NULL;
  G_jvect_t   *Gc_jvect = NULL;
  G_val_t     *Gh_val   = NULL;
  G_ivect_t   *Gh_ivect = NULL;
  G_jvect_t   *Gh_jvect = NULL;

  if(zero_order_info) {
    if(calc_f) f = zero_order_info->f;
    if(calc_c) c = zero_order_info->c;
    if(calc_h) h = zero_order_info->h;
  }
  else if(obj_grad_info) {
    if(calc_f)  f  = obj_grad_info->f;
    if(calc_c)  c  = obj_grad_info->c;
    if(calc_h)  h  = obj_grad_info->h;
    if(calc_Gf) Gf = obj_grad_info->Gf;
  }
  else if(first_order_expl_info) {
    if(calc_f)  f  = first_order_expl_info->f;
    if(calc_c)  c  = first_order_expl_info->c;
    if(calc_h)  h  = first_order_expl_info->h;
    if(calc_Gf) Gf = first_order_expl_info->Gf;
    if(calc_Gc) {
      Gc_val   = first_order_expl_info->Gc_val;
      Gc_ivect = first_order_expl_info->Gc_ivect;
      Gc_jvect = first_order_expl_info->Gc_jvect;
    }
    if(calc_Gh) {
      Gh_val   = first_order_expl_info->Gh_val;
      Gh_ivect = first_order_expl_info->Gh_ivect;
      Gh_jvect = first_order_expl_info->Gh_jvect;
    }
  }
  else {
    THROW_EXCEPTION(
      !(zero_order_info || obj_grad_info || first_order_expl_info), std::logic_error
      ,"NLPDakota::imp_calc_point(): Error!"
      );
  }

  //
  // Set the Dakota active-set vector.
  //
  // Dakota sorts the response functions as general inequalities first
  // and general equalities second.
  //

  std::fill_n( &dakota_asv_[0], dakota_asv_.length(), 0 );

  const int num_obj_funcs = multi_obj_weights_.length();

  // ASV encoding: 1 = value, 2 = gradient, 3 = both.
  if( calc_f || calc_Gf ) {
    std::fill_n(
      &dakota_asv_[0], num_obj_funcs
      ,(calc_f ? 1 : 0) + (calc_Gf ? 2 : 0)
      );
  }
  if( ( calc_c || calc_Gc ) && num_nonlin_eq_ ) {
    std::fill_n(
      &dakota_asv_[num_obj_funcs + num_nonlin_ineq_], num_nonlin_eq_
      ,(calc_c ? 1 : 0) + (calc_Gc ? 2 : 0)
      );
  }
  if( ( calc_h || calc_Gh ) && num_nonlin_ineq_ ) {
    std::fill_n(
      &dakota_asv_[num_obj_funcs], num_nonlin_ineq_
      ,(calc_h ? 1 : 0) + (calc_Gh ? 2 : 0)
      );
  }

  //
  // Compute the nonlinear functions
  //

  my_vec_view(dakota_x_) = x_orig;
  model_->continuous_variables(dakota_x_);
  model_->compute_response(dakota_asv_);
  const Dakota::Response&   local_response           = model_->current_response();
  const Dakota::RealVector& local_function_values    = local_response.function_values();
  const Dakota::RealMatrix& local_function_gradients = local_response.function_gradients();

  // Look for failed response.  NOTE(review): only the first response value
  // is compared against the recovery value (see the constructor) --
  // presumably that is sufficient to detect a failed evaluation; verify.
  const bool eval_failed = (local_function_values[0] == dakota_failed_value_);

  //
  // Map to the NLP objects
  //

  if(!eval_failed) {

    //
    // The evaluation succeeded so copy the values
    //

    // f, Gf: weighted sum of the (possibly multiple) objective responses.
    if(calc_f)  *f  = 0.0;
    if(calc_Gf) *Gf = 0.0;
    for( int i = 0; i < num_obj_funcs; ++i ) {
      if(calc_f) {
        *f += multi_obj_weights_[i] * local_function_values[i];
        dakota_functions_[i] = local_function_values[i]; // Save in dakota_functions
      }
      if(calc_Gf) {
        Vp_StV(
          &(*Gf)(1,n_orig_), multi_obj_weights_[i]
          ,DVectorSlice(&const_cast<Dakota::RealMatrix&>(local_function_gradients)[i][0],n_orig_)
          );
      }
    }
    if(calc_Gf && Gf->dim() > n_orig_ )
      (*Gf)(n_orig_+1,Gf->dim()) = 0.0; // Zero for slack variables

    // c = [ A_lin_eq' * x - lin_eq_targ                                                                          ]
    //     [ response(num_obj_funcs+num_nonlin_ineq+1:num_obj_funs+num_nonlin_ineq+num_nonlin_eq) - nonlin_eq_targ ]
    if( calc_c ) {
      if( num_lin_eq_ ) {
        // c_lin = -lin_eq_targ, then accumulate A'*x on top of it.
        DVectorSlice c_lin = (*c)(1,num_lin_eq_);
        V_mV( &c_lin, my_vec_view(dakota_rsqp_opt_->lin_eq_targ()) );
        const Dakota::RealMatrix &lin_eq_jac = dakota_rsqp_opt_->lin_eq_jac();
        for( int j = 0; j < num_lin_eq_; ++j ) {
          for( int i = 0; i < n_orig_; ++i ) {
            c_lin[j] += lin_eq_jac[j][i] * x_orig[i];
          }
        }
      }
      if( num_nonlin_eq_ ) {
        const value_type
          *nonlin_eq_ptr = &const_cast<Dakota::RealVector&>(local_function_values)[num_obj_funcs+num_nonlin_ineq_];
        V_VmV(
          &(*c)(num_lin_eq_+1,num_lin_eq_+num_nonlin_eq_)
          ,DVectorSlice(const_cast<value_type*>(nonlin_eq_ptr),num_nonlin_eq_)
          ,my_vec_view(dakota_rsqp_opt_->nonlin_eq_targ())
          );
        // Save in dakota_functions
        std::copy( nonlin_eq_ptr, nonlin_eq_ptr + num_nonlin_eq_, &dakota_functions_[num_obj_funcs+num_nonlin_ineq_] );
      }
    }

    // h = [ A_lin_ineq' * x                                ]
    //     [ response(num_obj_funcs+1:num_obj_funs+num_lin_eq) ]
    if( calc_h ) {
      //
      if( num_lin_ineq_ ) {
        // NOTE(review): unlike c_lin above, h_lin is accumulated with +=
        // without being zeroed here first -- presumably *h arrives
        // zero-initialized from the caller; verify, else this reads
        // stale values.
        DVectorSlice h_lin = (*h)(1,num_lin_ineq_);
        const Dakota::RealMatrix &lin_ineq_jac = dakota_rsqp_opt_->lin_ineq_jac();
        for( int j = 0; j < num_lin_ineq_; ++j ) {
          for( int i = 0; i < n_orig_; ++i ) {
            h_lin[j] += lin_ineq_jac[j][i] * x_orig[i];
          }
        }
      }
      //
      if( num_nonlin_ineq_ ) {
        const value_type
          *nonlin_ineq_ptr = &const_cast<Dakota::RealVector&>(local_function_values)[num_obj_funcs];
        (*h)(num_lin_ineq_+1,num_lin_ineq_+num_nonlin_ineq_)
          = DVectorSlice(const_cast<value_type*>(nonlin_ineq_ptr),num_nonlin_ineq_);
        // Save in dakota_functions
        std::copy( nonlin_ineq_ptr, nonlin_ineq_ptr + num_nonlin_ineq_, &dakota_functions_[num_obj_funcs] );
      }
    }

    // Gc = [ A_lin_eq'                                      ]
    //      [ response.grad[num_nonlin_ineq+1]'              ]
    //      [ ..                                             ]
    //      [ response.grad[num_nonlin_ineq+num_nonlin_eq]'  ]
    //
    // Stored dense in coordinate format; (i,j) indices are 1-based.
    if( calc_Gc ) {
      index_type nz = 0;
      if( num_lin_eq_ ) {
        const Dakota::RealMatrix &lin_eq_jac = dakota_rsqp_opt_->lin_eq_jac();
        for( int j = 1; j <= num_lin_eq_; ++j ) {
          for( int i = 1; i <= n_orig_; ++i ) {
            (*Gc_val)[nz]   = lin_eq_jac[j-1][i-1];
            (*Gc_ivect)[nz] = i;
            (*Gc_jvect)[nz] = j;
            ++nz;
          }
        }
      }
      if( num_nonlin_eq_ ) {
        for( int j = 1; j <= num_nonlin_eq_; ++j ) {
          for( int i = 1; i <= n_orig_; ++i ) {
            (*Gc_val)[nz]   = local_function_gradients[num_obj_funcs+num_nonlin_ineq_+j-1][i-1];
            (*Gc_ivect)[nz] = i;
            (*Gc_jvect)[nz] = j + num_lin_eq_;
            ++nz;
          }
        }
      }
    }

    // Gh = [ A_lin_ineq'                     ]
    //      [ response.grad[1]'               ]
    //      [ ..                              ]
    //      [ response.grad[num_nonlin_eq]'   ]
    if( calc_Gh ) {
      index_type nz = 0;
      if( num_lin_ineq_ ) {
        const Dakota::RealMatrix &lin_ineq_jac = dakota_rsqp_opt_->lin_ineq_jac();
        for( int j = 1; j <= num_lin_ineq_; ++j ) {
          for( int i = 1; i <= n_orig_; ++i ) {
            (*Gh_val)[nz]   = lin_ineq_jac[j-1][i-1];
            (*Gh_ivect)[nz] = i;
            (*Gh_jvect)[nz] = j;
            ++nz;
          }
        }
      }
      if( num_nonlin_ineq_ ) {
        for( int j = 1; j <= num_nonlin_ineq_; ++j ) {
          for( int i = 1; i <= n_orig_; ++i ) {
            // NOTE(review): the column index is written as j, not
            // j + num_lin_ineq_ as the Gc branch does for its offset --
            // verify whether nonlinear inequality columns should be
            // offset past the linear ones.
            (*Gh_val)[nz]   = local_function_gradients[num_obj_funcs+j-1][i-1];
            (*Gh_ivect)[nz] = i;
            (*Gh_jvect)[nz] = j;
            ++nz;
          }
        }
      }
    }
  }
  else {
    //
    // The evaluation failed so just fill everything with
    // an invalid number that will trigger a back tracking
    //

    // f, Gf
    if(calc_f)  *f  = invalid_func_value_;
    if(calc_Gf) *Gf = invalid_func_value_;
    // c
    if(calc_c) *c = invalid_func_value_;
    // h
    if(calc_h) *h = invalid_func_value_;
    // Gc
    if(calc_Gc) {
      index_type nz = 0;
      for( int j = 1; j <= m_orig_; ++j ) {
        for( int i = 1; i <= n_orig_; ++i ) {
          (*Gc_val)[nz]   = invalid_func_value_;
          (*Gc_ivect)[nz] = i;
          (*Gc_jvect)[nz] = j;
          ++nz;
        }
      }
    }
    // Gh
    if( calc_Gh ) {
      index_type nz = 0;
      for( int j = 1; j <= mI_orig_; ++j ) {
        for( int i = 1; i <= n_orig_; ++i ) {
          (*Gh_val)[nz]   = invalid_func_value_;
          (*Gh_ivect)[nz] = i;
          (*Gh_jvect)[nz] = j;
          ++nz;
        }
      }
    }

  }

  // Record which quantities are now up to date at this point.
  if(calc_f)  f_orig_updated_  = true;
  if(calc_c)  c_orig_updated_  = true;
  if(calc_h)  h_orig_updated_  = true;
  if(calc_Gf) Gf_orig_updated_ = true;
  if(calc_Gc) Gc_orig_updated_ = true;
  if(calc_Gh) Gh_orig_updated_ = true;

}
635
636 } // end namespace NLPInterfacePack
637
638 namespace Dakota {
639
640 //
641 // rSQPOptimizer
642 //
643
644 // This constructor accepts a Model
// This constructor accepts a Model and wires up the NLPDakota adapter.
// The num*Constraints counts used below are presumably data members set by
// the Optimizer(model) base construction above -- TODO confirm they are
// valid before nlp_ is initialized.
rSQPOptimizer::rSQPOptimizer(Model& model)
  :Optimizer(model)
  ,model_(&model)
  ,nlp_(&model,this
    ,numLinearEqConstraints,numNonlinearEqConstraints
    ,numLinearIneqConstraints,numNonlinearIneqConstraints)
{}
652
// Nothing to release explicitly; members clean themselves up.
rSQPOptimizer::~rSQPOptimizer()
{}
655
// Drive the MOOCHO (rSQP++) solver over the wrapped NLPDakota problem and
// record the last point evaluated as the "best" result.
void rSQPOptimizer::find_optimum()
{

  //------------------------------------------------------------------
  // Solve the problem.
  //------------------------------------------------------------------

  namespace mmp = MemMngPack;
  namespace mp = MoochoPack;
  using mp::MoochoSolver;

  Cout << "\nCalling rSQP++ to solve the NLP! ...\n";

  // Only continuous variables are optimized; discrete variables in the
  // model are ignored.
  if(model_->dv()) {
    Cout << "\nWarning!!! The dakota model has " << model_->dv() << " discrete variables "
      << "associated with it that will be ignored by this optimizer!";
  }

  try {

    MoochoSolver solver;

    /*
    nlp_.initialize(true);

    // Terminate early!
    Cout << "\nrSQPOptimizer::find_optimum(): Terminate early!\n";
    bestVariablesArray.front().continuous_variables(model_->cv());
    RealVector dummy_func_values(model_->num_functions());
    bestResponseArray.front().function_values(dummy_func_values);
    return; // Don't do anything!
    */

    // rcp(ptr,false) creates non-owning references: nlp_ and Moocho_out
    // must outlive the solver, which they do within this scope.
    solver.set_nlp( mmp::rcp(&nlp_,false) );

    std::ofstream Moocho_out("MoochoConsole.out");
    solver.set_console_out(mmp::rcp(&Moocho_out,false));
    solver.set_error_handling(true,mmp::rcp(&Moocho_out,false));

    // NOTE(review): solution_status is never inspected -- the solver's
    // convergence status is not propagated back to Dakota; verify this
    // is intended.
    const MoochoSolver::ESolutionStatus
      solution_status = solver.solve_nlp();


  }
  catch(const std::exception& excpt) {
    std::cerr << "\nrSQPOptimizer::find_optimum(): Caught a std::exception: " << excpt.what() << std::endl;
  }
  catch(...) {
    std::cerr << "\nrSQPOptimizer::find_optimum(): Caught an unknown exception: \n";
  }

  //------------------------------------------------------------------
  // Save the final results.
  //------------------------------------------------------------------

  bestVariablesArray.front().continuous_variables(nlp_.dakota_x());       // Just use the last point computed
  bestResponseArray.front().function_values(nlp_.dakota_functions());     // ""

}
715
716 } // namespace Dakota
717