1 // Copyright (C) 2004, 2006 International Business Machines and others.
2 // All Rights Reserved.
3 // This code is published under the Common Public License.
4 //
5 // $Id: IpOrigIpoptNLP.cpp 765 2006-07-14 18:03:23Z andreasw $
6 //
7 // Authors:  Carl Laird, Andreas Waechter     IBM    2004-08-13
8 
9 #include "IpOrigIpoptNLP.hpp"
10 #include "IpLowRankUpdateSymMatrix.hpp"
11 #include "IpIpoptData.hpp"
12 #include "IpIpoptCalculatedQuantities.hpp"
13 
14 namespace SimTKIpopt
15 {
16 #ifdef IP_DEBUG
17   static const Index dbg_verbosity = 0;
18 #endif
19 
  // Construct the adapter that presents a user-supplied NLP to the Ipopt
  // algorithm.  No real work happens here: the dependency caches are sized
  // to hold one entry each, the evaluation counters start at zero, and all
  // structural setup is deferred to Initialize()/InitializeStructures().
  OrigIpoptNLP::OrigIpoptNLP(const SmartPtr<const Journalist>& jnlst,
                             const SmartPtr<NLP>& nlp,
                             const SmartPtr<NLPScalingObject>& nlp_scaling)
      :
      IpoptNLP(nlp_scaling),
      jnlst_(jnlst),
      nlp_(nlp),
      x_space_(NULL),
      f_cache_(1),        // one-entry caches: only the most recent
      grad_f_cache_(1),   // evaluation result is remembered
      c_cache_(1),
      jac_c_cache_(1),
      d_cache_(1),
      jac_d_cache_(1),
      h_cache_(1),
      f_evals_(0),        // evaluation counters, reset again in Initialize()
      grad_f_evals_(0),
      c_evals_(0),
      jac_c_evals_(0),
      d_evals_(0),
      jac_d_evals_(0),
      h_evals_(0),
      initialized_(false) // flipped to true at the end of Initialize()
  {}
44 
  // Trivial destructor: all members are smart pointers or value types, so
  // cleanup is automatic.
  OrigIpoptNLP::~OrigIpoptNLP()
  {}
47 
RegisterOptions(SmartPtr<RegisteredOptions> roptions)48   void OrigIpoptNLP::RegisterOptions(SmartPtr<RegisteredOptions> roptions)
49   {
50     roptions->AddLowerBoundedNumberOption(
51       "bound_relax_factor",
52       "Factor for initial relaxation of the bounds.",
53       0, false,
54       1e-8,
55       "Before start of the optimization, the bounds given by the user are "
56       "relaxed.  This option sets the factor for this relaxation.  If it "
57       "is set to zero, then then bounds relaxation is disabled. "
58       "(See Eqn.(35) in implementation paper.)");
59     roptions->AddStringOption2(
60       "honor_original_bounds",
61       "Indicates whether final points should be projected into original bounds.",
62       "yes",
63       "no", "Leave final point unchanged",
64       "yes", "Project final point back into original bounds",
65       "Ipopt might relax the bounds during the optimization (see, e.g., option "
66       "\"bound_relax_factor\").  This option determines whether the final "
67       "point should be projected back into the user-provide original bounds "
68       "after the optimization.");
69     roptions->SetRegisteringCategory("Warm Start");
70     roptions->AddStringOption2(
71       "warm_start_same_structure",
72       "Indicates whether a problem with a structure identical to the previous one is to be solved.",
73       "no",
74       "no", "Assume this is a new problem.",
75       "yes", "Assume this is problem has known structure",
76       "If \"yes\" is chosen, then the algorithm assumes that an NLP is now to "
77       "be solved, whose strcture is identical to one that already was "
78       "considered (with the same NLP object).");
79     roptions->SetRegisteringCategory("NLP");
80     roptions->AddStringOption2(
81       "check_derivatives_for_naninf",
82       "Indicates whether it is desired to check for Nan/Inf in derivative matrices",
83       "no",
84       "no", "Don't check (faster).",
85       "yes", "Check Jacobians and Hessian for Nan and Inf.",
86       "Activating this option will cause an error if an invalid number is "
87       "detected in the constraint Jacobians or the Lagrangian Hessian.  If "
88       "this is not activated, the test is skipped, and the algorithm might "
89       "proceed with invalid numbers and fail.");
90     roptions->SetRegisteringCategory("Hessian Approximation");
91     roptions->AddStringOption2(
92       "hessian_approximation",
93       "Indicates what Hessian information is to be used.",
94       "exact",
95       "exact", "Use second derivatives provided by the NLP.",
96       "limited-memory", "Perform a limited-memory quasi-Newton  approximation",
97       "This determines which kind of information for the Hessian of the "
98       "Lagrangian function is used by the algorithm.");
99   }
100 
Initialize(const Journalist & jnlst,const OptionsList & options,const std::string & prefix)101   bool OrigIpoptNLP::Initialize(const Journalist& jnlst,
102                                 const OptionsList& options,
103                                 const std::string& prefix)
104   {
105     options.GetNumericValue("bound_relax_factor", bound_relax_factor_, prefix);
106     options.GetBoolValue("honor_original_bounds",
107                          honor_original_bounds_, prefix);
108     options.GetBoolValue("warm_start_same_structure",
109                          warm_start_same_structure_, prefix);
110     options.GetBoolValue("check_derivatives_for_naninf",
111                          check_derivatives_for_naninf_, prefix);
112     Index enum_int;
113     options.GetEnumValue("hessian_approximation", enum_int, prefix);
114     hessian_approximation_ = HessianApproximationType(enum_int);
115 
116     // Reset the function evaluation counters (for warm start)
117     f_evals_=0;
118     grad_f_evals_=0;
119     c_evals_=0;
120     jac_c_evals_=0;
121     d_evals_=0;
122     jac_d_evals_=0;
123     h_evals_=0;
124 
125     // Reset the cache entries belonging to a dummy dependency.  This
126     // is required for repeated solve, since the cache is not updated
127     // if a dimension is zero
128     std::vector<const TaggedObject*> deps(1);
129     deps[0] = NULL;
130     std::vector<Number> sdeps(0);
131     c_cache_.InvalidateResult(deps, sdeps);
132     d_cache_.InvalidateResult(deps, sdeps);
133     jac_c_cache_.InvalidateResult(deps, sdeps);
134     jac_d_cache_.InvalidateResult(deps, sdeps);
135 
136     if (!nlp_->ProcessOptions(options, prefix)) {
137       return false;
138     }
139 
140     initialized_ = true;
141     return IpoptNLP::Initialize(jnlst, options, prefix);
142   }
143 
  // Set up all vector/matrix spaces from the NLP, determine scaling, create
  // the (relaxed and scaled) bound data, and allocate/initialize the iterate
  // vectors handed back to the caller.  With warm_start_same_structure the
  // spaces from the previous solve are reused instead of being rebuilt.
  // Returns false if the NLP refuses to provide spaces, bounds, or a
  // starting point; throws TOO_FEW_DOF / INVALID_WARMSTART on structural
  // errors.
  bool OrigIpoptNLP::InitializeStructures(SmartPtr<Vector>& x,
                                          bool init_x,
                                          SmartPtr<Vector>& y_c,
                                          bool init_y_c,
                                          SmartPtr<Vector>& y_d,
                                          bool init_y_d,
                                          SmartPtr<Vector>& z_L,
                                          bool init_z_L,
                                          SmartPtr<Vector>& z_U,
                                          bool init_z_U,
                                          SmartPtr<Vector>& v_L,
                                          SmartPtr<Vector>& v_U
                                         )
  {
    DBG_START_METH("OrigIpoptNLP::InitializeStructures", dbg_verbosity);
    DBG_ASSERT(initialized_);
    bool retValue;

    if (!warm_start_same_structure_) {

      // Fresh solve: query the NLP for all of its spaces.
      retValue = nlp_->GetSpaces(x_space_, c_space_, d_space_,
                                 x_l_space_, px_l_space_,
                                 x_u_space_, px_u_space_,
                                 d_l_space_, pd_l_space_,
                                 d_u_space_, pd_u_space_,
                                 jac_c_space_, jac_d_space_,
                                 h_space_);

      if (!retValue) {
        return false;
      }

      // Check if the Hessian space is actually a limited-memory
      // approximation.  If so, get the required information from the
      // NLP and create an appropreate h_space
      if (hessian_approximation_==LIMITED_MEMORY) {
        SmartPtr<VectorSpace> approx_vecspace;
        SmartPtr<Matrix> P_approx;
        nlp_->GetQuasiNewtonApproximationSpaces(approx_vecspace,
                                                P_approx);
        if (IsValid(approx_vecspace)) {
          // The NLP wants the quasi-Newton update done in a smaller
          // subspace, described by the projection matrix P_approx.
          DBG_ASSERT(IsValid(P_approx));
          h_space_ = new LowRankUpdateSymMatrixSpace(x_space_->Dim(),
                     ConstPtr(P_approx),
                     ConstPtr(approx_vecspace),
                     true);
          jnlst_->Printf(J_DETAILED, J_INITIALIZATION,
                         "Hessian approximation will be done in smaller space of dimension %d (instead of %d)\n\n",
                         P_approx->NCols(), P_approx->NRows());
        }
        else {
          // No subspace provided: approximate in the full x space.
          DBG_ASSERT(IsNull(P_approx));
          h_space_ = new LowRankUpdateSymMatrixSpace(x_space_->Dim(),
                     ConstPtr(P_approx),
                     ConstPtr(x_space_),
                     true);
          jnlst_->Printf(J_DETAILED, J_INITIALIZATION,
                         "Hessian approximation will be done in the space of all %d x variables.\n\n",
                         x_space_->Dim());
        }
      }

      // Let the scaling object compute its scaling factors and hand back
      // the scaled matrix spaces used for Jacobians and Hessian.
      NLP_scaling()->DetermineScaling(x_space_,
                                      c_space_, d_space_,
                                      jac_c_space_, jac_d_space_,
                                      h_space_,
                                      scaled_jac_c_space_, scaled_jac_d_space_,
                                      scaled_h_space_);

      // Sanity checks: at least as many variables as equality constraints,
      // and at least one free variable.
      ASSERT_EXCEPTION(x_space_->Dim() >= c_space_->Dim(), TOO_FEW_DOF,
                       "Too few degrees of freedom!");
      ASSERT_EXCEPTION(x_space_->Dim() > 0, TOO_FEW_DOF,
                       "Too few degrees of freedom (no free variables)!");

      // cannot have any null pointers, want zero length vectors
      // instead of null - this will later need to be changed for _h;
      retValue = (IsValid(x_space_) && IsValid(c_space_) && IsValid(d_space_)
                  && IsValid(x_l_space_) && IsValid(px_l_space_)
                  && IsValid(x_u_space_) && IsValid(px_u_space_)
                  && IsValid(d_u_space_) && IsValid(pd_u_space_)
                  && IsValid(d_l_space_) && IsValid(pd_l_space_)
                  && IsValid(jac_c_space_) && IsValid(jac_d_space_)
                  && IsValid(h_space_)
                  && IsValid(scaled_jac_c_space_)
                  && IsValid(scaled_jac_d_space_)
                  && IsValid(scaled_h_space_));

      DBG_ASSERT(retValue && "Model cannot return null vector or matrix prototypes or spaces,"
                 " please return zero length vectors instead");
    }
    else {
      // Warm start with identical structure: the spaces must already exist
      // from a previous solve.
      ASSERT_EXCEPTION(IsValid(x_space_), INVALID_WARMSTART,
                       "OrigIpoptNLP called with warm_start_same_structure, but the problem is solved for the first time.");
    }

    // Create the bounds structures
    SmartPtr<Vector> x_L = x_l_space_->MakeNew();
    SmartPtr<Matrix> Px_L = px_l_space_->MakeNew();
    SmartPtr<Vector> x_U = x_u_space_->MakeNew();
    SmartPtr<Matrix> Px_U = px_u_space_->MakeNew();
    SmartPtr<Vector> d_L = d_l_space_->MakeNew();
    SmartPtr<Matrix> Pd_L = pd_l_space_->MakeNew();
    SmartPtr<Vector> d_U = d_u_space_->MakeNew();
    SmartPtr<Matrix> Pd_U = pd_u_space_->MakeNew();

    retValue = nlp_->GetBoundsInformation(*Px_L, *x_L, *Px_U, *x_U,
                                          *Pd_L, *d_L, *Pd_U, *d_U);

    if (!retValue) {
      return false;
    }

    x_L->Print(*jnlst_, J_MOREVECTOR, J_INITIALIZATION,
               "original x_L unscaled");
    x_U->Print(*jnlst_, J_MOREVECTOR, J_INITIALIZATION,
               "original x_U unscaled");
    d_L->Print(*jnlst_, J_MOREVECTOR, J_INITIALIZATION,
               "original d_L unscaled");
    d_U->Print(*jnlst_, J_MOREVECTOR, J_INITIALIZATION,
               "original d_U unscaled");

    // Keep unrelaxed copies of the variable bounds so the final point can
    // be projected back into them (see FinalizeSolution).
    if (honor_original_bounds_) {
      SmartPtr<Vector> tmp;
      tmp = x_L->MakeNewCopy();
      orig_x_L_ = ConstPtr(tmp);
      tmp = x_U->MakeNewCopy();
      orig_x_U_ = ConstPtr(tmp);
    }

    // Relax lower bounds downward and upper bounds upward by
    // bound_relax_factor * max(1, |bound|).
    relax_bounds(-bound_relax_factor_, *x_L);
    relax_bounds( bound_relax_factor_, *x_U);
    relax_bounds(-bound_relax_factor_, *d_L);
    relax_bounds( bound_relax_factor_, *d_U);

    x_L_ = ConstPtr(x_L);
    Px_L_ = ConstPtr(Px_L);
    x_U_ = ConstPtr(x_U);
    Px_U_ = ConstPtr(Px_U);
    d_L_ = ConstPtr(d_L);
    Pd_L_ = ConstPtr(Pd_L);
    d_U_ = ConstPtr(d_U);
    Pd_U_ = ConstPtr(Pd_U);

    // now create and store the scaled bounds
    x_L_ = NLP_scaling()->apply_vector_scaling_x_LU(*Px_L_, x_L_, *x_space_);
    x_U_ = NLP_scaling()->apply_vector_scaling_x_LU(*Px_U_, x_U_, *x_space_);
    d_L_ = NLP_scaling()->apply_vector_scaling_d_LU(*Pd_L_, d_L_, *d_space_);
    d_U_ = NLP_scaling()->apply_vector_scaling_d_LU(*Pd_U_, d_U_, *d_space_);

    // NOTE(review): these print the vectors behind the non-const handles
    // x_L/x_U/d_L/d_U; whether those reflect the scaling applied above
    // depends on whether apply_vector_scaling_*_LU scales in place or
    // returns a new vector -- confirm against the scaling object.
    x_L->Print(*jnlst_, J_VECTOR, J_INITIALIZATION,
               "modified x_L scaled");
    x_U->Print(*jnlst_, J_VECTOR, J_INITIALIZATION,
               "modified x_U scaled");
    d_L->Print(*jnlst_, J_VECTOR, J_INITIALIZATION,
               "modified d_L scaled");
    d_U->Print(*jnlst_, J_VECTOR, J_INITIALIZATION,
               "modified d_U scaled");

    // Create the iterates structures
    x = x_space_->MakeNew();
    y_c = c_space_->MakeNew();
    y_d = d_space_->MakeNew();
    z_L = x_l_space_->MakeNew();
    z_U = x_u_space_->MakeNew();
    v_L = d_l_space_->MakeNew();
    v_U = d_u_space_->MakeNew();

    // Note: v_L and v_U are only allocated here; the NLP is not asked for
    // starting values for them.
    retValue = nlp_->GetStartingPoint(GetRawPtr(x), init_x,
                                      GetRawPtr(y_c), init_y_c,
                                      GetRawPtr(y_d), init_y_d,
                                      GetRawPtr(z_L), init_z_L,
                                      GetRawPtr(z_U), init_z_U);

    if (!retValue) {
      return false;
    }


    // Transform the user's starting point into the scaled space.  The
    // objective scaling factor also multiplies the constraint/bound
    // multipliers, since it appears in the Lagrangian.
    Number obj_scal = NLP_scaling()->apply_obj_scaling(1.);
    if (init_x) {
      x->Print(*jnlst_, J_VECTOR, J_INITIALIZATION, "initial x unscaled");
      if (NLP_scaling()->have_x_scaling()) {
        x = NLP_scaling()->apply_vector_scaling_x_NonConst(ConstPtr(x));
      }
    }
    if (init_y_c) {
      y_c->Print(*jnlst_, J_VECTOR, J_INITIALIZATION, "initial y_c unscaled");
      if (NLP_scaling()->have_c_scaling()) {
        y_c = NLP_scaling()->unapply_vector_scaling_c_NonConst(ConstPtr(y_c));
      }
      if (obj_scal!=1.) {
        y_c->Scal(obj_scal);
      }
    }
    if (init_y_d) {
      y_d->Print(*jnlst_, J_VECTOR, J_INITIALIZATION, "initial y_d unscaled");
      if (NLP_scaling()->have_d_scaling()) {
        y_d = NLP_scaling()->unapply_vector_scaling_d_NonConst(ConstPtr(y_d));
      }
      if (obj_scal!=1.) {
        y_d->Scal(obj_scal);
      }
    }
    if (init_z_L) {
      z_L->Print(*jnlst_, J_VECTOR, J_INITIALIZATION, "initial z_L unscaled");
      if (NLP_scaling()->have_x_scaling()) {
        z_L = NLP_scaling()->apply_vector_scaling_x_LU_NonConst(*Px_L_, ConstPtr(z_L), *x_space_);
      }
      if (obj_scal!=1.) {
        z_L->Scal(obj_scal);
      }
    }
    if (init_z_U) {
      z_U->Print(*jnlst_, J_VECTOR, J_INITIALIZATION, "initial z_U unscaled");
      if (NLP_scaling()->have_x_scaling()) {
        z_U = NLP_scaling()->apply_vector_scaling_x_LU_NonConst(*Px_U_, ConstPtr(z_U), *x_space_);
      }
      if (obj_scal!=1.) {
        z_U->Scal(obj_scal);
      }
    }

    return true;
  }
368 
369   void
relax_bounds(Number bound_relax_factor,Vector & bounds)370   OrigIpoptNLP::relax_bounds(Number bound_relax_factor, Vector& bounds)
371   {
372     DBG_START_METH("OrigIpoptNLP::relax_bounds", dbg_verbosity);
373     if (bound_relax_factor!=0.) {
374       SmartPtr<Vector> tmp = bounds.MakeNew();
375       tmp->Copy(bounds);
376       tmp->ElementWiseAbs();
377       SmartPtr<Vector> ones = bounds.MakeNew();
378       ones->Set(1.);
379       tmp->ElementWiseMax(*ones);
380       DBG_PRINT((1, "bound_relax_factor = %e", bound_relax_factor));
381       DBG_PRINT_VECTOR(2, "tmp", *tmp);
382       DBG_PRINT_VECTOR(2, "bounds before", bounds);
383       bounds.Axpy(bound_relax_factor, *tmp);
384       DBG_PRINT_VECTOR(2, "bounds after", bounds);
385     }
386   }
387 
f(const Vector & x)388   Number OrigIpoptNLP::f(const Vector& x)
389   {
390     x.Dot(x);
391     x.Dot(x);
392     DBG_START_METH("OrigIpoptNLP::f", dbg_verbosity);
393     Number ret = 0.0;
394     DBG_PRINT((2, "x.Tag = %d\n", x.GetTag()));
395     if (!f_cache_.GetCachedResult1Dep(ret, &x)) {
396       f_evals_++;
397       SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
398       f_eval_time_.Start();
399       bool success = nlp_->Eval_f(*unscaled_x, ret);
400       f_eval_time_.End();
401       DBG_PRINT((1, "success = %d ret = %e\n", success, ret));
402       ASSERT_EXCEPTION(success && IsFiniteNumber(ret), Eval_Error,
403                        "Error evaluating the objective function");
404       ret = NLP_scaling()->apply_obj_scaling(ret);
405       f_cache_.AddCachedResult1Dep(ret, &x);
406     }
407 
408     return ret;
409   }
410 
f(const Vector & x,Number mu)411   Number OrigIpoptNLP::f(const Vector& x, Number mu)
412   {
413     assert(false && "ERROR: This method is only a placeholder for f(mu) and should not be called");
414     return 0.;
415   }
416 
grad_f(const Vector & x)417   SmartPtr<const Vector> OrigIpoptNLP::grad_f(const Vector& x)
418   {
419     SmartPtr<Vector> unscaled_grad_f;
420     SmartPtr<const Vector> retValue;
421     if (!grad_f_cache_.GetCachedResult1Dep(retValue, &x)) {
422       grad_f_evals_++;
423       unscaled_grad_f = x_space_->MakeNew();
424 
425       SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
426       grad_f_eval_time_.Start();
427       bool success = nlp_->Eval_grad_f(*unscaled_x, *unscaled_grad_f);
428       grad_f_eval_time_.End();
429       ASSERT_EXCEPTION(success && IsFiniteNumber(unscaled_grad_f->Nrm2()),
430                        Eval_Error, "Error evaluating the gradient of the objective function");
431       retValue = NLP_scaling()->apply_grad_obj_scaling(ConstPtr(unscaled_grad_f));
432       grad_f_cache_.AddCachedResult1Dep(retValue, &x);
433     }
434 
435     return retValue;
436   }
437 
  // Placeholder required by the IpoptNLP interface; the original NLP's
  // objective gradient does not depend on the barrier parameter mu, so
  // calling this overload is always a programming error.
  SmartPtr<const Vector> OrigIpoptNLP::grad_f(const Vector& x, Number mu)
  {
    THROW_EXCEPTION(INTERNAL_ABORT,
                    "ERROR: This method is only a placeholder for grad_f(mu) and should not be called");
    return NULL;
  }
444 
445   /** Equality constraint residual */
c(const Vector & x)446   SmartPtr<const Vector> OrigIpoptNLP::c(const Vector& x)
447   {
448     SmartPtr<const Vector> retValue;
449     if (c_space_->Dim()==0) {
450       // We do this caching of an empty vector so that the returned
451       // Vector has always the same tag (this might make a difference
452       // in cases where only the constraints are supposed to change...
453       SmartPtr<const Vector> dep = NULL;
454       if (!c_cache_.GetCachedResult1Dep(retValue, GetRawPtr(dep))) {
455         retValue = c_space_->MakeNew();
456         c_cache_.AddCachedResult1Dep(retValue, GetRawPtr(dep));
457       }
458     }
459     else {
460       if (!c_cache_.GetCachedResult1Dep(retValue, x)) {
461         SmartPtr<Vector> unscaled_c = c_space_->MakeNew();
462         c_evals_++;
463         SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
464         c_eval_time_.Start();
465         bool success = nlp_->Eval_c(*unscaled_x, *unscaled_c);
466         c_eval_time_.End();
467         ASSERT_EXCEPTION(success && IsFiniteNumber(unscaled_c->Nrm2()),
468                          Eval_Error, "Error evaluating the equality constraints");
469         retValue = NLP_scaling()->apply_vector_scaling_c(ConstPtr(unscaled_c));
470         c_cache_.AddCachedResult1Dep(retValue, x);
471       }
472     }
473 
474     return retValue;
475   }
476 
d(const Vector & x)477   SmartPtr<const Vector> OrigIpoptNLP::d(const Vector& x)
478   {
479     DBG_START_METH("OrigIpoptNLP::d", dbg_verbosity);
480     SmartPtr<const Vector> retValue;
481     if (d_space_->Dim()==0) {
482       // We do this caching of an empty vector so that the returned
483       // Vector has always the same tag (this might make a difference
484       // in cases where only the constraints are supposed to change...
485       SmartPtr<const Vector> dep = NULL;
486       if (!d_cache_.GetCachedResult1Dep(retValue, GetRawPtr(dep))) {
487         retValue = d_space_->MakeNew();
488         d_cache_.AddCachedResult1Dep(retValue, GetRawPtr(dep));
489       }
490     }
491     else {
492       if (!d_cache_.GetCachedResult1Dep(retValue, x)) {
493         d_evals_++;
494         SmartPtr<Vector> unscaled_d = d_space_->MakeNew();
495 
496         DBG_PRINT_VECTOR(2, "scaled_x", x);
497         SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
498         d_eval_time_.Start();
499         bool success = nlp_->Eval_d(*unscaled_x, *unscaled_d);
500         d_eval_time_.End();
501         DBG_PRINT_VECTOR(2, "unscaled_d", *unscaled_d);
502         ASSERT_EXCEPTION(success && IsFiniteNumber(unscaled_d->Nrm2()),
503                          Eval_Error, "Error evaluating the inequality constraints");
504         retValue = NLP_scaling()->apply_vector_scaling_d(ConstPtr(unscaled_d));
505         d_cache_.AddCachedResult1Dep(retValue, x);
506       }
507     }
508 
509     return retValue;
510   }
511 
  // Jacobian of the equality constraints at the (scaled) iterate x,
  // returned in the scaled matrix space.  Results are cached on the tag of
  // x; a zero-dimensional c-space is served from a single cached empty
  // (scaled) matrix.  Throws Eval_Error on evaluation failure or -- when
  // option "check_derivatives_for_naninf" is active -- on NaN/Inf entries.
  SmartPtr<const Matrix> OrigIpoptNLP::jac_c(const Vector& x)
  {
    SmartPtr<const Matrix> retValue;
    if (c_space_->Dim()==0) {
      // We do this caching of an empty vector so that the returned
      // Matrix has always the same tag
      SmartPtr<const Vector> dep = NULL;
      if (!jac_c_cache_.GetCachedResult1Dep(retValue, GetRawPtr(dep))) {
        SmartPtr<Matrix> unscaled_jac_c = jac_c_space_->MakeNew();
        retValue = NLP_scaling()->apply_jac_c_scaling(ConstPtr(unscaled_jac_c));
        jac_c_cache_.AddCachedResult1Dep(retValue, GetRawPtr(dep));
      }
    }
    else {
      if (!jac_c_cache_.GetCachedResult1Dep(retValue, x)) {
        jac_c_evals_++;
        SmartPtr<Matrix> unscaled_jac_c = jac_c_space_->MakeNew();

        // Evaluate in the user's (unscaled) variables and time the callback.
        SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
        jac_c_eval_time_.Start();
        bool success = nlp_->Eval_jac_c(*unscaled_x, *unscaled_jac_c);
        jac_c_eval_time_.End();
        ASSERT_EXCEPTION(success, Eval_Error, "Error evaluating the jacobian of the equality constraints");
        // Optional NaN/Inf screening (option "check_derivatives_for_naninf").
        if (check_derivatives_for_naninf_) {
          if (!unscaled_jac_c->HasValidNumbers()) {
            jnlst_->Printf(J_WARNING, J_NLP,
                           "The Jacobian for the equality constraints contains an invalid number\n");
            THROW_EXCEPTION(Eval_Error, "The Jacobian for the equality constraints contains an invalid number");
          }
        }
        retValue = NLP_scaling()->apply_jac_c_scaling(ConstPtr(unscaled_jac_c));
        jac_c_cache_.AddCachedResult1Dep(retValue, x);
      }
    }

    return retValue;
  }
549 
jac_d(const Vector & x)550   SmartPtr<const Matrix> OrigIpoptNLP::jac_d(const Vector& x)
551   {
552     SmartPtr<const Matrix> retValue;
553     if (d_space_->Dim()==0) {
554       // We do this caching of an empty vector so that the returned
555       // Matrix has always the same tag
556       SmartPtr<const Vector> dep = NULL;
557       if (!jac_d_cache_.GetCachedResult1Dep(retValue, GetRawPtr(dep))) {
558         SmartPtr<Matrix> unscaled_jac_d = jac_d_space_->MakeNew();
559         retValue = NLP_scaling()->apply_jac_d_scaling(ConstPtr(unscaled_jac_d));
560         jac_d_cache_.AddCachedResult1Dep(retValue, GetRawPtr(dep));
561       }
562     }
563     else {
564       if (!jac_d_cache_.GetCachedResult1Dep(retValue, x)) {
565         jac_d_evals_++;
566         SmartPtr<Matrix> unscaled_jac_d = jac_d_space_->MakeNew();
567 
568         SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
569         jac_d_eval_time_.Start();
570         bool success = nlp_->Eval_jac_d(*unscaled_x, *unscaled_jac_d);
571         jac_d_eval_time_.End();
572         ASSERT_EXCEPTION(success, Eval_Error, "Error evaluating the jacobian of the inequality constraints");
573         retValue = NLP_scaling()->apply_jac_d_scaling(ConstPtr(unscaled_jac_d));
574         jac_d_cache_.AddCachedResult1Dep(retValue, x);
575       }
576     }
577 
578     return retValue;
579   }
580 
  // Return a fresh, unfilled symmetric matrix with the Hessian's structure
  // (possibly the limited-memory space set up in InitializeStructures);
  // the caller is responsible for filling it.
  SmartPtr<const SymMatrix> OrigIpoptNLP::uninitialized_h()
  {
    return h_space_->MakeNewSymMatrix();
  }
585 
h(const Vector & x,Number obj_factor,const Vector & yc,const Vector & yd)586   SmartPtr<const SymMatrix> OrigIpoptNLP::h(const Vector& x,
587       Number obj_factor,
588       const Vector& yc,
589       const Vector& yd)
590   {
591     std::vector<const TaggedObject*> deps(3);
592     deps[0] = &x;
593     deps[1] = &yc;
594     deps[2] = &yd;
595     std::vector<Number> scalar_deps(1);
596     scalar_deps[0] = obj_factor;
597 
598     SmartPtr<SymMatrix> unscaled_h;
599     SmartPtr<const SymMatrix> retValue;
600     if (!h_cache_.GetCachedResult(retValue, deps, scalar_deps)) {
601       h_evals_++;
602       unscaled_h = h_space_->MakeNewSymMatrix();
603 
604       SmartPtr<const Vector> unscaled_x = NLP_scaling()->unapply_vector_scaling_x(&x);
605       SmartPtr<const Vector> unscaled_yc = NLP_scaling()->apply_vector_scaling_c(&yc);
606       SmartPtr<const Vector> unscaled_yd = NLP_scaling()->apply_vector_scaling_d(&yd);
607       Number scaled_obj_factor = NLP_scaling()->apply_obj_scaling(obj_factor);
608       h_eval_time_.Start();
609       bool success = nlp_->Eval_h(*unscaled_x, scaled_obj_factor, *unscaled_yc, *unscaled_yd, *unscaled_h);
610       h_eval_time_.End();
611       ASSERT_EXCEPTION(success, Eval_Error, "Error evaluating the hessian of the lagrangian");
612       retValue = NLP_scaling()->apply_hessian_scaling(ConstPtr(unscaled_h));
613       h_cache_.AddCachedResult(retValue, deps, scalar_deps);
614     }
615 
616     return retValue;
617   }
618 
h(const Vector & x,Number obj_factor,const Vector & yc,const Vector & yd,Number mu)619   SmartPtr<const SymMatrix> OrigIpoptNLP::h(const Vector& x,
620       Number obj_factor,
621       const Vector& yc,
622       const Vector& yd,
623       Number mu)
624   {
625     THROW_EXCEPTION(INTERNAL_ABORT,
626                     "ERROR: This method is only a for h(mu) and should not be called");
627     return NULL;
628   }
629 
630 
GetSpaces(SmartPtr<const VectorSpace> & x_space,SmartPtr<const VectorSpace> & c_space,SmartPtr<const VectorSpace> & d_space,SmartPtr<const VectorSpace> & x_l_space,SmartPtr<const MatrixSpace> & px_l_space,SmartPtr<const VectorSpace> & x_u_space,SmartPtr<const MatrixSpace> & px_u_space,SmartPtr<const VectorSpace> & d_l_space,SmartPtr<const MatrixSpace> & pd_l_space,SmartPtr<const VectorSpace> & d_u_space,SmartPtr<const MatrixSpace> & pd_u_space,SmartPtr<const MatrixSpace> & Jac_c_space,SmartPtr<const MatrixSpace> & Jac_d_space,SmartPtr<const SymMatrixSpace> & Hess_lagrangian_space)631   void OrigIpoptNLP::GetSpaces(SmartPtr<const VectorSpace>& x_space,
632                                SmartPtr<const VectorSpace>& c_space,
633                                SmartPtr<const VectorSpace>& d_space,
634                                SmartPtr<const VectorSpace>& x_l_space,
635                                SmartPtr<const MatrixSpace>& px_l_space,
636                                SmartPtr<const VectorSpace>& x_u_space,
637                                SmartPtr<const MatrixSpace>& px_u_space,
638                                SmartPtr<const VectorSpace>& d_l_space,
639                                SmartPtr<const MatrixSpace>& pd_l_space,
640                                SmartPtr<const VectorSpace>& d_u_space,
641                                SmartPtr<const MatrixSpace>& pd_u_space,
642                                SmartPtr<const MatrixSpace>& Jac_c_space,
643                                SmartPtr<const MatrixSpace>& Jac_d_space,
644                                SmartPtr<const SymMatrixSpace>& Hess_lagrangian_space)
645   {
646     // Make sure that we already have all the pointers
647     DBG_ASSERT(IsValid(x_space_) &&
648                IsValid(c_space_) &&
649                IsValid(d_space_) &&
650                IsValid(x_l_space_) &&
651                IsValid(px_l_space_) &&
652                IsValid(x_u_space_) &&
653                IsValid(px_u_space_) &&
654                IsValid(d_l_space_) &&
655                IsValid(pd_l_space_) &&
656                IsValid(d_u_space_) &&
657                IsValid(pd_u_space_) &&
658                IsValid(scaled_jac_c_space_) &&
659                IsValid(scaled_jac_d_space_) &&
660                IsValid(scaled_h_space_));
661 
662     DBG_ASSERT(IsValid(NLP_scaling()));
663 
664     x_space = x_space_;
665     c_space = c_space_;
666     d_space = d_space_;
667     x_l_space = x_l_space_;
668     px_l_space = px_l_space_;
669     x_u_space = x_u_space_;
670     px_u_space = px_u_space_;
671     d_l_space = d_l_space_;
672     pd_l_space = pd_l_space_;
673     d_u_space = d_u_space_;
674     pd_u_space = pd_u_space_;
675     Jac_c_space = scaled_jac_c_space_;
676     Jac_d_space = scaled_jac_d_space_;
677     Hess_lagrangian_space = scaled_h_space_;
678   }
679 
  // Translates the final iterate from Ipopt's internally scaled space back
  // into the user's original (unscaled) space and hands it to the NLP.
  // Optionally clips x back into the user-specified bounds, which were
  // relaxed at the start of the optimization (see bound_relax_factor).
  void OrigIpoptNLP::FinalizeSolution(SolverReturn status,
                                      const Vector& x, const Vector& z_L, const Vector& z_U,
                                      const Vector& c, const Vector& d,
                                      const Vector& y_c, const Vector& y_d,
                                      Number obj_value)
  {
    DBG_START_METH("OrigIpoptNLP::FinalizeSolution", dbg_verbosity);
    // need to submit the unscaled solution back to the nlp
    SmartPtr<const Vector> unscaled_x =
      NLP_scaling()->unapply_vector_scaling_x(&x);
    SmartPtr<const Vector> unscaled_c =
      NLP_scaling()->unapply_vector_scaling_c(&c);
    SmartPtr<const Vector> unscaled_d =
      NLP_scaling()->unapply_vector_scaling_d(&d);
    const Number unscaled_obj = NLP_scaling()->unapply_obj_scaling(obj_value);

    SmartPtr<const Vector> unscaled_z_L;
    SmartPtr<const Vector> unscaled_z_U;
    SmartPtr<const Vector> unscaled_y_c;
    SmartPtr<const Vector> unscaled_y_d;

    // The objective function scaling factor also appears in the constraints
    // (the multipliers are scaled by it), so unscaling the duals requires
    // both the variable/constraint scaling and the objective scaling factor.
    Number obj_unscale_factor = NLP_scaling()->unapply_obj_scaling(1.);
    if (obj_unscale_factor!=1.) {
      // Nontrivial objective scaling: unscale each multiplier vector and
      // then remove the objective scaling factor with an explicit Scal.
      SmartPtr<Vector> tmp = NLP_scaling()->apply_vector_scaling_x_LU_NonConst(*Px_L_, &z_L, *x_space_);
      tmp->Scal(obj_unscale_factor);
      unscaled_z_L = ConstPtr(tmp);

      tmp = NLP_scaling()->apply_vector_scaling_x_LU_NonConst(*Px_U_, &z_U, *x_space_);
      tmp->Scal(obj_unscale_factor);
      unscaled_z_U = ConstPtr(tmp);

      tmp = NLP_scaling()->apply_vector_scaling_c_NonConst(&y_c);
      tmp->Scal(obj_unscale_factor);
      unscaled_y_c = ConstPtr(tmp);

      tmp = NLP_scaling()->apply_vector_scaling_d_NonConst(&y_d);
      tmp->Scal(obj_unscale_factor);
      unscaled_y_d = ConstPtr(tmp);
    }
    else {
      // Objective scaling factor is 1: no extra rescaling of the duals needed.
      unscaled_z_L = NLP_scaling()->apply_vector_scaling_x_LU(*Px_L_, &z_L, *x_space_);
      unscaled_z_U = NLP_scaling()->apply_vector_scaling_x_LU(*Px_U_, &z_U, *x_space_);
      unscaled_y_c = NLP_scaling()->apply_vector_scaling_c(&y_c);
      unscaled_y_d = NLP_scaling()->apply_vector_scaling_d(&y_d);
    }

    if (honor_original_bounds_ && (Px_L_->NCols()>0 || Px_U_->NCols()>0)) {
      // Make sure the user specified bounds are satisfied: project the
      // unscaled x back into [orig_x_L_, orig_x_U_].  For each bound side,
      // the pattern is: extract the bounded components (TransMultVector),
      // subtract them out of un_x, clip them against the original bounds
      // (ElementWiseMax/Min), and add the clipped values back in.
      SmartPtr<Vector> tmp;
      SmartPtr<Vector> un_x = unscaled_x->MakeNewCopy();
      if (Px_L_->NCols()>0) {
        tmp = orig_x_L_->MakeNewCopy();
        Px_L_->TransMultVector(1., *un_x, 0., *tmp);
        Px_L_->MultVector(-1., *tmp, 1., *un_x);
        tmp->ElementWiseMax(*orig_x_L_);
        Px_L_->MultVector(1., *tmp, 1., *un_x);
      }
      if (Px_U_->NCols()>0) {
        tmp = orig_x_U_->MakeNewCopy();
        Px_U_->TransMultVector(1., *un_x, 0., *tmp);
        Px_U_->MultVector(-1., *tmp, 1., *un_x);
        tmp->ElementWiseMin(*orig_x_U_);
        Px_U_->MultVector(1., *tmp, 1., *un_x);
      }
      unscaled_x = ConstPtr(un_x);
    }

    // Log the unscaled solution at J_VECTOR verbosity before handing it back.
    unscaled_x->Print(*jnlst_, J_VECTOR, J_SOLUTION, "final x unscaled");
    unscaled_y_c->Print(*jnlst_, J_VECTOR, J_SOLUTION, "final y_c unscaled");
    unscaled_y_d->Print(*jnlst_, J_VECTOR, J_SOLUTION, "final y_d unscaled");
    unscaled_z_L->Print(*jnlst_, J_VECTOR, J_SOLUTION, "final z_L unscaled");
    unscaled_z_U->Print(*jnlst_, J_VECTOR, J_SOLUTION, "final z_U unscaled");

    nlp_->FinalizeSolution(status, *unscaled_x,
                           *unscaled_z_L, *unscaled_z_U,
                           *unscaled_c, *unscaled_d,
                           *unscaled_y_c, *unscaled_y_d,
                           unscaled_obj);
  }
760 
IntermediateCallBack(AlgorithmMode mode,Index iter,Number obj_value,Number inf_pr,Number inf_du,Number mu,Number d_norm,Number regularization_size,Number alpha_du,Number alpha_pr,Index ls_trials,SmartPtr<const IpoptData> ip_data,SmartPtr<IpoptCalculatedQuantities> ip_cq)761   bool OrigIpoptNLP::IntermediateCallBack(AlgorithmMode mode,
762                                           Index iter, Number obj_value,
763                                           Number inf_pr, Number inf_du,
764                                           Number mu, Number d_norm,
765                                           Number regularization_size,
766                                           Number alpha_du, Number alpha_pr,
767                                           Index ls_trials,
768                                           SmartPtr<const IpoptData> ip_data,
769                                           SmartPtr<IpoptCalculatedQuantities> ip_cq)
770   {
771     return nlp_->IntermediateCallBack(mode, iter, obj_value, inf_pr, inf_du,
772                                       mu, d_norm, regularization_size,
773                                       alpha_du, alpha_pr, ls_trials,
774                                       GetRawPtr(ip_data), GetRawPtr(ip_cq));
775   }
776 
AdjustVariableBounds(const Vector & new_x_L,const Vector & new_x_U,const Vector & new_d_L,const Vector & new_d_U)777   void OrigIpoptNLP::AdjustVariableBounds(const Vector& new_x_L, const Vector& new_x_U,
778                                           const Vector& new_d_L, const Vector& new_d_U)
779   {
780     x_L_ = new_x_L.MakeNewCopy();
781     x_U_ = new_x_U.MakeNewCopy();
782     d_L_ = new_d_L.MakeNewCopy();
783     d_U_ = new_d_U.MakeNewCopy();
784   }
785 
786   void
PrintTimingStatistics(Journalist & jnlst,EJournalLevel level,EJournalCategory category) const787   OrigIpoptNLP::PrintTimingStatistics(
788     Journalist& jnlst,
789     EJournalLevel level,
790     EJournalCategory category) const
791   {
792     if (!jnlst.ProduceOutput(level, category))
793       return;
794 
795     jnlst.Printf(level, category,
796                  "Function Evaluations................: %10.3f\n",
797                  TotalFunctionEvaluationCPUTime());
798     jnlst.Printf(level, category,
799                  " Objective function.................: %10.3f\n",
800                  f_eval_time_.TotalTime());
801     jnlst.Printf(level, category,
802                  " Equality constraints...............: %10.3f\n",
803                  c_eval_time_.TotalTime());
804     jnlst.Printf(level, category,
805                  " Inequality constraints.............: %10.3f\n",
806                  d_eval_time_.TotalTime());
807     jnlst.Printf(level, category,
808                  " Equality constraint Jacobian.......: %10.3f\n",
809                  jac_c_eval_time_.TotalTime());
810     jnlst.Printf(level, category,
811                  " Inequality constraint Jacobian.....: %10.3f\n",
812                  jac_d_eval_time_.TotalTime());
813     jnlst.Printf(level, category,
814                  " Lagrangian Hessian.................: %10.3f\n",
815                  h_eval_time_.TotalTime());
816   }
817 
818   Number
TotalFunctionEvaluationCPUTime() const819   OrigIpoptNLP::TotalFunctionEvaluationCPUTime() const
820   {
821     return f_eval_time_.TotalTime()+
822            c_eval_time_.TotalTime()+
823            d_eval_time_.TotalTime()+
824            jac_c_eval_time_.TotalTime()+
825            jac_d_eval_time_.TotalTime()+
826            h_eval_time_.TotalTime();
827   }
828 
} // namespace SimTKIpopt
830