1 // (C) Copyright Carnegie Mellon University 2005
2 // All Rights Reserved.
3 // This code is published under the Eclipse Public License.
4 //
5 // Authors :
6 // Pierre Bonami, Carnegie Mellon University,
7 //
8 // Date : 05/25/2005
9 
10 
11 #include "BonTNLP2FPNLP.hpp"
12 
13 using namespace Ipopt;
14 
15 namespace Bonmin
16 {
  /** Build a TNLP2FPNLP wrapper around tnlp.
   *  Until the flags below are switched on, the wrapper behaves exactly
   *  like the wrapped problem: the feasibility-pump objective and the
   *  extra cutoff / local-branching constraints are all disabled.
   *  \param tnlp the problem to wrap.
   *  \param objectiveScalingFactor scaling applied to the distance term
   *         of the feasibility-pump objective (see eval_f). */
  TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, double objectiveScalingFactor):
      tnlp_(tnlp),
      inds_(),                 // indices of the variables entering the distance term
      vals_(),                 // reference point to which the distance is measured
      lambda_(1.),             // weight of the distance term in the objective
      sigma_(1.),              // weight of the original objective
      norm_(2),                // norm used for the distance (1 or 2)
      objectiveScalingFactor_(objectiveScalingFactor),
      use_feasibility_pump_objective_(false),
      use_cutoff_constraint_(false),
      use_local_branching_constraint_(false),
      cutoff_(COIN_DBL_MAX),   // no cutoff until set_cutoff() is called
      rhs_local_branching_constraint_(COIN_DBL_MAX),
      index_style_(TNLP::C_STYLE)  // updated from the wrapped TNLP in get_nlp_info
  {}
32 
  /** Build a wrapper around tnlp, copying every feasibility-pump
   *  setting (reference point, weights, norm, constraint flags and
   *  right-hand sides, index style) from another TNLP2FPNLP. */
  TNLP2FPNLP::TNLP2FPNLP(const SmartPtr<TNLP> tnlp, const SmartPtr<TNLP2FPNLP> other):
      tnlp_(tnlp),
      inds_(other->inds_),
      vals_(other->vals_),
      lambda_(other->lambda_),
      sigma_(other->sigma_),
      norm_(other->norm_),
      objectiveScalingFactor_(other->objectiveScalingFactor_),
      use_feasibility_pump_objective_(other->use_feasibility_pump_objective_),
      use_cutoff_constraint_(other->use_cutoff_constraint_),
      use_local_branching_constraint_(other->use_local_branching_constraint_),
      cutoff_(other->cutoff_),
      rhs_local_branching_constraint_(other->rhs_local_branching_constraint_),
      index_style_(other->index_style_)
  {}
48 
  /** Destructor.  tnlp_ is held through a SmartPtr, so the wrapped
   *  problem is released automatically; nothing else to clean up. */
  TNLP2FPNLP::~TNLP2FPNLP()
  {
  }
52 
53   void
set_cutoff(Number cutoff)54   TNLP2FPNLP::set_cutoff(Number cutoff)
55   {
56     Number epsilon = 1.0e-6;
57     if(cutoff > 1.0e-8)
58       cutoff_ = (1-epsilon) * cutoff;
59     else if(cutoff < -1.0e-8)
60       cutoff_ = (1+epsilon) * cutoff;
61     else
62       cutoff_ = - epsilon;
63   }
64 
65   void
set_dist_to_point_obj(size_t n,const Number * vals,const Index * inds)66   TNLP2FPNLP::set_dist_to_point_obj(size_t n, const Number * vals, const Index * inds)
67   {
68     inds_.resize(n);
69     vals_.resize(n);
70     std::copy(vals, vals + n, vals_.begin());
71     std::copy(inds, inds + n, inds_.begin());
72   }
73 
74   /** Compute the distance to the current point to which distance is minimized. */
75   double
dist_to_point(const Number * x)76   TNLP2FPNLP::dist_to_point(const Number *x)
77   {
78     double ret_val = 0;
79     assert(vals_.size() == inds_.size());
80     if(norm_ == 2){
81       for(unsigned int i = 0; i < vals_.size() ; i++) {
82         ret_val += ( x[inds_[i]] - vals_[i] ) * ( x[inds_[i]] - vals_[i] );
83       }
84     }
85     else if(norm_ == 1){
86       for(unsigned int i = 0 ; i < vals_.size() ; i++) {
87         if(vals_[i] <= 0.1)
88          ret_val += x[inds_[i]];
89         else{
90 	  ret_val += (1.0 - x[inds_[i]]);
91 	 //         ret_val -= x[inds_[i]];
92         }
93       }
94     }
95     return ret_val;
96   }
97 
98   bool
get_nlp_info(Index & n,Index & m,Index & nnz_jac_g,Index & nnz_h_lag,TNLP::IndexStyleEnum & index_style)99   TNLP2FPNLP::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
100       Index& nnz_h_lag,
101       TNLP::IndexStyleEnum& index_style)
102   {
103     bool ret_code = tnlp_->get_nlp_info(n, m , nnz_jac_g, nnz_h_lag,
104         index_style);
105 
106     index_style_ = index_style; // this function is called before any
107                                 // other that uses index_style_
108 
109     if(use_feasibility_pump_objective_ && norm_ == 2)
110       nnz_h_lag += (int)vals_.size();
111 
112     if(use_cutoff_constraint_ && use_local_branching_constraint_) {
113       m += 2;
114       nnz_jac_g += (n + (int)vals_.size());
115     }
116     else if(use_cutoff_constraint_) {
117       m++;
118       nnz_jac_g += n;
119     }
120     else if(use_local_branching_constraint_) {
121       m++;
122       nnz_jac_g += (int)vals_.size();
123     }
124 
125     return ret_code;
126   }
127 
128   bool
get_bounds_info(Index n,Number * x_l,Number * x_u,Index m,Number * g_l,Number * g_u)129   TNLP2FPNLP::get_bounds_info(Index n, Number* x_l, Number* x_u,
130 			      Index m, Number* g_l, Number* g_u)
131   {
132     bool ret_code;
133 
134     if(use_cutoff_constraint_ && use_local_branching_constraint_) {
135       ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-2, g_l, g_u);
136       g_l[m-2] = - COIN_DBL_MAX;
137       g_u[m-2] = cutoff_;
138       g_l[m-1] = - COIN_DBL_MAX;
139       g_u[m-1] = rhs_local_branching_constraint_;
140     }
141     else if(use_cutoff_constraint_) {
142       ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-1, g_l, g_u);
143       g_l[m-1] = - COIN_DBL_MAX;
144       g_u[m-1] = cutoff_;
145     }
146     else if(use_local_branching_constraint_) {
147       ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m-1, g_l, g_u);
148       g_l[m-1] = - COIN_DBL_MAX;
149       g_u[m-1] = rhs_local_branching_constraint_;
150     }
151     else
152       ret_code = tnlp_->get_bounds_info(n, x_l , x_u, m, g_l, g_u);
153 
154     return ret_code;
155   }
156 
157   bool
eval_f(Index n,const Number * x,bool new_x,Number & obj_value)158   TNLP2FPNLP::eval_f(Index n, const Number* x, bool new_x,
159       Number& obj_value)
160   {
161     bool ret_code = tnlp_->eval_f(n, x, new_x, obj_value);//for new_x
162 
163     if(use_feasibility_pump_objective_) {
164       obj_value *= (1 - lambda_) * sigma_;
165       obj_value += objectiveScalingFactor_*lambda_*dist_to_point(x);
166     }
167 
168     return ret_code;
169   }
170 
171   bool
eval_grad_f(Index n,const Number * x,bool new_x,Number * grad_f)172   TNLP2FPNLP::eval_grad_f(Index n, const Number* x, bool new_x,
173       Number* grad_f)
174   {
175     bool ret_code = tnlp_->eval_grad_f(n, x, new_x, grad_f);
176 
177     if(use_feasibility_pump_objective_) {
178       for(int i = 0 ; i < n ; i++) {
179 	grad_f[i] *= (1-lambda_) * sigma_;
180       }
181       if(norm_ ==2){
182 	for(unsigned int i = 0 ; i < inds_.size() ; i++) {
183 	  grad_f[inds_[i]] += objectiveScalingFactor_*2*lambda_*( x[inds_[i]] - vals_[i] );
184 	}
185       }
186       else{
187 	for(unsigned int i = 0 ; i < inds_.size() ; i++) {
188 	  if(vals_[i] <= 0.1)
189 	    grad_f[inds_[i]] += objectiveScalingFactor_*lambda_;
190 	  else
191 	    grad_f[inds_[i]] -= objectiveScalingFactor_*lambda_;
192 	}
193       }
194     }
195 
196     return ret_code;
197   }
198 
199   bool
eval_g(Index n,const Number * x,bool new_x,Index m,Number * g)200   TNLP2FPNLP::eval_g(Index n, const Number* x, bool new_x,
201 		     Index m, Number* g)
202   {
203     bool ret_code;
204 
205     if(use_cutoff_constraint_ && use_local_branching_constraint_) {
206       ret_code = tnlp_->eval_g(n, x, new_x, m-2, g);
207       // compute the value of the cutoff constraint
208       Number obj_value;
209       if(eval_f(n, x, new_x, obj_value))
210 	g[m-2] = obj_value;
211       else
212 	ret_code = false;
213       // compute the value of the local branching constraint
214       Number g_local_branching = 0.0;
215       for(unsigned int i = 0 ; i < vals_.size() ; i++) {
216         if(vals_[i] <= 0.1)
217           g_local_branching += x[inds_[i]];
218         else
219 	  g_local_branching += (1.0 - x[inds_[i]]);
220       }
221       g[m-1] = g_local_branching;
222     }
223     else if(use_cutoff_constraint_) {
224       ret_code = tnlp_->eval_g(n, x, new_x, m-1, g);
225       Number obj_value;
226       if(eval_f(n, x, new_x, obj_value))
227 	g[m-1] = obj_value;
228       else
229 	ret_code = false;
230     }
231     else if(use_local_branching_constraint_) {
232       ret_code = tnlp_->eval_g(n, x, new_x, m-1, g);
233       Number g_local_branching = 0.0;
234       for(unsigned int i = 0 ; i < vals_.size() ; i++) {
235         if(vals_[i] <= 0.1)
236           g_local_branching += x[inds_[i]];
237         else
238 	  g_local_branching += (1.0 - x[inds_[i]]);
239       }
240       g[m-1] = g_local_branching;
241     }
242     else
243       ret_code = tnlp_->eval_g(n, x, new_x, m, g);
244 
245     return ret_code;
246   }
247 
248   bool
eval_jac_g(Index n,const Number * x,bool new_x,Index m,Index nele_jac,Index * iRow,Index * jCol,Number * values)249   TNLP2FPNLP::eval_jac_g(Index n, const Number* x, bool new_x,
250 			 Index m, Index nele_jac, Index* iRow,
251 			 Index *jCol, Number* values)
252   {
253     bool ret_code;
254 
255     if(use_cutoff_constraint_ && use_local_branching_constraint_) {
256       int n_integers = (int)vals_.size();
257       ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n - n_integers,
258 				   iRow, jCol, values);
259 
260       if (iRow && jCol && !values) { //Initialization phase
261 	int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
262 	// compute the jacobian contribution of the cutoff constraint
263 	int k = nele_jac - n - n_integers;
264 	iRow += k;
265 	jCol += k;
266 	for(int i = 0; i< n; i++) {
267 	  iRow[i] = m - 2 + index_correction;
268 	  jCol[i] = i + index_correction;
269 	}
270 	// compute the jacobian contribution of the local branching constraint
271 	k = nele_jac - n_integers;
272 	iRow += k;
273 	jCol += k;
274 	for(int i = 0; i< n_integers; i++) {
275 	  iRow[i] = m - 1 + index_correction;
276 	  jCol[i] = inds_[i] + index_correction;
277 	}
278       }
279       else if (!iRow & !jCol && values) { //computation phase
280 	// compute the jacobian contribution of the cutoff constraint
281 	Number* grad_f = new Number[n];
282 	bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
283 	if(ret_code_grad_f) {
284 	  int k = nele_jac - n - n_integers;
285 	  values += k;
286 	  for(int i = 0; i< n; i++) {
287 	    values[i] = grad_f[i];
288 	  }
289 	}
290 	else
291 	  ret_code = false;
292 	delete [] grad_f;
293 	// compute the jacobian contribution of the local branching constraint
294 	int k = nele_jac - n_integers;
295 	values += k;
296 	for(int i = 0; i< n_integers; i++) {
297 	  if(vals_[i] <= 0.1)
298 	    values[i] = 1.0;
299 	  else
300 	    values[i] = -1.0;
301 	}
302       }
303       else { //error phase
304 	DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
305       }
306     }
307     else if(use_cutoff_constraint_) {
308       ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n,
309 				   iRow, jCol, values);
310 
311       if (iRow && jCol && !values) { //Initialization phase
312 	int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
313 	int k = nele_jac - n;
314 	iRow += k;
315 	jCol += k;
316 	for(int i = 0; i< n; i++) {
317 	  iRow[i] = m - 1 + index_correction;
318 	  jCol[i] = i + index_correction;
319 	}
320       }
321       else if (!iRow & !jCol && values) { //computation phase
322 	Number* grad_f = new Number[n];
323 	bool ret_code_grad_f = eval_grad_f(n, x, new_x, grad_f);
324 	if(ret_code_grad_f) {
325 	  int k = nele_jac - n;
326 	  values += k;
327 	  for(int i = 0; i< n; i++) {
328 	    values[i] = grad_f[i];
329 	  }
330 	}
331 	else
332 	  ret_code = false;
333 	delete [] grad_f;
334       }
335       else { //error phase
336 	DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
337       }
338     }
339     else if(use_local_branching_constraint_) {
340       int n_integers = (int)vals_.size();
341       ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac - n_integers,
342 				   iRow, jCol, values);
343 
344       if (iRow && jCol && !values) { //Initialization phase
345 	int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
346 	int k = nele_jac - n_integers;
347 	iRow += k;
348 	jCol += k;
349 	for(int i = 0; i< n_integers; i++) {
350 	  iRow[i] = m - 1 + index_correction;
351 	  jCol[i] = inds_[i] + index_correction;
352 	}
353       }
354       else if (!iRow & !jCol && values) { //computation phase
355 	int k = nele_jac - n_integers;
356 	values += k;
357 	for(int i = 0; i< n_integers; i++) {
358 	  if(vals_[i] <= 0.1)
359 	    values[i] = 1.0;
360 	  else
361 	    values[i] = -1.0;
362 	}
363       }
364       else { //error phase
365 	DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
366       }
367     }
368     else
369       ret_code = tnlp_->eval_jac_g(n, x, new_x, m, nele_jac,
370 				   iRow, jCol, values);
371 
372     return ret_code;
373   }
374 
375   bool
eval_h(Index n,const Number * x,bool new_x,Number obj_factor,Index m,const Number * lambda,bool new_lambda,Index nele_hess,Index * iRow,Index * jCol,Number * values)376   TNLP2FPNLP::eval_h(Index n, const Number* x, bool new_x,
377 		     Number obj_factor, Index m, const Number* lambda,
378 		     bool new_lambda, Index nele_hess,
379 		     Index* iRow, Index* jCol, Number* values)
380   {
381     bool ret_code;
382 
383     int  nnz_obj_h = (norm_ == 2) ? (int)inds_.size() : 0;
384 
385     if(use_cutoff_constraint_ && use_local_branching_constraint_) {
386       double coef_obj = (iRow != NULL)?0 : lambda[m - 2];
387       ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_ + coef_obj,
388 			       m - 2, lambda, new_lambda, nele_hess - nnz_obj_h,
389 			       iRow, jCol, values);
390     }
391     else if(use_cutoff_constraint_) {
392       double coef_obj = (iRow != NULL)?0 : lambda[m - 1];
393       ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_ + coef_obj,
394 			       m - 1, lambda, new_lambda, nele_hess - nnz_obj_h,
395 			       iRow, jCol, values);
396     }
397     else if(use_local_branching_constraint_) {
398       ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_,
399 			       m - 1, lambda, new_lambda, nele_hess - nnz_obj_h,
400 			       iRow, jCol, values);
401     }
402     else { // this is the original feasibility pump implementation
403       ret_code = tnlp_->eval_h(n, x, new_x, obj_factor*(1-lambda_)*sigma_,
404 			       m, lambda, new_lambda, nele_hess - nnz_obj_h,
405 			       iRow, jCol, values);
406     }
407 
408     //Now add extra elements corresponding to the hessian of the distance
409     if(use_feasibility_pump_objective_ && norm_ == 2){
410       if (iRow && jCol && !values) //Initialization phase
411 	{
412     int index_correction = (index_style_ == TNLP::C_STYLE) ? 0 : 1;
413 	  int k = nele_hess - nnz_obj_h;
414 	  iRow += k;
415 	  jCol += k;
416 	  for(unsigned int i = 0; i < inds_.size() ; i++)
417 	    {
418 	      iRow[i] = inds_[i] + index_correction;
419 	      jCol[i] = inds_[i] + index_correction;
420 	    }
421 	  DBG_ASSERT(k==nele_hess);
422 	}
423       else if (!iRow & !jCol && values) //computation phase
424 	{
425 	  int k = nele_hess - nnz_obj_h;
426 	  values += k;
427 	  for(unsigned int i = 0; i < inds_.size() ; i++)
428 	    {
429 	      values[i] = 2* objectiveScalingFactor_* lambda_* obj_factor;
430 	    }
431 	  DBG_ASSERT(k==nele_hess);
432 	}
433       else //error phase
434 	{
435 	  DBG_ASSERT(false && "Invalid combination of iRow, jCol, and values pointers");
436 	}
437     }
438 
439     return ret_code;
440   }
441 
442   void
finalize_solution(SolverReturn status,Index n,const Number * x,const Number * z_L,const Number * z_U,Index m,const Number * g,const Number * lambda,Number obj_value,const IpoptData * ip_data,IpoptCalculatedQuantities * ip_cq)443   TNLP2FPNLP::finalize_solution(SolverReturn status,
444       Index n, const Number* x, const Number* z_L, const Number* z_U,
445       Index m, const Number* g, const Number* lambda,
446       Number obj_value,
447       const IpoptData* ip_data,
448       IpoptCalculatedQuantities* ip_cq)
449   {
450       int m2 = m;
451       if(use_cutoff_constraint_) {
452         m2--;
453       }
454       if(use_local_branching_constraint_) {
455         m2--;
456       }
457     tnlp_->finalize_solution(status,n, x, z_L, z_U,m2, g, lambda, obj_value,
458 			     ip_data, ip_cq);
459   }
460 }
461 
462