// (C) Copyright International Business Machines Corporation 2007
// All Rights Reserved.
//
// Authors :
// Pierre Bonami, International Business Machines Corporation
//
// Date : 08/16/2007

10 #ifndef TMINLPLinObj_H
11 #define TMINLPLinObj_H
12 
13 #include "BonTMINLP.hpp"
14 
15 namespace Bonmin {
16 /** From a TMINLP, this class adapts to another TMINLP where the original objective is transformed into a constraint
17     by adding an extra variable which is minimized.
18 
19     More precisely
20     \f[
21     \begin{array}{l}
22     \min f(x)\\
23     s.t\\
24     g_l \leq g(x) \leq g_u\\
25     x_l \leq x \leq u
26     \end{array}
27     \f]
28     is transformed ino
29     \begin{array}{l}
30     \min \eta\\
31     s.t\\
32     -\infty \leq f(x) - \eta \leq 0\\
33     g_l \leq g(x) \leq g_u\\
34     x_l \leq x \leq u
35     \end{array}
36     \f]
37     The objective is put as first constraint of the problem and the extra variable is the last one.
38  .*/
39 class TMINLPLinObj: public Bonmin::TMINLP {
40   public:
41    /** Default constructor*/
42    TMINLPLinObj();
43 
44   /** destructor.*/
45   virtual ~TMINLPLinObj();
46 
47   /** set reference TMINLP */
48   void setTminlp(Ipopt::SmartPtr<TMINLP> tminlp);
49 
50     /**@name methods to gather information about the MINLP */
51     //@{
52     /** Return the number of variables
53      *  and constraints, and the number of non-zeros in the jacobian and
54      *  the hessian. Call tminlp_ one  but number of constraints and non-zeroes in the jacobian is stored internally.*/
55         virtual bool get_nlp_info(Ipopt::Index& n, Ipopt::Index& m, Ipopt::Index& nnz_jac_g,
56                                   Ipopt::Index& nnz_h_lag, Ipopt::TNLP::IndexStyleEnum& index_style);
57     /** Return scaling parameters. If tminlp_ method returns true, translate
58       * constraint scaling (if asked).
59      */
60     virtual bool get_scaling_parameters(Ipopt::Number& obj_scaling,
61                                         bool& use_x_scaling, Ipopt::Index n,
62                                         Ipopt::Number* x_scaling,
63                                         bool& use_g_scaling, Ipopt::Index m,
64                                         Ipopt::Number* g_scaling);
65 
66 
67     /** Get the variable type. Just call tminlp_'s method;. */
get_variables_types(Ipopt::Index n,VariableType * var_types)68     virtual bool get_variables_types(Ipopt::Index n, VariableType* var_types){
69       assert(IsValid(tminlp_));
70       assert(n == n_);
71       var_types[n-1] = TMINLP::CONTINUOUS;
72       return tminlp_->get_variables_types(n - 1, var_types);
73     }
74 
75     /** Return the constraints linearity. Call tminlp_'s method and translate.
76       */
77     virtual bool get_constraints_linearity(Ipopt::Index m,
78 					   Ipopt::TNLP::LinearityType* const_types);
79 
80     /** Return the information about the bound
81      *  on the variables and constraints. Call tminlp_'s method and translate
82      *  constraints bounds.*/
83     virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number* x_l, Ipopt::Number* x_u,
84         Ipopt::Index m, Ipopt::Number* g_l, Ipopt::Number* g_u);
85 
86     /** Return the starting point.
87         Have to translate z_L and z_U.
88      */
89     virtual bool get_starting_point(Ipopt::Index n, bool init_x, Ipopt::Number* x,
90                                     bool init_z, Ipopt::Number* z_L, Ipopt::Number* z_U,
91         Ipopt::Index m, bool init_lambda,
92         Ipopt::Number* lambda);
93 
94     /** Return the value of the objective function.
95       * Just call tminlp_ method. */
eval_f(Ipopt::Index n,const Ipopt::Number * x,bool new_x,Ipopt::Number & obj_value)96     virtual bool eval_f(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
97         Ipopt::Number& obj_value){
98         assert(n == n_);
99         obj_value = x[n-1];
100        return true;}
101 
102     /** Return the vector of the gradient of
103      *  the objective w.r.t. x. Just call tminlp_ method. */
eval_grad_f(Ipopt::Index n,const Ipopt::Number * x,bool new_x,Ipopt::Number * grad_f)104     virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
105         Ipopt::Number* grad_f){
106        assert(IsValid(tminlp_));
107        assert(n == n_);
108        n--;
109        for(int  i = 0 ; i < n ; i++){
110         grad_f[i] = 0;}
111        grad_f[n] = 1;
112        return true;}
113 
114     /** Return the vector of constraint values.
115       * Use tminlp_ functions and use mapping to get the needed values. */
116     virtual bool eval_g(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
117         Ipopt::Index m, Ipopt::Number* g);
118 
119     /** Return the jacobian of the constraints.
120       * In first call nothing to change. In later just fix the values for the simple concaves
121       * and remove entries corresponding to nonConvex constraints. */
122     virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
123         Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index* iRow,
124         Ipopt::Index *jCol, Ipopt::Number* values);
125 
126     /** \brief Return the hessian of the lagrangian.
127       * Here we just put lambda in the correct format and call
128       * tminlp_'s function.*/
129     virtual bool eval_h(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
130         Ipopt::Number obj_factor, Ipopt::Index m, const Ipopt::Number* lambda,
131         bool new_lambda, Ipopt::Index nele_hess,
132         Ipopt::Index* iRow, Ipopt::Index* jCol, Ipopt::Number* values);
133     /** Compute the value of a single constraint. The constraint
134      *  number is i (starting counting from 0. */
135     virtual bool eval_gi(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
136 			 Ipopt::Index i, Ipopt::Number& gi);
137     /** Compute the structure or values of the gradient for one
138      *  constraint. The constraint * number is i (starting counting
139      *  from 0.  Other things are like with eval_jac_g. */
140     virtual bool eval_grad_gi(Ipopt::Index n, const Ipopt::Number* x, bool new_x,
141 			      Ipopt::Index i, Ipopt::Index& nele_grad_gi, Ipopt::Index* jCol,
142 			      Ipopt::Number* values);
143     //@}
144 
get_variables_linearity(Ipopt::Index n,Ipopt::TNLP::LinearityType * c)145     virtual bool get_variables_linearity(Ipopt::Index n, Ipopt::TNLP::LinearityType* c){
146       assert(IsValid(tminlp_));
147       assert(n == n_);
148       bool r_val = tminlp_->get_variables_linearity(n-1, c);
149       c[n - 1] = Ipopt::TNLP::LINEAR;
150       return r_val;
151     }
152 
153 
154     /** @name Solution Methods */
155     //@{
156      /**  Use tminlp_ function.*/
finalize_solution(TMINLP::SolverReturn status,Ipopt::Index n,const Ipopt::Number * x,Ipopt::Number obj_value)157     virtual void finalize_solution(TMINLP::SolverReturn status,
158                                    Ipopt::Index n, const Ipopt::Number* x, Ipopt::Number obj_value){
159        return tminlp_->finalize_solution(status, n - 1, x,
160                                   obj_value);
161     }
162     //@}
163 
164      /**  Use tminlp_ function.*/
branchingInfo() const165     virtual const BranchingInfo * branchingInfo() const{
166       return tminlp_->branchingInfo();
167     }
168 
169      /**  Use tminlp_ function.
170           \bug Has to translate sos information.*/
sosConstraints() const171     virtual const SosInfo * sosConstraints() const{
172       return tminlp_->sosConstraints();
173     }
174      /**  Use tminlp_ function.*/
perturbInfo() const175     virtual const PerturbInfo* perturbInfo() const
176     {
177       return tminlp_->perturbInfo();
178     }
179 
180     /**  Use tminlp_ function.*/
hasUpperBoundingObjective()181     virtual bool hasUpperBoundingObjective(){
182       assert(IsValid(tminlp_));
183       return tminlp_->hasUpperBoundingObjective();}
184 
185     /** Use tminlp_ function.*/
eval_upper_bound_f(Ipopt::Index n,const Ipopt::Number * x,Ipopt::Number & obj_value)186     virtual bool eval_upper_bound_f(Ipopt::Index n, const Ipopt::Number* x,
187                                     Ipopt::Number& obj_value){
188        assert(IsValid(tminlp_));
189        return tminlp_->eval_upper_bound_f(n - 1, x, obj_value); }
190 
191   /** Say if problem has a linear objective (for OA) */
hasLinearObjective()192   virtual bool hasLinearObjective(){return true;}
193   /** return pointer to tminlp_.*/
tminlp()194   Ipopt::SmartPtr<TMINLP> tminlp(){return tminlp_;}
195   private:
196   /** Reset all data.*/
197    void gutsOfDestructor();
198 
199   /** Reference TMINLP which is to be relaxed.*/
200   Ipopt::SmartPtr<TMINLP> tminlp_;
201   /** Ipopt::Number of constraints in the transformed MINLP.*/
202   int m_;
203   /** Ipopt::Number of variables in the transformed MINLP.*/
204   int n_;
205   /** number of non-zeroes in the jacobian of the transformed MINLP.*/
206   int nnz_jac_;
207   /** offset for jacobian.*/
208   int offset_;
209 
210 };
211 
212 
213 }/* Ends Bonmin namepsace.*/
214 
215 #endif
216 
217