1 #ifndef OptConstrNewtonLike_h
2 #define OptConstrNewtonLike_h
3 
/*--------------------------------------------------------------------
  Copyright (c) 2001, Sandia Corporation.
  J.C. Meza, Sandia National Laboratories, meza@ca.sandia.gov
 -----------------------------------------------------------------------*/
8 
9 #include "Opt.h"
10 
11 namespace OPTPP {
12 
/**
 * Constrained Newton abstract data classes
 * OptConstrNewtonLike
 * OptConstrNewton1Deriv
 * OptConstrNewton2Deriv
 *
 * OptConstrNewtonLike provides common data and functionality for
 * OptConstrFDNewton, OptConstrQNewton, OptConstrNewton, OptFDNIPS,
 * OptQNIPS, and OptNIPS methods.
 *
 * @author J.C. Meza, Lawrence Berkeley National Laboratory
 * @note Modified by P.J. Williams
 * @date 03/27/2007
 */
27 
28 
29 class OptConstrNewtonLike: public OptimizeClass {
30 protected:
31   virtual NLP1* nlprob() const = 0; ///< returns an NLP1 pointer
32   int me;			///< Number of equality constraints
33   int mi;			///< Number of inequality constraints
34   int grad_evals;		///< Number of gradient evaluations
35   Teuchos::SerialDenseVector<int,double> gprev;		///< Gradient at previous iteration
36   Teuchos::SerialDenseVector<int,double> z;		///< Lagrange mult. corr. to ineq constraints
37   Teuchos::SerialDenseVector<int,double> y;		///< Lagrange mult. corr. to eq constraints
38   Teuchos::SerialDenseVector<int,double> s;		///< Slack variables corr. to ineq. constraints
39   Teuchos::SerialDenseVector<int,double> constrType;	///< Vector which contains the type of constraint
40   Teuchos::SerialDenseVector<int,double> constraintResidual; ///< Constraint residual at xc
41   Teuchos::SerialDenseVector<int,double> gradl;		///< Current gradient of the lagrangian
42   Teuchos::SerialDenseVector<int,double> gradlprev;	///< Previous gradient of the lagrangian
43   Teuchos::SerialDenseMatrix<int,double> constraintGradient;	///< Current constraint gradient
44   Teuchos::SerialDenseMatrix<int,double> constraintGradientPrev; ///< Previous constraint gradient
45   Teuchos::SerialSymDenseMatrix<int,double> Hessian;	///< Current Hessian
46   Teuchos::SerialSymDenseMatrix<int,double> hessl;	///< Current Hessian of the lagrangian
47   SearchStrategy strategy;	///< User-specified globalization strategy
48   DerivOption finitediff;	///< User-specified derivative option
49   MeritFcn    mfcn;		///< Merit function option
50   double TR_size;			///< Size of the trust region radius
51   double gradMult;		///< Gradient multiplier to compute TR_size
52   int searchSize;               ///< Search pattern size for TRPDS
53   double cost;			///< Value of the merit function
54   void defaultAcceptStep(int, int);
55   Teuchos::SerialDenseVector<int,double> defaultComputeSearch(Teuchos::SerialSymDenseMatrix<int,double>& );
56   bool WarmStart;
57   bool feas_flag;		///< Switch to turn on the feasibility recovery method
58   int max_feas_iter;            ///< Maximize number of iterations to achieve feasibility
59 
60 public:
61  /**
62   * Default Constructor
63   * @see OptConstrNewtonLike(int n)
64   * @see OptConstrNewtonLike(int n, UPDATEFCN u)
65   * @see OptConstrNewtonLike(int n, TOLS t)
66   */
67 
OptConstrNewtonLike()68   OptConstrNewtonLike() {}
69  /**
70   * @param n an integer argument
71   * @see OptConstrNewtonLike(int n, UPDATEFCN u)
72   * @see OptConstrNewtonLike(int n, TOLS t)
73   */
OptConstrNewtonLike(int n)74   OptConstrNewtonLike(int n):
75     OptimizeClass(n), me(0), mi(0), grad_evals(0),   gprev(n),
76     z(n), y(n), s(n), constrType(n), constraintResidual(n),
77     gradl(n), gradlprev(n),constraintGradient(n,n), constraintGradientPrev(n,n),
78     Hessian(n), hessl(n), strategy(TrustRegion), finitediff(ForwardDiff),
79     mfcn(ArgaezTapia), TR_size(0.0),
80     gradMult(0.1), searchSize(64), cost(0.0), WarmStart(false),
81     feas_flag(false), max_feas_iter(3)
82     {z = 0; y = 0; s = 0;}
83  /**
84   * @param n an integer argument
85   * @param u a function pointer.
86   * @see OptConstrNewtonLike(int n)
87   * @see OptConstrNewtonLike(int n, TOLS t)
88   */
OptConstrNewtonLike(int n,UPDATEFCN u)89   OptConstrNewtonLike(int n, UPDATEFCN u):
90     OptimizeClass(n), me(0), mi(0), grad_evals(0),   gprev(n),
91     z(n), y(n), s(n), constrType(n), constraintResidual(n),
92     gradl(n), gradlprev(n),constraintGradient(n,n), constraintGradientPrev(n,n),
93     Hessian(n), hessl(n), strategy(TrustRegion), finitediff(ForwardDiff),
94     mfcn(ArgaezTapia), TR_size(0.0),
95     gradMult(0.1), searchSize(64), cost(0.0), WarmStart(false),
96     feas_flag(false), max_feas_iter(3)
97     {update_fcn = u; z = 0; y = 0; s = 0;}
98  /**
99   * @param n an integer argument
100   * @param t tolerance class reference.
101   * @see OptConstrNewtonLike(int n)
102   * @see OptConstrNewtonLike(int n, UPDATEFCN u)
103   */
OptConstrNewtonLike(int n,TOLS t)104   OptConstrNewtonLike(int n, TOLS t):
105     OptimizeClass(n,t), me(0), mi(0), grad_evals(0),   gprev(n),
106     z(n), y(n), s(n), constrType(n), constraintResidual(n),
107     gradl(n), gradlprev(n),constraintGradient(n,n), constraintGradientPrev(n,n),
108     Hessian(n), hessl(n), strategy(TrustRegion), finitediff(ForwardDiff),
109     mfcn(ArgaezTapia), TR_size(0.0),
110     gradMult(0.1), searchSize(64), cost(0.0), WarmStart(false),
111     feas_flag(false), max_feas_iter(3)
112     {z = 0; y = 0; s = 0;}
113 
114  /**
115   * Destructor
116   */
~OptConstrNewtonLike()117   virtual ~OptConstrNewtonLike(){}
118 
119  //---------------------------------------
120  // Virtual functions
121  //---------------------------------------
122 
123 // These have default values
124   /// Accept this step and update the nonlinear model
acceptStep(int k,int step_type)125   virtual void acceptStep(int k, int step_type)
126     {defaultAcceptStep(k, step_type);}
127 
128   /// Solve for the Newton direction
computeSearch(Teuchos::SerialSymDenseMatrix<int,double> & H)129   virtual Teuchos::SerialDenseVector<int,double> computeSearch(Teuchos::SerialSymDenseMatrix<int,double>& H )
130     {return defaultComputeSearch(H);}
131 
updateModel(int k,int ndim,Teuchos::SerialDenseVector<int,double> x)132   virtual void updateModel(int k, int ndim, Teuchos::SerialDenseVector<int,double> x)
133     {OptimizeClass::defaultUpdateModel(k, ndim, x);}
134 
135   virtual void reset();
136 
137  // These have to be defined by other classes
138 
139   /// Check to see if algorithm satisfies the convergence criterion
140   virtual int  checkConvg();
141 
142   /// Compare the analytic gradient with the finite-difference approximation
143   virtual int  checkDeriv();
144 
145   /// Compute the steplength along the search direction.
146   /// If an acceptable step not found, returns an error code = -1.
147   virtual int  computeStep(Teuchos::SerialDenseVector<int,double> sk);
148 
149   virtual double computeMaxStep(Teuchos::SerialDenseVector<int,double> sk);
150 
151   /// Initialize algorithmic parameters
152   virtual void initOpt();
153 
154   /// Compute the Hessian or its approximation at the initial point
155   virtual void initHessian();
156 
157   /// Initialize the radius of the trust-region
158   virtual double initTrustRegionSize() const;
159 
160   /// Invoke a constrained Newton's method
161   virtual void optimize();
162 
163   /// Read user-specified input options from a file
164   virtual void readOptInput();
165 
166 // These are used by all derived classes
167 
168   /// Check finite difference approximation with analytic derivatives
169   int checkAnalyticFDGrad();
170 
171   /**
172    * @return The number of function evaluations
173    */
getFevals()174   int getFevals() const {return fcn_evals;}
175 
176   /**
177    * @return The number of gradient evaluations
178    */
getGevals()179   int getGevals() const {return grad_evals;}
180 
181   /**
182    * @return Trust region radius
183    */
getTRSize()184   double getTRSize() const {return TR_size;}
185   /// Set radius of trust-region
setTRSize(double delta)186   void setTRSize(double delta) {TR_size = delta;}
187 
188   /**
189    * @return Gradient multiplier to compute radius of trust-region
190    */
getGradMult()191   double getGradMult() const {return gradMult;}
192   /// Set gradient multiplier which is used to compute radius of trust-region
setGradMult(double tau)193   void setGradMult(double tau) {gradMult = tau;}
194 
195   /**
196    * @return Number of points in search scheme
197    * (only relevant when trustpds search strategy is selected)
198    */
getSearchSize()199   int getSearchSize() const {return searchSize;}
200   /// Set number of points in search scheme for trustpds strategy
setSearchSize(int sss)201   void setSearchSize(int sss) {searchSize = sss;}
202 
getWarmStart()203   bool getWarmStart() const {return WarmStart;}
UseWarmStart(Teuchos::SerialSymDenseMatrix<int,double> & H)204   void UseWarmStart(Teuchos::SerialSymDenseMatrix<int,double>& H) {Hessian = H; WarmStart = true;}
205 
206   /**
207    * @return Globalization strategy for optimization algorithm
208    */
setSearchStrategy(SearchStrategy search)209   void setSearchStrategy(SearchStrategy search) {strategy = search;}
210   /// Set globalization strategy for optimization algorithm
getSearchStrategy()211   SearchStrategy getSearchStrategy() const {return strategy;}
212 
213   /**
214    * @return Type of finite difference approximation
215    */
setDerivOption(DerivOption d)216   void setDerivOption(DerivOption d) {finitediff = d;}
217   /// Set type of finite difference approximation (forward, backward, or central)
getDerivOption()218   DerivOption getDerivOption() const {return finitediff;}
219 
220   /**
221    * @return Lagrangian multipliers corresponding to equality constraints
222    */
getEqualityMultiplier()223   Teuchos::SerialDenseVector<int,double> getEqualityMultiplier() const { return y;}
224   /// Store the current Lagrangian multipliers corresponding to equality constraints
setEqualityMultiplier(const Teuchos::SerialDenseVector<int,double> & ymult)225   void setEqualityMultiplier(const Teuchos::SerialDenseVector<int,double>& ymult) { y  = ymult;}
226 
227   /**
228    * @return Lagrangian multipliers corresponding to inequality constraints
229    */
getInequalityMultiplier()230   Teuchos::SerialDenseVector<int,double> getInequalityMultiplier() const { return z;}
231   /// Store the current Lagrangian multipliers corresponding to inequality constraints
setInequalityMultiplier(const Teuchos::SerialDenseVector<int,double> & zmult)232   void setInequalityMultiplier(const Teuchos::SerialDenseVector<int,double>& zmult){ z = zmult;}
233 
234   /**
235    * @return Slack variables associated with inequality constraints
236    */
getSlacks()237   Teuchos::SerialDenseVector<int,double> getSlacks() const { return s;}
238   /// Store the current slack vector
setSlacks(const Teuchos::SerialDenseVector<int,double> & slackVar)239   void setSlacks(const Teuchos::SerialDenseVector<int,double>& slackVar) { s = slackVar;}
240 
241   /**
242    * @return Merit function
243    */
getMeritFcn()244   MeritFcn getMeritFcn() const  { return mfcn;}
245   /// Specify the merit function to used in step acceptance test
setMeritFcn(MeritFcn option)246   virtual void setMeritFcn(MeritFcn option) { mfcn = option;}
247 
248   /**
249    * @return Gradient of the Lagrangian at the current iteration
250    */
getGradL()251   Teuchos::SerialDenseVector<int,double> getGradL() const  { return gradl;}
252   /// Store the gradient of the Lagrangian at the current iteration
setGradL(Teuchos::SerialDenseVector<int,double> gradl_value)253   virtual void setGradL(Teuchos::SerialDenseVector<int,double> gradl_value) { gradl = gradl_value;
254 }
255 
256   /**
257    * @return Gradient of the Lagrangian at the previous iteration
258    */
getGradLPrev()259   Teuchos::SerialDenseVector<int,double> getGradLPrev() const  { return gradlprev;}
260   /// Store the gradient of the Lagrangian at the previous iteration
setGradLPrev(Teuchos::SerialDenseVector<int,double> gradl_value)261   virtual void setGradLPrev(Teuchos::SerialDenseVector<int,double> gradl_value) { gradlprev = gradl_value;}
262 
263   /**
264    * @return Residuals of the constraints at the current iteration
265    */
getConstraintResidual()266   Teuchos::SerialDenseVector<int,double> getConstraintResidual() const  { return constraintResidual;}
267   /// Store the residuals of the constraints at the current iteration
setConstraintResidual(const Teuchos::SerialDenseVector<int,double> & constraint_value)268   virtual void setConstraintResidual(const Teuchos::SerialDenseVector<int,double>& constraint_value)
269                        { constraintResidual = constraint_value;}
270 
271   /**
272    * @return Gradient of the constraints at the current iteration
273    */
getConstraintGradient()274   Teuchos::SerialDenseMatrix<int,double> getConstraintGradient() const  { return constraintGradient;}
275   /// Store the current gradients of the constraints
setConstraintGradient(const Teuchos::SerialDenseMatrix<int,double> & constraint_grad)276   virtual void setConstraintGradient(const Teuchos::SerialDenseMatrix<int,double>& constraint_grad)
277                        { constraintGradient = constraint_grad;}
278 
279   /**
280    * @return Value of merit function
281    */
getCost()282   double getCost() const  { return cost;}
283   /// Store current value of merit function
setCost(double value)284   void setCost(double value) { cost = value;}
285 
286   /**
287    * @return Switch to turn on feasibility recovery method
288    */
getFeasibilityRecoveryFlag()289   bool getFeasibilityRecoveryFlag() const {return feas_flag;}
290   /// Set switch to turn on feasibility recovery method
setFeasibilityRecoveryFlag(bool flag)291   void setFeasibilityRecoveryFlag(bool flag) {feas_flag = flag;}
292 
293   /**
294    * @return Maximum number of iterations for feasibility recovery method
295    */
getMaxFeasIter()296   int getMaxFeasIter() const {return max_feas_iter;}
297   /// Set maximum number of iterations for feasibility recovery method of trust-region
setMaxFeasIter(int k)298   void setMaxFeasIter(int k) {max_feas_iter = k;}
299 
300   /**
301    * @return Hessian matrix
302    */
getHessian()303   Teuchos::SerialSymDenseMatrix<int,double> getHessian() const {return Hessian;}
304   /// Store current Hessian matrix
setHessian(Teuchos::SerialSymDenseMatrix<int,double> & H)305   void setHessian(Teuchos::SerialSymDenseMatrix<int,double>& H) {Hessian = H;}
306 
307   /// Compute the Hessian of the Lagrangrian or its approximation at iteration k
308   virtual Teuchos::SerialSymDenseMatrix<int,double> updateH(Teuchos::SerialSymDenseMatrix<int,double>& H, int k) = 0;
309 
310   /// Compute the active set according to Facchinei, Fischer, and Kanzow indicator
311   Teuchos::SerialDenseVector<int,double> computeFFK1Ind(const Teuchos::SerialDenseVector<int,double>& xc);
312   /// Compute the active set according to Facchinei, Fischer, and Kanzow indicator
313   Teuchos::SerialDenseVector<int,double> computeFFK2Ind(const Teuchos::SerialDenseVector<int,double>& xc);
314   /// Compute the Tapia indicators for the constrained problem
315   Teuchos::SerialDenseVector<int,double> computeTapiaInd(const Teuchos::SerialDenseVector<int,double>& step);
316 
317   /// Print status of the constrained Newton's method
318   void printStatus(char *);
319   /// Output the Lagrangian multipliers to the screen
320   void printMultipliers(char *);
321   /// Print the Lagrangian multipliers to a file
322   void fPrintMultipliers(std::ostream *nlpout, char *);
323   /// Print second order sufficiency information to a file
324   void fPrintSecSuff(std::ostream *nlpout, Teuchos::SerialDenseVector<int,double>& info);
325 
326   friend int trustregion(NLP1*, std::ostream*, Teuchos::SerialSymDenseMatrix<int,double>&,
327 			 Teuchos::SerialDenseVector<int,double>&, Teuchos::SerialDenseVector<int,double>&,
328 			 double&, double&, double stpmax, double stpmin);
329 
330   friend int trustpds(NLP1*, std::ostream*, Teuchos::SerialSymDenseMatrix<int,double>&,
331 		      Teuchos::SerialDenseVector<int,double>&, Teuchos::SerialDenseVector<int,double>&,
332 		      double&, double&, double stpmax, double stpmin, int);
333 };
334 
/**
 * Constrained Newton classes that will accept either an NLP1 or NLP2
 */
338 
339 class OptConstrNewton1Deriv: public OptConstrNewtonLike {
340 public:
341 
OptConstrNewton1Deriv()342   OptConstrNewton1Deriv() {}
343 
OptConstrNewton1Deriv(NLP1 * p)344   OptConstrNewton1Deriv(NLP1* p):
345     OptConstrNewtonLike(p->getDim()), mem_nlp(p) {;}
346 
OptConstrNewton1Deriv(NLP1 * p,UPDATEFCN u)347   OptConstrNewton1Deriv(NLP1* p, UPDATEFCN u):
348     OptConstrNewtonLike(p->getDim(),u), mem_nlp(p) {;}
349 
OptConstrNewton1Deriv(NLP1 * p,TOLS t)350   OptConstrNewton1Deriv(NLP1* p, TOLS t):
351     OptConstrNewtonLike(p->getDim(),t), mem_nlp(p) {;}
352 
~OptConstrNewton1Deriv()353   virtual ~OptConstrNewton1Deriv(){}
354 
355 private:
356   NLP1* mem_nlp;
357 
358 protected:
359   /**
360    * @ returns a pointer to an NLP1
361    */
nlprob()362   NLP1* nlprob() const { return mem_nlp; }
363 };
364 
/**
 * Constrained Newton classes that require an NLP2
 */
368 
369 class OptConstrNewton2Deriv: public OptConstrNewtonLike {
370 public:
371 
OptConstrNewton2Deriv()372   OptConstrNewton2Deriv() {}
373 
OptConstrNewton2Deriv(NLP2 * p)374   OptConstrNewton2Deriv(NLP2* p):
375     OptConstrNewtonLike(p->getDim()), mem_nlp(p){;}
376 
OptConstrNewton2Deriv(NLP2 * p,UPDATEFCN u)377   OptConstrNewton2Deriv(NLP2* p, UPDATEFCN u):
378     OptConstrNewtonLike(p->getDim(),u), mem_nlp(p){;}
379 
OptConstrNewton2Deriv(NLP2 * p,TOLS t)380   OptConstrNewton2Deriv(NLP2* p, TOLS t):
381     OptConstrNewtonLike(p->getDim(),t), mem_nlp(p){;}
382 
~OptConstrNewton2Deriv()383   virtual ~OptConstrNewton2Deriv(){}
384 
385 private:
386   NLP2* mem_nlp;
387 
388 protected:
389   /**
390    * @ returns a pointer to an NLP1
391    */
nlprob()392   NLP1* nlprob() const { return mem_nlp;}
393   /**
394    * @ returns a pointer to an NLP2
395    */
nlprob2()396   NLP2* nlprob2() const { return mem_nlp;}
397 };
398 
399 } // namespace OPTPP
400 
401 #endif
402