//------------------------------------------------------------------------
// P.J. Williams
// Sandia National Laboratories
// pwillia@sandia.gov
// Last Modified December 2000
//------------------------------------------------------------------------

#ifdef HAVE_CONFIG_H
#include "OPT++_config.h"
#endif

#include <typeinfo>
#ifdef HAVE_STD
#include <cstring>
#include <ctime>
#else
#include <string.h>
#include <time.h>
#endif

#include "OptFDNIPS.h"
#include "cblas.h"
#include "ioformat.h"
#include <float.h>

using Teuchos::SerialDenseVector;
using Teuchos::SerialDenseMatrix;
using Teuchos::SerialSymDenseMatrix;


namespace OPTPP {

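// Derivative check for the finite-difference NIPS method: this override
// performs no comparison between analytic and finite-difference gradients
// and simply reports success.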
int OptFDNIPS::checkDeriv() // check the analytic gradient with FD gradient
{return GOOD;}

SerialSymDenseMatrix<int,double> OptFDNIPS::updateH(SerialSymDenseMatrix<int,double>& Hk, int k)
{
  NLP1* nlp1 = nlprob();
  int ndim  = nlp1->getDim();
  SerialDenseVector<int,double> xc, xtmp, gradtmp, yzmultiplier, grad;
  SerialDenseMatrix<int,double> Htmp(ndim,ndim);
  double hi, hieps;

  double mcheps = DBL_EPSILON;
  SerialDenseVector<int,double> fcn_accrcy(nlp1->getFcnAccrcy().length());
  fcn_accrcy = nlp1->getFcnAccrcy();

  Htmp   = 0;
  xc.resize(nlp1->getXc().length());
  xc     = nlp1->getXc();
  // Stack the constraint multipliers into one vector, yzmultiplier = [y; z]
  int alpha = y.length();
  int beta  = z.length();
  yzmultiplier.resize(alpha+beta);

  for(int i = 0; i < alpha+beta; i++) {
    if(i < alpha)
      yzmultiplier(i) = y(i);
    else
      yzmultiplier(i) = z(i-alpha);
  }

  // Get the gradient of the Lagrangian
  grad.resize(getGradL().length());
  grad  = getGradL();

  // Build approximation column by column
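  /* Column j of the Hessian of the Lagrangian is approximated by a forward
     difference of the Lagrangian gradient along the jth coordinate
     direction e_j:

         H(:,j) ~= ( gradL(xc + hi*e_j, yzmultiplier) - gradL(xc, yzmultiplier) ) / hi
  */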
  for(int j = 0; j < ndim; j++){

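     // Forward-difference step for coordinate j: scale
     // sqrt(max(machine eps, user-supplied function accuracy)) by the larger
     // of |xc(j)| and the scaling factor sx(j), with the sign of xc(j).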
     hieps  = sqrt(max(mcheps,fcn_accrcy(j)));
     hi     = hieps*max(fabs(xc(j)), sx(j));
     hi     = copysign(hi, xc(j));
     xtmp.resize(xc.length());
     xtmp   = xc;
     xtmp(j)= xc(j) + hi;

     // Evaluate the gradient of the Lagrangian at the new point
     gradtmp.resize(grad.length());
     gradtmp = nlp1->evalLagrangianGradient(xtmp, yzmultiplier, constrType);

     /* Calculate the jth column of the Hessian using a first-order
        finite-difference approximation  */
     //Htmp.Column(j) << (gradtmp - grad)/hi;
     for(int i = 0; i < ndim; i++) {
       Htmp(i,j) = (gradtmp(i) - grad(i))/hi;
     }
  }

  // Symmetrize the Hessian Approximation
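  // Averaging Htmp with its transpose, element by element, removes the slight
  // asymmetry left by the forward differences; only the lower triangle of Hk
  // is filled because Hk is stored as a symmetric matrix.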
  for(int i = 0; i < ndim; i++)
    for(int j = 0; j <= i; j++)
      Hk(i,j) = (Htmp(i,j) + Htmp(j,i))/2.0;
  // Hk   << (Htmp + Htmp.t())/2.0;
  // Htmp.Release();
  hessl = Hk;
  return Hk;
}
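
/* A minimal usage sketch (an assumption for illustration -- the driver below
   is not part of this file): the finite-difference Hessian update is invoked
   with the current symmetric Hessian storage and the iteration count, e.g.

       SerialSymDenseMatrix<int,double> Hk(ndim);
       Hk = fdnipsSolver.updateH(Hk, iter);

   where fdnipsSolver is an OptFDNIPS object and ndim, iter are hypothetical
   names for the problem dimension and iteration counter.                    */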

} // namespace OPTPP