1 // This is core/vnl/algo/vnl_lbfgs.h
2 #ifndef vnl_lbfgs_h_
3 #define vnl_lbfgs_h_
4 //:
5 // \file
// \brief Limited memory Broyden Fletcher Goldfarb Shanno minimization
7 // \author Andrew W. Fitzgibbon, Oxford RRG
8 // \date   22 Aug 99
9 //
10 // \verbatim
11 // Modifications
12 //  990822 AWF Initial version.
13 //  dac (Manchester) 28/03/2001: tidied up documentation
14 //  scottim 4/02/2002: Added a better description
15 // \endverbatim
16 //
17 
18 #include <vnl/vnl_cost_function.h>
19 #include <vnl/vnl_nonlinear_minimizer.h>
20 #include <vnl/algo/vnl_algo_export.h>
21 
//: Limited memory Broyden Fletcher Goldfarb Shanno (BFGS) minimization
// Considered to be the best optimisation algorithm for functions
// which are well behaved (i.e. locally smooth,
// without too many local minima), have 1st derivatives available,
// and do not have a structure that makes them suitable for alternative
// methods (e.g. if f(x) is a sum of squared terms you should use
// vnl_levenberg_marquardt).
29 //
30 // This limited-memory rank-2 quasi-newton method
31 // maintains an estimate of (the inverse of) the Hessian matrix of f at x.
32 // Unlike Newton's method, it doesn't need 2nd derivatives of f(x),
33 // has superlinear rather than quadratic convergence and is
34 // better behaved away from minima. 2 ranks of this matrix are updated at each
35 // step. In order to reduce memory and time requirements, this limited memory
36 // version of BFGS only maintains a certain number of vector corrections
37 // to a diagonal estimate of the inverse Hessian estimate.
38 
class VNL_ALGO_EXPORT vnl_lbfgs : public vnl_nonlinear_minimizer
{
 public:
  //: Default constructor.
  // Useful for setting the public parameters below before attaching a
  // cost function.  NOTE(review): no cost function is attached here, so
  // presumably one must be supplied before minimize() is called --
  // confirm against the implementation in the .cxx.
  vnl_lbfgs();
  //: Construct attached to the cost function \p f.
  // Only a reference is taken (presumably stored in f_ below), so \p f
  // must outlive this minimizer.
  vnl_lbfgs(vnl_cost_function& f);

  //: Run the minimization, starting from the initial estimate \p x.
  // \p x is passed by non-const reference and on return holds the best
  // parameter vector found.  The bool return presumably reports
  // success/convergence -- see the .cxx for the exact convention.
  bool minimize(vnl_vector<double>& x);

  //: Step accuracy/speed tradeoff.
  // Effectively the number of correction vectors to the diagonal approximation
  // of the inverse Hessian estimate that are kept.
  //
  // Large values of M will result in excessive computing time.
  // 3<= memory <=7 is recommended.
  // Memory requirements will be roughly Const+sizeof(element)*dim(X)*memory.
  // Default is 5.
  int memory;

  //: Accuracy of line search.
  // If function evaluations are cheap wrt the actual minimization steps,
  // change to 0.1, from default of 0.9;
  double line_search_accuracy;

  //: Default step length in line search.
  // If, on tracing, the STP is always 1, then you could try setting this to a
  // higher value to see how far along the gradient the minimum typically is.
  // Then set this to a number just below that to get maximally far with the
  // single evaluation.
  double default_step_length;

 private:
  //: Shared constructor helper.
  // Presumably initializes memory, line_search_accuracy and
  // default_step_length to the documented defaults -- body is in the .cxx.
  void init_parameters();
  //: Non-owning pointer to the cost function being minimized.
  vnl_cost_function* f_;
  // NOTE(review): historical discussion kept from the original source --
  // a default constructor was once considered nonsensical, but it is
  // retained because it lets users set the parameters above first.
};
75 
76 #endif // vnl_lbfgs_h_
77