1#!/usr/bin/env python
2# Author : Pierre Schnizer
3"""
4Wrapper over the functions as described in Chapter 33 of the
5reference manual.
6
Routines for approximating a data set with a nonlinear function
8
9"""
10import pygsl
11from . import _callback
12from .gsl_function import gsl_multifit_function_fdf, gsl_multifit_function
13from ._generic_solver import _generic_solver
14
class _fsolver(_generic_solver):
    """Base wrapper for a GSL multifit solver using function values only.

    Subclasses (or sibling classes) bind the C callbacks below and set
    ``type`` to a ``gsl_multifit_fsolver_type`` instance.  The wrapped C
    solver object is allocated in ``__init__`` and released by the base
    class via ``_free``.
    """
    type = None
    _alloc     = _callback.gsl_multifit_fsolver_alloc
    _free      = _callback.gsl_multifit_fsolver_free
    _set       = _callback.gsl_multifit_fsolver_set
    _name      = _callback.gsl_multifit_fsolver_name
    _position  = _callback.gsl_multifit_fsolver_position
    _iterate   = _callback.gsl_multifit_fsolver_iterate
    _getx      = _callback.gsl_multifit_fsolver_getx
    _getdx     = _callback.gsl_multifit_fsolver_getdx
    _getf      = _callback.gsl_multifit_fsolver_getf

    def __init__(self, system, n, p):
        """Allocate the underlying GSL solver.

        Args:
            system: the fit system (e.g. :class:`gsl_multifit_function`);
                must provide ``get_ptr()``.
            n: number of residuals (data points).
            p: number of fit parameters.
        """
        self._ptr = None
        # Sanity check: a subclass must have bound all required callbacks
        # before the C-level allocation is attempted.
        assert self._free is not None
        assert self._alloc is not None
        assert self._set is not None
        assert self._name is not None
        assert self._iterate is not None
        self._ptr = self._alloc(self.type, n, p)
        self._isset = 0
        self.system = system

    def set(self, x):
        """Initialise the solver with the starting vector ``x``."""
        f = self.system.get_ptr()
        self._set(self._ptr, f, x)
        self._isset = 1

    def position(self, x):
        """Call the solver's position routine for the vector ``x``."""
        f = self.system.get_ptr()
        self._position(self._ptr, f, x)
        self._isset = 1

    def getx(self):
        """Return the current position (parameter vector)."""
        return self._getx(self._ptr)

    def getdx(self):
        """Return the last step taken by the solver."""
        return self._getdx(self._ptr)

    def getf(self):
        """Return the residual vector at the current position."""
        return self._getf(self._ptr)
57
58
class _fdfsolver(_fsolver):
    """Base wrapper for a GSL multifit solver that also uses the Jacobian.

    Rebinds every callback of :class:`_fsolver` to the ``fdfsolver``
    variants and adds access to the Jacobian matrix.
    """
    type = None
    _alloc    = _callback.gsl_multifit_fdfsolver_alloc
    _free     = _callback.gsl_multifit_fdfsolver_free
    _set      = _callback.gsl_multifit_fdfsolver_set
    _name     = _callback.gsl_multifit_fdfsolver_name
    _position = _callback.gsl_multifit_fdfsolver_position
    _iterate  = _callback.gsl_multifit_fdfsolver_iterate
    _getx     = _callback.gsl_multifit_fdfsolver_getx
    _getdx    = _callback.gsl_multifit_fdfsolver_getdx
    _getf     = _callback.gsl_multifit_fdfsolver_getf
    _getJ     = _callback.gsl_multifit_fdfsolver_getJ

    def getJ(self):
        """Return the Jacobian matrix at the current position."""
        return self._getJ(self._ptr)
74
class lmder(_fdfsolver):
    r"""unscaled Levenberg-Marquardt solver

    This is an unscaled version of the LMDER algorithm.  The elements
    of the diagonal scaling matrix D are set to 1.  This algorithm may
    be useful in circumstances where the scaled version of LMDER
    converges too slowly, or the function is already scaled
    appropriately.

    See also :c:type:`gsl_multifit_fdfsolver_lmder`
    """
    type = _callback.cvar.gsl_multifit_fdfsolver_lmder
87
class lmsder(_fdfsolver):
    r"""scaled Levenberg-Marquardt solver

    This is a robust and efficient version of the Levenberg-Marquardt
    algorithm as implemented in the scaled LMDER routine in MINPACK.
    Minpack was written by Jorge J. More', Burton S. Garbow and
    Kenneth E. Hillstrom.

    The algorithm uses a generalized trust region to keep each step
    under control.  In order to be accepted a proposed new position
    :math:`x'`
    must satisfy the condition :math:`|D (x' - x)| < \delta`, where D is a
    diagonal scaling matrix and :math:`\delta` is the size of the trust
    region.  The components of D are computed internally, using the
    column norms of the Jacobian to estimate the sensitivity of the
    residual to each component of x.  This improves the behavior of the
    algorithm for badly scaled functions.

    On each iteration the algorithm attempts to minimize the linear
    system :math:`|F + J p|` subject to the constraint
    :math:`|D p| < \Delta`.  The
    solution to this constrained linear system is found using the
    Levenberg-Marquardt method.

    The proposed step is now tested by evaluating the function at the
    resulting point, :math:`x'`.  If the step reduces the norm of the function
    sufficiently, and follows the predicted behavior of the function
    within the trust region, then it is accepted and size of the trust
    region is increased.  If the proposed step fails to improve the
    solution, or differs significantly from the expected behavior
    within the trust region, then the size of the trust region is
    decreased and another trial step is computed.

    The algorithm also monitors the progress of the solution and
    returns an error if the changes in the solution are smaller than
    the machine precision.  The possible error codes are,

    `GSL_ETOLF'
          the decrease in the function falls below machine precision

    `GSL_ETOLX'
          the change in the position vector falls below machine
          precision

    `GSL_ETOLG'
          the norm of the gradient, relative to the norm of the
          function, falls below machine precision

    These error codes indicate that further iterations will be
    unlikely to change the solution from its current value.

    See also :c:type:`gsl_multifit_fdfsolver_lmsder`

    """
    type = _callback.cvar.gsl_multifit_fdfsolver_lmsder
142
_t_type = _callback.cvar.gsl_multifit_fdfsolver_lmniel
if _t_type:
    # Only available for GSL 2. or above
    class lmniel(_fdfsolver):
        r"""Levenberg-Marquardt solver

        This is a Levenberg-Marquardt solver based on a smoother updating procedure for
        the damping parameter :math:`\mu` proposed by Nielsen, 1999. It does not use a
        trust region approach and only performs rudimentary scaling and is therefore not
        as robust as lmsder. However, on each iteration it solves the normal equation
        system to compute the next step :math:`p`:

        .. math:: (J^T J + \mu I) \, p = - J^T f

        which makes it a much more practical method for problems with a large number of
        residuals :math:`(n >> p)`, since only the p-by-p matrix :math:`J^T J` is decomposed rather than the
        full n-by-p Jacobian. This makes a significant difference in efficiency when solving
        systems with large amounts of data. While not as robust as lmsder, this algorithm
        has proven effective on a wide class of problems.

        See also :c:type:`gsl_multifit_fdfsolver_lmniel`
        """
        type = _t_type
166
167
def test_delta(dx, x, epsabs, epsrel):
    r"""Test convergence of the iteration sequence.

    Compares the last step ``dx`` with the absolute error ``epsabs``
    and the relative error ``epsrel`` applied to the current position
    ``x``.  The test returns `GSL_SUCCESS' if the condition

    .. math::     |dx_i| < epsabs + epsrel |x_i|

    holds for every component of ``x``, and `GSL_CONTINUE' otherwise.
    """
    status = _callback.gsl_multifit_test_delta(dx, x, epsabs, epsrel)
    return status
181
def test_gradient(dx, epsabs):
    r"""Test the residual gradient against an absolute error bound.

    The first argument (named ``dx`` for historical reasons) is the
    residual gradient ``g``; it is tested against the absolute error
    bound ``epsabs``.  Mathematically, the gradient should be exactly
    zero at the minimum.  The test returns `GSL_SUCCESS' if

    .. math::     \sum_i |g_i| < epsabs

    and `GSL_CONTINUE' otherwise.  This criterion is suitable for
    situations where the precise location of the minimum, x, is
    unimportant provided a value can be found where the gradient is
    small enough.
    """
    status = _callback.gsl_multifit_test_gradient(dx, epsabs)
    return status
198
def gradient(J, f):
    r"""Compute the gradient of the objective function.

    Evaluates the gradient g of
    :math:`\Phi(x) = (1/2) ||F(x)||^2`
    from the Jacobian matrix ``J`` and the function values ``f``,
    using the formula :math:`g = J^T f`.
    """
    result = _callback.gsl_multifit_gradient(J, f)
    return result
208
def covar(J, epsrel):
    r"""Compute the covariance matrix of the best-fit parameters.

    Uses the Jacobian matrix ``J`` to compute the covariance matrix of
    the best-fit parameters.  The parameter ``epsrel`` is used to
    remove linear-dependent columns when ``J`` is rank deficient.

    The covariance matrix is given by

    .. math::      covar = (J^T J)^{-1}

    and is computed by QR decomposition of J with column-pivoting.  Any
    columns of R which satisfy

    .. math::      |R_{kk}| <= epsrel |R_{11}|

    are considered linearly-dependent and are excluded from the
    covariance matrix (the corresponding rows and columns of the
    covariance matrix are set to zero).

    Args:
       J: Jacobian matrix
       epsrel: relative tolerance used to detect linear dependence

    Returns:
       the covariance matrix
    """
    result = _callback.gsl_multifit_covar(J, epsrel)
    return result
234
235#fsolver_driver = _callback.gsl_multifit_fsolver_driver
236#fdfsolver_driver = _callback.gsl_multifit_fdfsolver_driver
237