1 /////////////////////////////////////////////////////////////////////////////////
2 //
3 // Levenberg - Marquardt non-linear minimization algorithm
4 // Copyright (C) 2004-05 Manolis Lourakis (lourakis at ics forth gr)
5 // Institute of Computer Science, Foundation for Research & Technology - Hellas
6 // Heraklion, Crete, Greece.
7 //
8 // This program is free software; you can redistribute it and/or modify
9 // it under the terms of the GNU General Public License as published by
10 // the Free Software Foundation; either version 2 of the License, or
11 // (at your option) any later version.
12 //
13 // This program is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 // GNU General Public License for more details.
17 //
18 /////////////////////////////////////////////////////////////////////////////////
19
20 #ifndef LM_REAL // not included by lmbc.c
21 #error This file should not be compiled directly!
22 #endif
23
24
25 /* precision-specific definitions */
26 #define FUNC_STATE LM_ADD_PREFIX(func_state)
27 #define LNSRCH LM_ADD_PREFIX(lnsrch)
28 #define BOXPROJECT LM_ADD_PREFIX(boxProject)
29 #define BOXSCALE LM_ADD_PREFIX(boxScale)
30 #define LEVMAR_BOX_CHECK LM_ADD_PREFIX(levmar_box_check)
31 #define VECNORM LM_ADD_PREFIX(vecnorm)
32 #define LEVMAR_BC_DER LM_ADD_PREFIX(levmar_bc_der)
33 #define LEVMAR_BC_DIF LM_ADD_PREFIX(levmar_bc_dif)
34 #define LEVMAR_FDIF_FORW_JAC_APPROX LM_ADD_PREFIX(levmar_fdif_forw_jac_approx)
35 #define LEVMAR_FDIF_CENT_JAC_APPROX LM_ADD_PREFIX(levmar_fdif_cent_jac_approx)
36 #define LEVMAR_TRANS_MAT_MAT_MULT LM_ADD_PREFIX(levmar_trans_mat_mat_mult)
37 #define LEVMAR_L2NRMXMY LM_ADD_PREFIX(levmar_L2nrmxmy)
38 #define LEVMAR_COVAR LM_ADD_PREFIX(levmar_covar)
39 #define LMBC_DIF_DATA LM_ADD_PREFIX(lmbc_dif_data)
40 #define LMBC_DIF_FUNC LM_ADD_PREFIX(lmbc_dif_func)
41 #define LMBC_DIF_JACF LM_ADD_PREFIX(lmbc_dif_jacf)
42
43 #ifdef HAVE_LAPACK
44 #define AX_EQ_B_LU LM_ADD_PREFIX(Ax_eq_b_LU)
45 #define AX_EQ_B_CHOL LM_ADD_PREFIX(Ax_eq_b_Chol)
46 #define AX_EQ_B_QR LM_ADD_PREFIX(Ax_eq_b_QR)
47 #define AX_EQ_B_QRLS LM_ADD_PREFIX(Ax_eq_b_QRLS)
48 #define AX_EQ_B_SVD LM_ADD_PREFIX(Ax_eq_b_SVD)
49 #define AX_EQ_B_BK LM_ADD_PREFIX(Ax_eq_b_BK)
50 #else
51 #define AX_EQ_B_LU LM_ADD_PREFIX(Ax_eq_b_LU_noLapack)
52 #endif /* HAVE_LAPACK */
53
54 #ifdef HAVE_PLASMA
55 #define AX_EQ_B_PLASMA_CHOL LM_ADD_PREFIX(Ax_eq_b_PLASMA_Chol)
56 #endif
57
58 /* find the median of 3 numbers */
59 #define __MEDIAN3(a, b, c) ( ((a) >= (b))?\
60 ( ((c) >= (a))? (a) : ( ((c) <= (b))? (b) : (c) ) ) : \
61 ( ((c) >= (b))? (b) : ( ((c) <= (a))? (a) : (c) ) ) )
62
63 /* Projections to feasible set \Omega: P_{\Omega}(y) := arg min { ||x - y|| : x \in \Omega}, y \in R^m */
64
65 /* project vector p to a box shaped feasible set. p is a mx1 vector.
66 * Either lb, ub can be NULL. If not NULL, they are mx1 vectors
67 */
/* Project the mx1 vector p onto the box-shaped feasible set [lb, ub]
 * (i.e. clamp each component). Either of lb, ub may be NULL, in which
 * case the corresponding side is unbounded; if non-NULL they are mx1.
 */
static void BOXPROJECT(LM_REAL *p, LM_REAL *lb, LM_REAL *ub, int m)
{
register int i;

  if(lb && ub){ /* bounded on both sides */
    for(i=0; i<m; ++i)
      p[i]=__MEDIAN3(lb[i], p[i], ub[i]); /* median of lb, p, ub clamps p into the box */
  }
  else if(ub){ /* upper bounds only */
    for(i=0; i<m; ++i)
      if(p[i]>ub[i]) p[i]=ub[i];
  }
  else if(lb){ /* lower bounds only */
    for(i=0; i<m; ++i)
      if(p[i]<lb[i]) p[i]=lb[i];
  }
  /* no bounds at all: nothing to do */
}
89 #undef __MEDIAN3
90
91 /* pointwise scaling of bounds with the mx1 vector scl. If div=1 scaling is by 1./scl.
92 * Either lb, ub can be NULL. If not NULL, they are mx1 vectors
93 */
/* Pointwise scaling of the box bounds by the mx1 vector scl; if div!=0 the
 * scaling is by 1./scl instead. Either of lb, ub may be NULL; if non-NULL
 * they are mx1. Components equal to the "no bound" sentinels LM_REAL_MIN /
 * LM_REAL_MAX are left untouched so unbounded sides stay unbounded.
 */
static void BOXSCALE(LM_REAL *lb, LM_REAL *ub, LM_REAL *scl, int m, int div)
{
register int i;

  if(div){ /* divide bounds by scl */
    if(ub)
      for(i=0; i<m; ++i)
        if(ub[i]!=LM_REAL_MAX)
          ub[i]=ub[i]/scl[i];
    if(lb)
      for(i=0; i<m; ++i)
        if(lb[i]!=LM_REAL_MIN)
          lb[i]=lb[i]/scl[i];
  }
  else{ /* multiply bounds by scl */
    if(ub)
      for(i=0; i<m; ++i)
        if(ub[i]!=LM_REAL_MAX)
          ub[i]=ub[i]*scl[i];
    if(lb)
      for(i=0; i<m; ++i)
        if(lb[i]!=LM_REAL_MIN)
          lb[i]=lb[i]*scl[i];
  }
}
143
144 /* compute the norm of a vector in a manner that avoids overflows
145 */
/* Compute the L2 norm of the nx1 vector x in a manner that avoids overflows.
 * With LAPACK available, defers to the BLAS nrm2 routine; otherwise uses the
 * simple scaled summation described by Blue in TOMS78.
 */
static LM_REAL VECNORM(LM_REAL *x, int n)
{
#ifdef HAVE_LAPACK
#define NRM2 LM_MK_BLAS_NAME(nrm2)
extern LM_REAL NRM2(int *n, LM_REAL *dx, int *incx);
int one=1;

  return NRM2(&n, x, &one);
#undef NRM2
#else // no LAPACK, use the simple method described by Blue in TOMS78
register int i;
LM_REAL max, sum, tmp;

  /* max = ||x||_inf */
  for(i=n, max=0.0; i-->0; )
    if(x[i]>max) max=x[i];
    else if(x[i]<-max) max=-x[i];

  /* an all-zero (or empty) vector has norm 0; without this guard the
   * division below would compute 0/0 and return NaN
   */
  if(max==0.0) return 0.0;

  /* sum of squares of the components scaled by 1/max, guarding against overflow */
  for(i=n, sum=0.0; i-->0; ){
    tmp=x[i]/max;
    sum+=tmp*tmp;
  }

  return max*(LM_REAL)sqrt(sum);
#endif /* HAVE_LAPACK */
}
171
/* State bundle handed to the line search routine LNSRCH(): everything it
 * needs to evaluate the objective besides the candidate point itself.
 */
struct FUNC_STATE{
  int n, *nfev;    /* measurement vector dimension; pointer to the function evaluation counter */
  LM_REAL *hx, *x; /* nx1 scratch buffer receiving func output; nx1 measurement vector */
  LM_REAL *lb, *ub; /* mx1 box bounds, either may be NULL (used for projection) */
  void *adata;     /* opaque user data forwarded to func */
};
178
static void
LNSRCH(int m, LM_REAL *x, LM_REAL f, LM_REAL *g, LM_REAL *p, LM_REAL alpha, LM_REAL *xpls,
       LM_REAL *ffpls, void (*func)(LM_REAL *p, LM_REAL *hx, int m, int n, void *adata), struct FUNC_STATE *state,
       int *mxtake, int *iretcd, LM_REAL stepmx, LM_REAL steptl, LM_REAL *sx)
{
/* Find a next newton iterate by backtracking line search.
 * Specifically, finds a \lambda such that for a fixed alpha<0.5 (usually 1e-4),
 * f(x + \lambda*p) <= f(x) + alpha * \lambda * g^T*p
 *
 * Translated (with a few changes) from Schnabel, Koontz & Weiss uncmin.f, v1.3
 * Main changes include the addition of box projection and modification of the scaling
 * logic since uncmin.f operates in the original (unscaled) variable space.

 * PARAMETERS :

 *	m       --> dimension of problem (i.e. number of variables)
 *	x(m)    --> old iterate: x[k-1]
 *	f       --> function value at old iterate, f(x)
 *	g(m)    --> gradient at old iterate, g(x), or approximate
 *	p(m)    --> non-zero newton step
 *	alpha   --> fixed constant < 0.5 for line search (see above)
 *	xpls(m) <-- new iterate x[k]
 *	ffpls   <-- function value at new iterate, f(xpls)
 *	func    --> name of subroutine to evaluate function
 *	state   <--> information other than x and m that func requires.
 *	            state is not modified in xlnsrch (but can be modified by func).
 *	iretcd  <-- return code
 *	mxtake  <-- boolean flag indicating step of maximum length used
 *	stepmx  --> maximum allowable step size
 *	steptl  --> relative step size at which successive iterates
 *	            considered close enough to terminate algorithm
 *	sx(m)   --> diagonal scaling matrix for x, can be NULL

 *	internal variables

 *	sln     newton length
 *	rln     relative length of newton step
*/

register int i, j;
int firstback = 1;
LM_REAL disc;
LM_REAL a3, b;
LM_REAL t1, t2, t3, lambda, tlmbda, rmnlmb;
LM_REAL scl, rln, sln, slp;
LM_REAL tmp1, tmp2;
LM_REAL fpls, pfpls = 0., plmbda = 0.; /* -Wall */

  f*=LM_CNST(0.5); /* caller passes ||e||^2; the search internally works with 0.5*||e||^2 */
  *mxtake = 0;
  *iretcd = 2;
  tmp1 = 0.;
  for (i = m; i-- > 0; )
    tmp1 += p[i] * p[i];
  sln = (LM_REAL)sqrt(tmp1); /* sln = ||p||_2, the newton step length */
  if (sln > stepmx) {
    /* newton step longer than maximum allowed */
    scl = stepmx / sln;
    for (i = m; i-- > 0; ) /* p * scl */
      p[i]*=scl;
    sln = stepmx;
  }
  for (i = m, slp = rln = 0.; i-- > 0; ){
    slp+=g[i]*p[i]; /* g^T * p */

    /* rln accumulates max_i |p[i]| / max(|x[i]|, 1), the relative step length */
    tmp1 = (FABS(x[i])>=LM_CNST(1.))? FABS(x[i]) : LM_CNST(1.);
    tmp2 = FABS(p[i])/tmp1;
    if(rln < tmp2) rln = tmp2;
  }
  rmnlmb = steptl / rln; /* smallest lambda yielding an iterate distinguishable from x */
  lambda = LM_CNST(1.0); /* the full newton step is always tried first */

  /* check if new iterate satisfactory. generate new lambda if necessary. */

  for(j = _LSITMAX_; j-- > 0; ) {
    for (i = m; i-- > 0; )
      xpls[i] = x[i] + lambda * p[i];
    BOXPROJECT(xpls, state->lb, state->ub, m); /* project to feasible set */

    /* evaluate function at new point */
    if(!sx){
      (*func)(xpls, state->hx, m, state->n, state->adata); ++(*(state->nfev));
    }
    else{
      /* func expects unscaled variables: undo the scaling, evaluate, then redo it */
      for (i = m; i-- > 0; ) xpls[i] *= sx[i];
      (*func)(xpls, state->hx, m, state->n, state->adata); ++(*(state->nfev));
      for (i = m; i-- > 0; ) xpls[i] /= sx[i];
    }
    /* ### state->hx=state->x-state->hx, tmp1=||state->hx|| */
#if 1
    tmp1=LEVMAR_L2NRMXMY(state->hx, state->x, state->hx, state->n);
#else
    for(i=0, tmp1=0.0; i<state->n; ++i){
      state->hx[i]=tmp2=state->x[i]-state->hx[i];
      tmp1+=tmp2*tmp2;
    }
#endif
    /* fpls carries the internal 0.5 factor; *ffpls reports plain ||e||^2 to the caller */
    fpls=LM_CNST(0.5)*tmp1; *ffpls=tmp1;

    if (fpls <= f + slp * alpha * lambda) { /* solution found: Armijo sufficient-decrease holds */
      *iretcd = 0;
      if (lambda == LM_CNST(1.) && sln > stepmx * LM_CNST(.99)) *mxtake = 1;
      return;
    }

    /* else : solution not (yet) found */

    /* First find a point with a finite value */

    if (lambda < rmnlmb) {
      /* no satisfactory xpls found sufficiently distinct from x */

      *iretcd = 1;
      return;
    }
    else { /* calculate new lambda */

      /* modifications to cover non-finite values */
      if (!LM_FINITE(fpls)) {
        lambda *= LM_CNST(0.1);
        firstback = 1;
      }
      else {
        if (firstback) { /* first backtrack: quadratic fit */
          tlmbda = -lambda * slp / ((fpls - f - slp) * LM_CNST(2.));
          firstback = 0;
        }
        else { /* all subsequent backtracks: cubic fit */
          t1 = fpls - f - lambda * slp;
          t2 = pfpls - f - plmbda * slp;
          t3 = LM_CNST(1.) / (lambda - plmbda);
          a3 = LM_CNST(3.) * t3 * (t1 / (lambda * lambda)
                - t2 / (plmbda * plmbda));
          b = t3 * (t2 * lambda / (plmbda * plmbda)
                - t1 * plmbda / (lambda * lambda));
          disc = b * b - a3 * slp;
          if (disc > b * b)
            /* only one positive critical point, must be minimum */
            tlmbda = (-b + ((a3 < 0)? -(LM_REAL)sqrt(disc): (LM_REAL)sqrt(disc))) /a3;
          else
            /* both critical points positive, first is minimum */
            tlmbda = (-b + ((a3 < 0)? (LM_REAL)sqrt(disc): -(LM_REAL)sqrt(disc))) /a3;

          if (tlmbda > lambda * LM_CNST(.5)) /* cap: shrink lambda by at least a factor of 2 */
            tlmbda = lambda * LM_CNST(.5);
        }
        plmbda = lambda;
        pfpls = fpls;
        if (tlmbda < lambda * LM_CNST(.1)) /* floor: shrink lambda by at most a factor of 10 */
          lambda *= LM_CNST(.1);
        else
          lambda = tlmbda;
      }
    }
  }
  /* this point is reached when the iterations limit is exceeded */
  *iretcd = 1; /* failed */
  return;
} /* LNSRCH */
338
339 /*
340 * This function seeks the parameter vector p that best describes the measurements
341 * vector x under box constraints.
342 * More precisely, given a vector function func : R^m --> R^n with n>=m,
343 * it finds p s.t. func(p) ~= x, i.e. the squared second order (i.e. L2) norm of
344 * e=x-func(p) is minimized under the constraints lb[i]<=p[i]<=ub[i].
345 * If no lower bound constraint applies for p[i], use -DBL_MAX/-FLT_MAX for lb[i];
346 * If no upper bound constraint applies for p[i], use DBL_MAX/FLT_MAX for ub[i].
347 *
 * This function requires an analytic Jacobian. In case the latter is unavailable,
 * use LEVMAR_BC_DIF() below
350 *
351 * Returns the number of iterations (>=0) if successful, LM_ERROR if failed
352 *
353 * For details, see C. Kanzow, N. Yamashita and M. Fukushima: "Levenberg-Marquardt
354 * methods for constrained nonlinear equations with strong local convergence properties",
355 * Journal of Computational and Applied Mathematics 172, 2004, pp. 375-397.
356 * Also, see K. Madsen, H.B. Nielsen and O. Tingleff's lecture notes on
357 * unconstrained Levenberg-Marquardt at http://www.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
358 *
359 * The algorithm implemented by this function employs projected gradient steps. Since steepest descent
360 * is very sensitive to poor scaling, diagonal scaling has been implemented through the dscl argument:
361 * Instead of minimizing f(p) for p, f(D*q) is minimized for q=D^-1*p, D being a diagonal scaling
362 * matrix whose diagonal equals dscl (see Nocedal-Wright p.27). dscl should contain "typical" magnitudes
363 * for the parameters p. A NULL value for dscl implies no scaling. i.e. D=I.
364 * To account for scaling, the code divides the starting point and box bounds pointwise by dscl. Moreover,
365 * before calling func and jacf the scaling has to be undone (by multiplying), as should be done with
366 * the final point. Note also that jac_q=jac_p*D, where jac_q, jac_p are the jacobians w.r.t. q & p, resp.
367 */
368
LEVMAR_BC_DER(void (* func)(LM_REAL * p,LM_REAL * hx,int m,int n,void * adata),void (* jacf)(LM_REAL * p,LM_REAL * j,int m,int n,void * adata),LM_REAL * p,LM_REAL * x,int m,int n,LM_REAL * lb,LM_REAL * ub,LM_REAL * dscl,int itmax,LM_REAL opts[4],LM_REAL info[LM_INFO_SZ],LM_REAL * work,LM_REAL * covar,void * adata)369 int LEVMAR_BC_DER(
370 void (*func)(LM_REAL *p, LM_REAL *hx, int m, int n, void *adata), /* functional relation describing measurements. A p \in R^m yields a \hat{x} \in R^n */
371 void (*jacf)(LM_REAL *p, LM_REAL *j, int m, int n, void *adata), /* function to evaluate the Jacobian \part x / \part p */
372 LM_REAL *p, /* I/O: initial parameter estimates. On output has the estimated solution */
373 LM_REAL *x, /* I: measurement vector. NULL implies a zero vector */
374 int m, /* I: parameter vector dimension (i.e. #unknowns) */
375 int n, /* I: measurement vector dimension */
376 LM_REAL *lb, /* I: vector of lower bounds. If NULL, no lower bounds apply */
377 LM_REAL *ub, /* I: vector of upper bounds. If NULL, no upper bounds apply */
378 LM_REAL *dscl, /* I: diagonal scaling constants. NULL implies no scaling */
379 int itmax, /* I: maximum number of iterations */
380 LM_REAL opts[4], /* I: minim. options [\mu, \epsilon1, \epsilon2, \epsilon3]. Respectively the scale factor for initial \mu,
381 * stopping thresholds for ||J^T e||_inf, ||Dp||_2 and ||e||_2. Set to NULL for defaults to be used.
382 * Note that ||J^T e||_inf is computed on free (not equal to lb[i] or ub[i]) variables only.
383 */
384 LM_REAL info[LM_INFO_SZ],
385 /* O: information regarding the minimization. Set to NULL if don't care
386 * info[0]= ||e||_2 at initial p.
387 * info[1-4]=[ ||e||_2, ||J^T e||_inf, ||Dp||_2, mu/max[J^T J]_ii ], all computed at estimated p.
388 * info[5]= # iterations,
389 * info[6]=reason for terminating: 1 - stopped by small gradient J^T e
390 * 2 - stopped by small Dp
391 * 3 - stopped by itmax
392 * 4 - singular matrix. Restart from current p with increased mu
393 * 5 - no further error reduction is possible. Restart with increased mu
394 * 6 - stopped by small ||e||_2
395 * 7 - stopped by invalid (i.e. NaN or Inf) "func" values. This is a user error
396 * info[7]= # function evaluations
397 * info[8]= # Jacobian evaluations
398 * info[9]= # linear systems solved, i.e. # attempts for reducing error
399 */
400 LM_REAL *work, /* working memory at least LM_BC_DER_WORKSZ() reals large, allocated if NULL */
401 LM_REAL *covar, /* O: Covariance matrix corresponding to LS solution; mxm. Set to NULL if not needed. */
402 void *adata) /* pointer to possibly additional data, passed uninterpreted to func & jacf.
403 * Set to NULL if not needed
404 */
405 {
406 register int i, j, k, l;
407 int worksz, freework=0, issolved;
408 /* temp work arrays */
409 LM_REAL *e, /* nx1 */
410 *hx, /* \hat{x}_i, nx1 */
411 *jacTe, /* J^T e_i mx1 */
412 *jac, /* nxm */
413 *jacTjac, /* mxm */
414 *Dp, /* mx1 */
415 *diag_jacTjac, /* diagonal of J^T J, mx1 */
416 *pDp, /* p + Dp, mx1 */
417 *sp_pDp=NULL; /* dscl*p or dscl*pDp, mx1 */
418
419 register LM_REAL mu, /* damping constant */
420 tmp; /* mainly used in matrix & vector multiplications */
421 LM_REAL p_eL2, jacTe_inf, pDp_eL2; /* ||e(p)||_2, ||J^T e||_inf, ||e(p+Dp)||_2 */
422 LM_REAL p_L2, Dp_L2=LM_REAL_MAX, dF, dL;
423 LM_REAL tau, eps1, eps2, eps2_sq, eps3;
424 LM_REAL init_p_eL2;
425 int nu=2, nu2, stop=0, nfev, njev=0, nlss=0;
426 const int nm=n*m;
427
428 /* variables for constrained LM */
429 struct FUNC_STATE fstate;
430 LM_REAL alpha=LM_CNST(1e-4), beta=LM_CNST(0.9), gamma=LM_CNST(0.99995), rho=LM_CNST(1e-8);
431 LM_REAL t, t0, jacTeDp;
432 LM_REAL tmin=LM_CNST(1e-12), tming=LM_CNST(1e-18); /* minimum step length for LS and PG steps */
433 const LM_REAL tini=LM_CNST(1.0); /* initial step length for LS and PG steps */
434 int nLMsteps=0, nLSsteps=0, nPGsteps=0, gprevtaken=0;
435 int numactive;
436 int (*linsolver)(LM_REAL *A, LM_REAL *B, LM_REAL *x, int m)=NULL;
437
438 mu=jacTe_inf=t=0.0; tmin=tmin; /* -Wall */
439
440 if(n<m){
441 fprintf(stderr, LCAT(LEVMAR_BC_DER, "(): cannot solve a problem with fewer measurements [%d] than unknowns [%d]\n"), n, m);
442 return LM_ERROR;
443 }
444
445 if(!jacf){
446 fprintf(stderr, RCAT("No function specified for computing the Jacobian in ", LEVMAR_BC_DER)
447 RCAT("().\nIf no such function is available, use ", LEVMAR_BC_DIF) RCAT("() rather than ", LEVMAR_BC_DER) "()\n");
448 return LM_ERROR;
449 }
450
451 if(!LEVMAR_BOX_CHECK(lb, ub, m)){
452 fprintf(stderr, LCAT(LEVMAR_BC_DER, "(): at least one lower bound exceeds the upper one\n"));
453 return LM_ERROR;
454 }
455
456 if(dscl){ /* check that scaling consts are valid */
457 for(i=m; i-->0; )
458 if(dscl[i]<=0.0){
459 fprintf(stderr, LCAT(LEVMAR_BC_DER, "(): scaling constants should be positive (scale %d: %g <= 0)\n"), i, dscl[i]);
460 return LM_ERROR;
461 }
462
463 sp_pDp=(LM_REAL *)malloc(m*sizeof(LM_REAL));
464 if(!sp_pDp){
465 fprintf(stderr, LCAT(LEVMAR_BC_DER, "(): memory allocation request failed\n"));
466 return LM_ERROR;
467 }
468 }
469
470 if(opts){
471 tau=opts[0];
472 eps1=opts[1];
473 eps2=opts[2];
474 eps2_sq=opts[2]*opts[2];
475 eps3=opts[3];
476 }
477 else{ // use default values
478 tau=LM_CNST(LM_INIT_MU);
479 eps1=LM_CNST(LM_STOP_THRESH);
480 eps2=LM_CNST(LM_STOP_THRESH);
481 eps2_sq=LM_CNST(LM_STOP_THRESH)*LM_CNST(LM_STOP_THRESH);
482 eps3=LM_CNST(LM_STOP_THRESH);
483 }
484
485 if(!work){
486 worksz=LM_BC_DER_WORKSZ(m, n); //2*n+4*m + n*m + m*m;
487 work=(LM_REAL *)malloc(worksz*sizeof(LM_REAL)); /* allocate a big chunk in one step */
488 if(!work){
489 fprintf(stderr, LCAT(LEVMAR_BC_DER, "(): memory allocation request failed\n"));
490 return LM_ERROR;
491 }
492 freework=1;
493 }
494
495 /* set up work arrays */
496 e=work;
497 hx=e + n;
498 jacTe=hx + n;
499 jac=jacTe + m;
500 jacTjac=jac + nm;
501 Dp=jacTjac + m*m;
502 diag_jacTjac=Dp + m;
503 pDp=diag_jacTjac + m;
504
505 fstate.n=n;
506 fstate.hx=hx;
507 fstate.x=x;
508 fstate.lb=lb;
509 fstate.ub=ub;
510 fstate.adata=adata;
511 fstate.nfev=&nfev;
512
513 /* see if starting point is within the feasible set */
514 for(i=0; i<m; ++i)
515 pDp[i]=p[i];
516 BOXPROJECT(p, lb, ub, m); /* project to feasible set */
517 /*
518 * for(i=0; i<m; ++i)
519 * if(pDp[i]!=p[i])
520 * fprintf(stderr, RCAT("Warning: component %d of starting point not feasible in ", LEVMAR_BC_DER) "()! [%g projected to %g]\n",
521 * i, pDp[i], p[i]);
522 */
523
524 /* compute e=x - f(p) and its L2 norm */
525 (*func)(p, hx, m, n, adata); nfev=1;
526 /* ### e=x-hx, p_eL2=||e|| */
527 #if 1
528 p_eL2=LEVMAR_L2NRMXMY(e, x, hx, n);
529 #else
530 for(i=0, p_eL2=0.0; i<n; ++i){
531 e[i]=tmp=x[i]-hx[i];
532 p_eL2+=tmp*tmp;
533 }
534 #endif
535 init_p_eL2=p_eL2;
536 if(!LM_FINITE(p_eL2)) stop=7;
537
538 if(dscl){
539 /* scale starting point and constraints */
540 for(i=m; i-->0; ) p[i]/=dscl[i];
541 BOXSCALE(lb, ub, dscl, m, 1);
542 }
543
544 for(k=0; k<itmax && !stop; ++k){
545 /* Note that p and e have been updated at a previous iteration */
546
547 if(p_eL2<=eps3){ /* error is small */
548 stop=6;
549 break;
550 }
551
552 /* Compute the Jacobian J at p, J^T J, J^T e, ||J^T e||_inf and ||p||^2.
553 * Since J^T J is symmetric, its computation can be sped up by computing
554 * only its upper triangular part and copying it to the lower part
555 */
556
557 if(!dscl){
558 (*jacf)(p, jac, m, n, adata); ++njev;
559 }
560 else{
561 for(i=m; i-->0; ) sp_pDp[i]=p[i]*dscl[i];
562 (*jacf)(sp_pDp, jac, m, n, adata); ++njev;
563
564 /* compute jac*D */
565 for(i=n; i-->0; ){
566 register LM_REAL *jacim;
567
568 jacim=jac+i*m;
569 for(j=m; j-->0; )
570 jacim[j]*=dscl[j]; // jac[i*m+j]*=dscl[j];
571 }
572 }
573
574 /* J^T J, J^T e */
575 if(nm<__BLOCKSZ__SQ){ // this is a small problem
576 /* J^T*J_ij = \sum_l J^T_il * J_lj = \sum_l J_li * J_lj.
577 * Thus, the product J^T J can be computed using an outer loop for
578 * l that adds J_li*J_lj to each element ij of the result. Note that
579 * with this scheme, the accesses to J and JtJ are always along rows,
580 * therefore induces less cache misses compared to the straightforward
581 * algorithm for computing the product (i.e., l loop is innermost one).
582 * A similar scheme applies to the computation of J^T e.
583 * However, for large minimization problems (i.e., involving a large number
584 * of unknowns and measurements) for which J/J^T J rows are too large to
585 * fit in the L1 cache, even this scheme incures many cache misses. In
586 * such cases, a cache-efficient blocking scheme is preferable.
587 *
588 * Thanks to John Nitao of Lawrence Livermore Lab for pointing out this
589 * performance problem.
590 *
591 * Note that the non-blocking algorithm is faster on small
592 * problems since in this case it avoids the overheads of blocking.
593 */
594 register LM_REAL alpha, *jaclm, *jacTjacim;
595
596 /* looping downwards saves a few computations */
597 for(i=m*m; i-->0; )
598 jacTjac[i]=0.0;
599 for(i=m; i-->0; )
600 jacTe[i]=0.0;
601
602 for(l=n; l-->0; ){
603 jaclm=jac+l*m;
604 for(i=m; i-->0; ){
605 jacTjacim=jacTjac+i*m;
606 alpha=jaclm[i]; //jac[l*m+i];
607 for(j=i+1; j-->0; ) /* j<=i computes lower triangular part only */
608 jacTjacim[j]+=jaclm[j]*alpha; //jacTjac[i*m+j]+=jac[l*m+j]*alpha
609
610 /* J^T e */
611 jacTe[i]+=alpha*e[l];
612 }
613 }
614
615 for(i=m; i-->0; ) /* copy to upper part */
616 for(j=i+1; j<m; ++j)
617 jacTjac[i*m+j]=jacTjac[j*m+i];
618 }
619 else{ // this is a large problem
620 /* Cache efficient computation of J^T J based on blocking
621 */
622 LEVMAR_TRANS_MAT_MAT_MULT(jac, jacTjac, n, m);
623
624 /* cache efficient computation of J^T e */
625 for(i=0; i<m; ++i)
626 jacTe[i]=0.0;
627
628 for(i=0; i<n; ++i){
629 register LM_REAL *jacrow;
630
631 for(l=0, jacrow=jac+i*m, tmp=e[i]; l<m; ++l)
632 jacTe[l]+=jacrow[l]*tmp;
633 }
634 }
635
636 /* Compute ||J^T e||_inf and ||p||^2. Note that ||J^T e||_inf
637 * is computed for free (i.e. inactive) variables only.
638 * At a local minimum, if p[i]==ub[i] then g[i]>0;
639 * if p[i]==lb[i] g[i]<0; otherwise g[i]=0
640 */
641 for(i=j=numactive=0, p_L2=jacTe_inf=0.0; i<m; ++i){
642 if(ub && p[i]==ub[i]){ ++numactive; if(jacTe[i]>0.0) ++j; }
643 else if(lb && p[i]==lb[i]){ ++numactive; if(jacTe[i]<0.0) ++j; }
644 else if(jacTe_inf < (tmp=FABS(jacTe[i]))) jacTe_inf=tmp;
645
646 diag_jacTjac[i]=jacTjac[i*m+i]; /* save diagonal entries so that augmentation can be later canceled */
647 p_L2+=p[i]*p[i];
648 }
649 //p_L2=sqrt(p_L2);
650
651 #if 0
652 if(!(k%100)){
653 printf("Current estimate: ");
654 for(i=0; i<m; ++i)
655 printf("%.9g ", p[i]);
656 printf("-- errors %.9g %0.9g, #active %d [%d]\n", jacTe_inf, p_eL2, numactive, j);
657 }
658 #endif
659
660 /* check for convergence */
661 if(j==numactive && (jacTe_inf <= eps1)){
662 Dp_L2=0.0; /* no increment for p in this case */
663 stop=1;
664 break;
665 }
666
667 /* compute initial damping factor */
668 if(k==0){
669 if(!lb && !ub){ /* no bounds */
670 for(i=0, tmp=LM_REAL_MIN; i<m; ++i)
671 if(diag_jacTjac[i]>tmp) tmp=diag_jacTjac[i]; /* find max diagonal element */
672 mu=tau*tmp;
673 }
674 else
675 mu=LM_CNST(0.5)*tau*p_eL2; /* use Kanzow's starting mu */
676 }
677
678 /* determine increment using a combination of adaptive damping, line search and projected gradient search */
679 while(1){
680 /* augment normal equations */
681 for(i=0; i<m; ++i)
682 jacTjac[i*m+i]+=mu;
683
684 /* solve augmented equations */
685 #ifdef HAVE_LAPACK
686 /* 7 alternatives are available: LU, Cholesky + Cholesky with PLASMA, LDLt, 2 variants of QR decomposition and SVD.
687 * For matrices with dimensions of at least a few hundreds, the PLASMA implementation of Cholesky is the fastest.
688 * From the serial solvers, Cholesky is the fastest but might occasionally be inapplicable due to numerical round-off;
689 * QR is slower but more robust; SVD is the slowest but most robust; LU is quite robust but
690 * slower than LDLt; LDLt offers a good tradeoff between robustness and speed
691 */
692
693 issolved=AX_EQ_B_BK(jacTjac, jacTe, Dp, m); ++nlss; linsolver=AX_EQ_B_BK;
694 //issolved=AX_EQ_B_LU(jacTjac, jacTe, Dp, m); ++nlss; linsolver=AX_EQ_B_LU;
695 //issolved=AX_EQ_B_CHOL(jacTjac, jacTe, Dp, m); ++nlss; linsolver=AX_EQ_B_CHOL;
696 #ifdef HAVE_PLASMA
697 //issolved=AX_EQ_B_PLASMA_CHOL(jacTjac, jacTe, Dp, m); ++nlss; linsolver=AX_EQ_B_PLASMA_CHOL;
698 #endif
699 //issolved=AX_EQ_B_QR(jacTjac, jacTe, Dp, m); ++nlss; linsolver=AX_EQ_B_QR;
700 //issolved=AX_EQ_B_QRLS(jacTjac, jacTe, Dp, m, m); ++nlss; linsolver=(int (*)(LM_REAL *A, LM_REAL *B, LM_REAL *x, int m))AX_EQ_B_QRLS;
701 //issolved=AX_EQ_B_SVD(jacTjac, jacTe, Dp, m); ++nlss; linsolver=AX_EQ_B_SVD;
702
703 #else
704 /* use the LU included with levmar */
705 issolved=AX_EQ_B_LU(jacTjac, jacTe, Dp, m); ++nlss; linsolver=AX_EQ_B_LU;
706 #endif /* HAVE_LAPACK */
707
708 if(issolved){
709 for(i=0; i<m; ++i)
710 pDp[i]=p[i] + Dp[i];
711
712 /* compute p's new estimate and ||Dp||^2 */
713 BOXPROJECT(pDp, lb, ub, m); /* project to feasible set */
714 for(i=0, Dp_L2=0.0; i<m; ++i){
715 Dp[i]=tmp=pDp[i]-p[i];
716 Dp_L2+=tmp*tmp;
717 }
718 //Dp_L2=sqrt(Dp_L2);
719
720 if(Dp_L2<=eps2_sq*p_L2){ /* relative change in p is small, stop */
721 stop=2;
722 break;
723 }
724
725 if(Dp_L2>=(p_L2+eps2)/(LM_CNST(EPSILON)*LM_CNST(EPSILON))){ /* almost singular */
726 stop=4;
727 break;
728 }
729
730 if(!dscl){
731 (*func)(pDp, hx, m, n, adata); ++nfev; /* evaluate function at p + Dp */
732 }
733 else{
734 for(i=m; i-->0; ) sp_pDp[i]=pDp[i]*dscl[i];
735 (*func)(sp_pDp, hx, m, n, adata); ++nfev; /* evaluate function at p + Dp */
736 }
737
738 /* ### hx=x-hx, pDp_eL2=||hx|| */
739 #if 1
740 pDp_eL2=LEVMAR_L2NRMXMY(hx, x, hx, n);
741 #else
742 for(i=0, pDp_eL2=0.0; i<n; ++i){ /* compute ||e(pDp)||_2 */
743 hx[i]=tmp=x[i]-hx[i];
744 pDp_eL2+=tmp*tmp;
745 }
746 #endif
747 /* the following test ensures that the computation of pDp_eL2 has not overflowed.
748 * Such an overflow does no harm here, thus it is not signalled as an error
749 */
750 if(!LM_FINITE(pDp_eL2) && !LM_FINITE(VECNORM(hx, n))){
751 stop=7;
752 break;
753 }
754
755 if(pDp_eL2<=gamma*p_eL2){
756 for(i=0, dL=0.0; i<m; ++i)
757 dL+=Dp[i]*(mu*Dp[i]+jacTe[i]);
758
759 #if 1
760 if(dL>0.0){
761 dF=p_eL2-pDp_eL2;
762 tmp=(LM_CNST(2.0)*dF/dL-LM_CNST(1.0));
763 tmp=LM_CNST(1.0)-tmp*tmp*tmp;
764 mu=mu*( (tmp>=LM_CNST(ONE_THIRD))? tmp : LM_CNST(ONE_THIRD) );
765 }
766 else{
767 tmp=LM_CNST(0.1)*pDp_eL2; /* pDp_eL2 is the new p_eL2 */
768 mu=(mu>=tmp)? tmp : mu;
769 }
770 #else
771
772 tmp=LM_CNST(0.1)*pDp_eL2; /* pDp_eL2 is the new p_eL2 */
773 mu=(mu>=tmp)? tmp : mu;
774 #endif
775
776 nu=2;
777
778 for(i=0 ; i<m; ++i) /* update p's estimate */
779 p[i]=pDp[i];
780
781 for(i=0; i<n; ++i) /* update e and ||e||_2 */
782 e[i]=hx[i];
783 p_eL2=pDp_eL2;
784 ++nLMsteps;
785 gprevtaken=0;
786 break;
787 }
788 /* note that if the LM step is not taken, code falls through to the LM line search below */
789 }
790 else{
791
792 /* the augmented linear system could not be solved, increase mu */
793
794 mu*=nu;
795 nu2=nu<<1; // 2*nu;
796 if(nu2<=nu){ /* nu has wrapped around (overflown). Thanks to Frank Jordan for spotting this case */
797 stop=5;
798 break;
799 }
800 nu=nu2;
801
802 for(i=0; i<m; ++i) /* restore diagonal J^T J entries */
803 jacTjac[i*m+i]=diag_jacTjac[i];
804
805 continue; /* solve again with increased nu */
806 }
807
808 /* if this point is reached, the LM step did not reduce the error;
809 * see if it is a descent direction
810 */
811
812 /* negate jacTe (i.e. g) & compute g^T * Dp */
813 for(i=0, jacTeDp=0.0; i<m; ++i){
814 jacTe[i]=-jacTe[i];
815 jacTeDp+=jacTe[i]*Dp[i];
816 }
817
818 if(jacTeDp<=-rho*pow(Dp_L2, LM_CNST(_POW_)/LM_CNST(2.0))){
819 /* Dp is a descent direction; do a line search along it */
820 #if 1
821 /* use Schnabel's backtracking line search; it requires fewer "func" evaluations */
822 {
823 int mxtake, iretcd;
824 LM_REAL stepmx, steptl=LM_CNST(1e3)*(LM_REAL)sqrt(LM_REAL_EPSILON);
825
826 tmp=(LM_REAL)sqrt(p_L2); stepmx=LM_CNST(1e3)*( (tmp>=LM_CNST(1.0))? tmp : LM_CNST(1.0) );
827
828 LNSRCH(m, p, p_eL2, jacTe, Dp, alpha, pDp, &pDp_eL2, func, &fstate,
829 &mxtake, &iretcd, stepmx, steptl, dscl); /* NOTE: LNSRCH() updates hx */
830 if(iretcd!=0 || !LM_FINITE(pDp_eL2)) goto gradproj; /* rather inelegant but effective way to handle LNSRCH() failures... */
831 }
832 #else
833 /* use the simpler (but slower!) line search described by Kanzow et al */
834 for(t=tini; t>tmin; t*=beta){
835 for(i=0; i<m; ++i)
836 pDp[i]=p[i] + t*Dp[i];
837 BOXPROJECT(pDp, lb, ub, m); /* project to feasible set */
838
839 if(!dscl){
840 (*func)(pDp, hx, m, n, adata); ++nfev; /* evaluate function at p + t*Dp */
841 }
842 else{
843 for(i=m; i-->0; ) sp_pDp[i]=pDp[i]*dscl[i];
844 (*func)(sp_pDp, hx, m, n, adata); ++nfev; /* evaluate function at p + t*Dp */
845 }
846
847 /* compute ||e(pDp)||_2 */
848 /* ### hx=x-hx, pDp_eL2=||hx|| */
849 #if 1
850 pDp_eL2=LEVMAR_L2NRMXMY(hx, x, hx, n);
851 #else
852 for(i=0, pDp_eL2=0.0; i<n; ++i){
853 hx[i]=tmp=x[i]-hx[i];
854 pDp_eL2+=tmp*tmp;
855 }
856 #endif /* ||e(pDp)||_2 */
857 if(!LM_FINITE(pDp_eL2)) goto gradproj; /* treat as line search failure */
858
859 //if(LM_CNST(0.5)*pDp_eL2<=LM_CNST(0.5)*p_eL2 + t*alpha*jacTeDp) break;
860 if(pDp_eL2<=p_eL2 + LM_CNST(2.0)*t*alpha*jacTeDp) break;
861 }
862 #endif /* line search alternatives */
863
864 ++nLSsteps;
865 gprevtaken=0;
866
867 /* NOTE: new estimate for p is in pDp, associated error in hx and its norm in pDp_eL2.
868 * These values are used below to update their corresponding variables
869 */
870 }
871 else{
872 /* Note that this point can also be reached via a goto when LNSRCH() fails. */
873 gradproj:
874
875 /* jacTe has been negated above. Being a descent direction, it is next used
876 * to make a projected gradient step
877 */
878
879 /* compute ||g|| */
880 for(i=0, tmp=0.0; i<m; ++i)
881 tmp+=jacTe[i]*jacTe[i];
882 tmp=(LM_REAL)sqrt(tmp);
883 tmp=LM_CNST(100.0)/(LM_CNST(1.0)+tmp);
884 t0=(tmp<=tini)? tmp : tini; /* guard against poor scaling & large steps; see (3.50) in C.T. Kelley's book */
885
886 /* if the previous step was along the gradient descent, try to use the t employed in that step */
887 for(t=(gprevtaken)? t : t0; t>tming; t*=beta){
888 for(i=0; i<m; ++i)
889 pDp[i]=p[i] - t*jacTe[i];
890 BOXPROJECT(pDp, lb, ub, m); /* project to feasible set */
891 for(i=0, Dp_L2=0.0; i<m; ++i){
892 Dp[i]=tmp=pDp[i]-p[i];
893 Dp_L2+=tmp*tmp;
894 }
895
896 if(!dscl){
897 (*func)(pDp, hx, m, n, adata); ++nfev; /* evaluate function at p - t*g */
898 }
899 else{
900 for(i=m; i-->0; ) sp_pDp[i]=pDp[i]*dscl[i];
901 (*func)(sp_pDp, hx, m, n, adata); ++nfev; /* evaluate function at p - t*g */
902 }
903
904 /* compute ||e(pDp)||_2 */
905 /* ### hx=x-hx, pDp_eL2=||hx|| */
906 #if 1
907 pDp_eL2=LEVMAR_L2NRMXMY(hx, x, hx, n);
908 #else
909 for(i=0, pDp_eL2=0.0; i<n; ++i){
910 hx[i]=tmp=x[i]-hx[i];
911 pDp_eL2+=tmp*tmp;
912 }
913 #endif
914 /* the following test ensures that the computation of pDp_eL2 has not overflowed.
915 * Such an overflow does no harm here, thus it is not signalled as an error
916 */
917 if(!LM_FINITE(pDp_eL2) && !LM_FINITE(VECNORM(hx, n))){
918 stop=7;
919 goto breaknested;
920 }
921
922 /* compute ||g^T * Dp||. Note that if pDp has not been altered by projection
923 * (i.e. BOXPROJECT), jacTeDp=-t*||g||^2
924 */
925 for(i=0, jacTeDp=0.0; i<m; ++i)
926 jacTeDp+=jacTe[i]*Dp[i];
927
928 if(gprevtaken && pDp_eL2<=p_eL2 + LM_CNST(2.0)*LM_CNST(0.99999)*jacTeDp){ /* starting t too small */
929 t=t0;
930 gprevtaken=0;
931 continue;
932 }
933 //if(LM_CNST(0.5)*pDp_eL2<=LM_CNST(0.5)*p_eL2 + alpha*jacTeDp) terminatePGLS;
934 if(pDp_eL2<=p_eL2 + LM_CNST(2.0)*alpha*jacTeDp) goto terminatePGLS;
935
936 //if(pDp_eL2<=p_eL2 - LM_CNST(2.0)*alpha/t*Dp_L2) goto terminatePGLS; // sufficient decrease condition proposed by Kelley in (5.13)
937 }
938
939 /* if this point is reached then the gradient line search has failed */
940 gprevtaken=0;
941 break;
942
943 terminatePGLS:
944
945 ++nPGsteps;
946 gprevtaken=1;
947 /* NOTE: new estimate for p is in pDp, associated error in hx and its norm in pDp_eL2 */
948 }
949
950 /* update using computed values */
951
952 for(i=0, Dp_L2=0.0; i<m; ++i){
953 tmp=pDp[i]-p[i];
954 Dp_L2+=tmp*tmp;
955 }
956 //Dp_L2=sqrt(Dp_L2);
957
958 if(Dp_L2<=eps2_sq*p_L2){ /* relative change in p is small, stop */
959 stop=2;
960 break;
961 }
962
963 for(i=0 ; i<m; ++i) /* update p's estimate */
964 p[i]=pDp[i];
965
966 for(i=0; i<n; ++i) /* update e and ||e||_2 */
967 e[i]=hx[i];
968 p_eL2=pDp_eL2;
969 break;
970 } /* inner loop */
971 }
972
973 breaknested: /* NOTE: this point is also reached via an explicit goto! */
974
975 if(k>=itmax) stop=3;
976
977 for(i=0; i<m; ++i) /* restore diagonal J^T J entries */
978 jacTjac[i*m+i]=diag_jacTjac[i];
979
980 if(info){
981 info[0]=init_p_eL2;
982 info[1]=p_eL2;
983 info[2]=jacTe_inf;
984 info[3]=Dp_L2;
985 for(i=0, tmp=LM_REAL_MIN; i<m; ++i)
986 if(tmp<jacTjac[i*m+i]) tmp=jacTjac[i*m+i];
987 info[4]=mu/tmp;
988 info[5]=(LM_REAL)k;
989 info[6]=(LM_REAL)stop;
990 info[7]=(LM_REAL)nfev;
991 info[8]=(LM_REAL)njev;
992 info[9]=(LM_REAL)nlss;
993 }
994
995 /* covariance matrix */
996 if(covar){
997 LEVMAR_COVAR(jacTjac, covar, p_eL2, m, n);
998
999 if(dscl){ /* correct for the scaling */
1000 for(i=m; i-->0; )
1001 for(j=m; j-->0; )
1002 covar[i*m+j]*=(dscl[i]*dscl[j]);
1003 }
1004 }
1005
1006 if(freework) free(work);
1007
1008 #ifdef LINSOLVERS_RETAIN_MEMORY
1009 if(linsolver) (*linsolver)(NULL, NULL, NULL, 0);
1010 #endif
1011
1012 #if 0
1013 printf("%d LM steps, %d line search, %d projected gradient\n", nLMsteps, nLSsteps, nPGsteps);
1014 #endif
1015
1016 if(dscl){
1017 /* scale final point and constraints */
1018 for(i=0; i<m; ++i) p[i]*=dscl[i];
1019 BOXSCALE(lb, ub, dscl, m, 0);
1020 free(sp_pDp);
1021 }
1022
1023 return (stop!=4 && stop!=7)? k : LM_ERROR;
1024 }
1025
1026 /* following struct & LMBC_DIF_XXX functions won't be necessary if a true secant
1027 * version of LEVMAR_BC_DIF() is implemented...
1028 */
/* bundles everything LMBC_DIF_FUNC/LMBC_DIF_JACF need to forward calls to the
 * user-supplied function and to approximate its Jacobian by finite differences
 */
struct LMBC_DIF_DATA{
    int ffdif; // nonzero if forward differencing is used; zero selects central differencing
    void (*func)(LM_REAL *p, LM_REAL *hx, int m, int n, void *adata); // user-supplied function mapping p to \hat{x}
    LM_REAL *hx, *hxx; // scratch vectors (n reals each) for function values; allocated as one 2*n chunk, hxx=hx+n
    void *adata; // user data, passed through to func uninterpreted
    LM_REAL delta; // step used in the finite difference approximation to the Jacobian
};
1036
/* thin adapter with the functional signature expected by LEVMAR_BC_DER():
 * unpacks the LMBC_DIF_DATA struct and forwards the call to the user-supplied
 * function together with the user's own data pointer
 */
static void LMBC_DIF_FUNC(LM_REAL *p, LM_REAL *hx, int m, int n, void *data)
{
struct LMBC_DIF_DATA *dta=(struct LMBC_DIF_DATA *)data;

  /* delegate to the user-supplied function, passing it the user-supplied data */
  dta->func(p, hx, m, n, dta->adata);
}
1044
/* Jacobian adapter for LEVMAR_BC_DER(): approximates the Jacobian of the
 * user-supplied function at p with finite differences. The ffdif flag in the
 * LMBC_DIF_DATA struct selects forward (nonzero) or central (zero) differences.
 */
static void LMBC_DIF_JACF(LM_REAL *p, LM_REAL *jac, int m, int n, void *data)
{
struct LMBC_DIF_DATA *dta=(struct LMBC_DIF_DATA *)data;

  if(!dta->ffdif){
    /* central differences need no function value at p itself */
    LEVMAR_FDIF_CENT_JAC_APPROX(dta->func, p, dta->hx, dta->hxx, dta->delta, jac, m, n, dta->adata);
    return;
  }

  /* forward differences: first evaluate the user-supplied function at p */
  dta->func(p, dta->hx, m, n, dta->adata);
  LEVMAR_FDIF_FORW_JAC_APPROX(dta->func, p, dta->hx, dta->hxx, dta->delta, jac, m, n, dta->adata);
}
1057
1058
1059 /* No Jacobian version of the LEVMAR_BC_DER() function above: the Jacobian is approximated with
1060 * the aid of finite differences (forward or central, see the comment for the opts argument)
1061 * Ideally, this function should be implemented with a secant approach. Currently, it just calls
1062 * LEVMAR_BC_DER()
1063 */
/* No Jacobian version of the LEVMAR_BC_DER() function above: the Jacobian is approximated with
 * the aid of finite differences (forward or central, see the comment for the opts argument).
 * Ideally, this function should be implemented with a secant approach. Currently, it just calls
 * LEVMAR_BC_DER() with finite-difference wrappers around the user-supplied function.
 */
int LEVMAR_BC_DIF(
  void (*func)(LM_REAL *p, LM_REAL *hx, int m, int n, void *adata), /* functional relation describing measurements. A p \in R^m yields a \hat{x} \in R^n */
  LM_REAL *p,         /* I/O: initial parameter estimates. On output has the estimated solution */
  LM_REAL *x,         /* I: measurement vector. NULL implies a zero vector */
  int m,              /* I: parameter vector dimension (i.e. #unknowns) */
  int n,              /* I: measurement vector dimension */
  LM_REAL *lb,        /* I: vector of lower bounds. If NULL, no lower bounds apply */
  LM_REAL *ub,        /* I: vector of upper bounds. If NULL, no upper bounds apply */
  LM_REAL *dscl,      /* I: diagonal scaling constants. NULL implies no scaling */
  int itmax,          /* I: maximum number of iterations */
  LM_REAL opts[5],    /* I: opts[0-4] = minim. options [\mu, \epsilon1, \epsilon2, \epsilon3, \delta]. Respectively the
                       * scale factor for initial \mu, stopping thresholds for ||J^T e||_inf, ||Dp||_2 and ||e||_2 and
                       * the step used in difference approximation to the Jacobian. Set to NULL for defaults to be used.
                       * If \delta<0, the Jacobian is approximated with central differences which are more accurate
                       * (but slower!) compared to the forward differences employed by default.
                       */
  LM_REAL info[LM_INFO_SZ],
                      /* O: information regarding the minimization. Set to NULL if don't care
                       * info[0]= ||e||_2 at initial p.
                       * info[1-4]=[ ||e||_2, ||J^T e||_inf, ||Dp||_2, mu/max[J^T J]_ii ], all computed at estimated p.
                       * info[5]= # iterations,
                       * info[6]=reason for terminating: 1 - stopped by small gradient J^T e
                       *                                 2 - stopped by small Dp
                       *                                 3 - stopped by itmax
                       *                                 4 - singular matrix. Restart from current p with increased mu
                       *                                 5 - no further error reduction is possible. Restart with increased mu
                       *                                 6 - stopped by small ||e||_2
                       *                                 7 - stopped by invalid (i.e. NaN or Inf) "func" values. This is a user error
                       * info[7]= # function evaluations
                       * info[8]= # Jacobian evaluations
                       * info[9]= # linear systems solved, i.e. # attempts for reducing error
                       */
  LM_REAL *work,      /* working memory at least LM_BC_DIF_WORKSZ() reals large, allocated if NULL */
  LM_REAL *covar,     /* O: Covariance matrix corresponding to LS solution; mxm. Set to NULL if not needed. */
  void *adata)        /* pointer to possibly additional data, passed uninterpreted to func.
                       * Set to NULL if not needed
                       */
{
  int ret;
  struct LMBC_DIF_DATA data;

  /* a negative opts[4] requests central differences; its magnitude is the step */
  data.ffdif= !opts || opts[4]>=0.0;
  data.func=func;
  data.adata=adata;
  data.delta=(opts)? FABS(opts[4]) : (LM_REAL)LM_DIFF_DELTA;

  /* single allocation covering both n-sized scratch vectors */
  data.hx=(LM_REAL *)malloc(2*n*sizeof(LM_REAL));
  if(!data.hx){
    fprintf(stderr, LCAT(LEVMAR_BC_DIF, "(): memory allocation request failed\n"));
    return LM_ERROR;
  }
  data.hxx=data.hx+n;

  ret=LEVMAR_BC_DER(LMBC_DIF_FUNC, LMBC_DIF_JACF, p, x, m, n, lb, ub, dscl, itmax, opts, info, work, covar, (void *)&data);

  if(info){
    /* correct the number of function calls: each Jacobian evaluation costs
     * m+1 function calls with forward differences, 2*m with central ones
     */
    info[7]+=(data.ffdif)? info[8]*(m+1) : info[8]*(2*m);
  }

  free(data.hx);

  return ret;
}
1132
1133 /* undefine everything. THIS MUST REMAIN AT THE END OF THE FILE */
1134 #undef FUNC_STATE
1135 #undef LNSRCH
1136 #undef BOXPROJECT
1137 #undef BOXSCALE
1138 #undef LEVMAR_BOX_CHECK
1139 #undef VECNORM
1140 #undef LEVMAR_BC_DER
1141 #undef LMBC_DIF_DATA
1142 #undef LMBC_DIF_FUNC
1143 #undef LMBC_DIF_JACF
1144 #undef LEVMAR_BC_DIF
1145 #undef LEVMAR_FDIF_FORW_JAC_APPROX
1146 #undef LEVMAR_FDIF_CENT_JAC_APPROX
1147 #undef LEVMAR_COVAR
1148 #undef LEVMAR_TRANS_MAT_MAT_MULT
1149 #undef LEVMAR_L2NRMXMY
1150 #undef AX_EQ_B_LU
1151 #undef AX_EQ_B_CHOL
1152 #undef AX_EQ_B_PLASMA_CHOL
1153 #undef AX_EQ_B_QR
1154 #undef AX_EQ_B_QRLS
1155 #undef AX_EQ_B_SVD
1156 #undef AX_EQ_B_BK
1157