1 /* Siconos is a program dedicated to modeling, simulation and control
2 * of non smooth dynamical systems.
3 *
4 * Copyright 2021 INRIA.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19 #include <float.h> // for DBL_EPSILON
20 #include <math.h> // for fabs, NAN
21 #include <stdio.h> // for printf, NULL
22 #include <stdlib.h> // for calloc, free, malloc
23 #include "SiconosBlas.h" // for cblas_daxpy, cblas_d...
24 #include "NumericsFwd.h" // for SolverOptions, Varia...
25 #include "SolverOptions.h" // for SolverOptions, SICON...
26 #include "VI_cst.h" // for SICONOS_VI_IPARAM_ER...
27 #include "VariationalInequality.h" // for VariationalInequality
28 #include "VariationalInequality_Solvers.h" // for variationalInequalit...
29 #include "VariationalInequality_computeError.h" // for variationalInequalit...
30 #include "siconos_debug.h" // for DEBUG_PRINTF, DEBUG_...
31 #include "numerics_verbose.h" // for verbose, numerics_error
32
33 #ifdef DEBUG_MESSAGES
34 #include "NumericsVector.h"
35 #endif
36
/** Decide whether the current iterate satisfies the stopping criterion.
 *
 *  When the incremental residual `error` falls below `*tolerance`, the full
 *  error criterion is always evaluated and reported.  Under the
 *  SICONOS_VI_ERROR_EVALUATION_LIGHT_WITH_FULL_FINAL strategy, if the full
 *  error still exceeds dparam[SICONOS_DPARAM_TOL], the incremental tolerance
 *  `*tolerance` is tightened proportionally and the iteration continues.
 *
 *  \param error      current (possibly incremental) residual
 *  \param tolerance  in/out, required incremental precision; may be tightened
 *  \param iter       current iteration number (for logging)
 *  \param options    solver options (error-evaluation strategy, target tolerance)
 *  \param problem    the VI problem, needed to evaluate the full error
 *  \param z, w       current iterate and its image, passed to the error routine
 *  \param rho        current stepsize (for logging only)
 *  \return 1 if the solver has not converged yet, 0 otherwise
 */
static int determine_convergence(double error, double *tolerance, int iter,
                                 SolverOptions *options,
                                 VariationalInequality* problem,
                                 double *z, double *w, double rho)
{
  /* Not yet below the incremental tolerance: report and keep iterating. */
  if(error >= *tolerance)
  {
    if(verbose > 0)
      printf("--------------- VI - Extra Gradient (EG) - Iteration %i rho = %14.7e error = %14.7e > %10.5e \n", iter, rho, error, *tolerance);
    return 1;
  }

  if(verbose > 0)
    printf("--------------- VI - Extra Gradient (EG) - Iteration %i "
           "Residual = %14.7e < %7.3e\n", iter, error, *tolerance);

  /* Always evaluate the full error criterion once the incremental one holds. */
  double absolute_error = 0.0;
  variationalInequality_computeError(problem, z, w, *tolerance, options, &absolute_error);
  if(verbose > 0)
    printf("-------------- VI - Extra Gradient (EG)- Full error criterion = %e\n", absolute_error);

  /* Without the full-final strategy, the incremental criterion is enough. */
  if(options->iparam[SICONOS_VI_IPARAM_ERROR_EVALUATION] != SICONOS_VI_ERROR_EVALUATION_LIGHT_WITH_FULL_FINAL)
    return 0;

  if(absolute_error > options->dparam[SICONOS_DPARAM_TOL])
  {
    if(verbose > 0)
    {
      printf("error = %e ", error);
      printf("absolute_error = %e ", absolute_error);
      printf("options->dparam[SICONOS_DPARAM_TOL]= %e\n", options->dparam[SICONOS_DPARAM_TOL]);
    }

    /* Tighten the incremental tolerance proportionally to the gap between
     * the incremental and the full error, and keep iterating. */
    *tolerance = error/absolute_error*options->dparam[SICONOS_DPARAM_TOL];
    if(verbose > 0)
      printf("-------------- VI - Extra Gradient (EG)- We modify the required incremental precision to reach accuracy to %e\n", *tolerance);
    return 1;
  }

  if(verbose > 0)
    printf("-------------- VI - Extra Gradient (EG) - The incremental precision is sufficient to reach accuracy to %e\n", *tolerance);
  return 0;
}
89
90 static
compute_error(VariationalInequality * problem,double * z,double * w,double norm_z_z_k,double tolerance,SolverOptions * options)91 double compute_error(VariationalInequality* problem,
92 double *z, double *w, double norm_z_z_k,
93 double tolerance,
94 SolverOptions * options)
95 {
96
97 double error;
98 if(options->iparam[SICONOS_VI_IPARAM_ERROR_EVALUATION]==SICONOS_VI_ERROR_EVALUATION_FULL)
99 variationalInequality_computeError(problem, z, w, tolerance, options, &error);
100 else if(options->iparam[SICONOS_VI_IPARAM_ERROR_EVALUATION]==SICONOS_VI_ERROR_EVALUATION_LIGHT ||
101 options->iparam[SICONOS_VI_IPARAM_ERROR_EVALUATION]==SICONOS_VI_ERROR_EVALUATION_LIGHT_WITH_FULL_FINAL)
102 {
103 /* cblas_dcopy(n , x , 1 , xtmp, 1); */
104 /* cblas_daxpy(n, -1.0, x_k , 1, xtmp , 1) ; */
105 /* error = cblas_dnorm2(n,xtmp,1); */
106 double norm_z = cblas_dnrm2(problem->size, z, 1);
107 error = norm_z_z_k;
108 if(fabs(norm_z) > DBL_EPSILON)
109 error /= norm_z;
110 }
111 else
112 {
113 return NAN;
114 // unreachable code => comment.
115 // numerics_error("compute_error(VariationalInequality* problem, ...)", "unknown error evaluation strategy");
116 }
117 return error;
118 }
/** Extra Gradient (EG) solver for a Variational Inequality:
 *  find x in X such that <F(x), y - x> >= 0 for all y in X.
 *
 *  Two modes are selected by the sign of dparam[SICONOS_VI_DPARAM_RHO]:
 *   - rho > 0  : fixed stepsize;
 *   - rho <= 0 : variable stepsize with an Armijo-type line search, using
 *     either the Khotbotov ratio (SICONOS_VI_LS_ARMIJO, default) or the
 *     Solodov-Tseng ratio (SICONOS_VI_LS_SOLODOV).
 *
 *  \param problem the VariationalInequality (F and ProjectionOnX callbacks)
 *  \param x       in/out: initial guess on input, solution on output (size problem->size)
 *  \param w       in/out: work/image vector of size problem->size
 *  \param info    out: 0 if convergence was reached, 1 otherwise
 *  \param options solver parameters; on output dparam[SICONOS_DPARAM_TOL],
 *                 dparam[SICONOS_DPARAM_RESIDU], iparam[SICONOS_IPARAM_ITER_DONE]
 *                 and, in variable-step mode, dparam[SICONOS_VI_DPARAM_RHO]
 *                 (stored negated) are updated.
 */
void variationalInequality_ExtraGradient(VariationalInequality* problem, double *x, double *w, int* info, SolverOptions* options)
{
  DEBUG_BEGIN("variationalInequality_ExtraGradient(VariationalInequality* problem, ...)\n")
  int* iparam = options->iparam;
  double* dparam = options->dparam;
  /* Problem size */
  int n = problem->size;
  /* Maximum number of iterations */
  int itermax = iparam[SICONOS_IPARAM_MAX_ITER];
  /* Tolerance */
  double tolerance = dparam[SICONOS_DPARAM_TOL];

  DEBUG_EXPR(NV_display(x,n););
  DEBUG_EXPR(NV_display(w,n););
  /***** Fixed point iterations *****/
  int iter = 0;      /* Current iteration number */
  double error = 1.; /* Current error */
  int hasNotConverged = 1;

  double * xtmp = (double *)calloc(n, sizeof(double));
  double * wtmp = (double *)calloc(n, sizeof(double));

  double rho = 0.0, rho_k = 0.0;
  int isVariable = 0;

  if(dparam[SICONOS_VI_DPARAM_RHO] > 0.0)
  {
    rho = dparam[SICONOS_VI_DPARAM_RHO];
    if(verbose > 0)
    {
      printf("--------------- VI - Extra Gradient (EG) - Fixed stepsize with rho = %14.7e \n", rho);
    }
  }
  else
  {
    /* Variable step in iterations; the stored rho is negated by convention. */
    isVariable = 1;
    rho = -dparam[SICONOS_VI_DPARAM_RHO];
    if(verbose > 0)
    {
      printf("--------------- VI - Extra Gradient (EG) - Variable stepsize with starting rho = %14.7e \n", rho);
    }
  }

  /* Variables for the line search */
  int success = 0;
  DEBUG_EXPR_WE(double error_k;);
  double light_error_sum = 0.0;
  int ls_iter = 0;
  int ls_itermax = 10;
  double tau = dparam[SICONOS_VI_DPARAM_LS_TAU],
         tauinv = dparam[SICONOS_VI_DPARAM_LS_TAUINV],
         L = dparam[SICONOS_VI_DPARAM_LS_L], Lmin = dparam[SICONOS_VI_DPARAM_LS_LMIN];
  double a1 = 0.0, a2 = 0.0;

  /* x_k (resp. w_k) stores the iterate (resp. F at the iterate) at the
   * beginning of an iteration.  They are needed in BOTH modes: the fixed
   * stepsize loop uses x_k for the light (incremental) error evaluation,
   * which is the default strategy.  They were previously allocated only in
   * variable-step mode, so the fixed-step loop dereferenced a NULL x_k. */
  double * x_k = (double *)malloc(n * sizeof(double));
  double * w_k = (double *)malloc(n * sizeof(double));

  if(!isVariable)
  {
    /* Fixed stepsize loop. */
    while((iter < itermax) && (hasNotConverged > 0))
    {
      ++iter;

      /* x_k <- x : keep the iterate for the incremental error below */
      cblas_dcopy(n, x, 1, x_k, 1);

      /* xtmp <- x */
      cblas_dcopy(n, x, 1, xtmp, 1);

      /* wtmp <- F(xtmp) */
      problem->F(problem, n, xtmp, wtmp);

      /* xtmp <- xtmp - F(xtmp) */
      cblas_daxpy(n, -1.0, wtmp, 1, xtmp, 1) ;

      /* wtmp <- ProjectionOnX(xtmp) : extra-gradient half step.
       * NOTE(review): wtmp is not used by the update below and rho is not
       * applied anywhere in this loop, so the iteration reduces to the
       * projected step x <- P_X(x - F(x)) — confirm this is intended. */
      problem->ProjectionOnX(problem, xtmp, wtmp);

      /* x <- ProjectionOnX(xtmp) */
      cblas_dcopy(n, xtmp, 1, x, 1);
      problem->ProjectionOnX(problem, xtmp, x);

      /* **** Criterium convergence **** */
      if(options->iparam[SICONOS_VI_IPARAM_ERROR_EVALUATION] == SICONOS_VI_ERROR_EVALUATION_LIGHT ||
          options->iparam[SICONOS_VI_IPARAM_ERROR_EVALUATION] == SICONOS_VI_ERROR_EVALUATION_LIGHT_WITH_FULL_FINAL)
      {
        /* light_error_sum = || x - x_k ||.  The copy direction was
         * previously reversed (x <- xtmp), which destroyed the projected
         * iterate right before the error was measured. */
        cblas_dcopy(n, x, 1, xtmp, 1);
        cblas_daxpy(n, -1.0, x_k, 1, xtmp, 1) ;
        light_error_sum = cblas_dnrm2(n, xtmp, 1);
      }
      error = compute_error(problem, x, w, light_error_sum, tolerance, options);
      hasNotConverged = determine_convergence(error, &tolerance, iter, options,
                                              problem, x, w, rho);

      if(options->callback)
      {
        options->callback->collectStatsIteration(options->callback->env, n,
                                                 x, w,
                                                 error, NULL);
      }
    }
  }

  if(isVariable)
  {
    if(iparam[SICONOS_VI_IPARAM_LINESEARCH_METHOD]==SICONOS_VI_LS_ARMIJO) /* Armijo rule with Khotbotov ratio (default) */
    {
      while((iter < itermax) && (hasNotConverged > 0))
      {
        ++iter;

        /* Store the error */
        DEBUG_EXPR_WE(error_k = error;);

        /* x_k <-- x store the x at the beginning of the iteration */
        cblas_dcopy(n, x, 1, x_k, 1);
        DEBUG_EXPR(NV_display(x_k,n););
        /* w_k <- F(x_k) */
        problem->F(problem, n, x, w_k);

        ls_iter = 0;
        success = 0;
        rho_k = rho / tau;

        /* Line search: find the largest rho_k = rho * tau^m such that
         * rho_k * ||F(y) - F(x_k)|| < L * ||y - x_k||,
         * with y = P_X(x - rho_k * F(x_k)). */
        while(!success && (ls_iter < ls_itermax))
        {
          rho_k = rho_k * tau ;

          /* x <- x_k for the std approach */
          if(iparam[SICONOS_VI_IPARAM_ACTIVATE_UPDATE]==0) cblas_dcopy(n, x_k, 1, x, 1) ;

          /* x <- x - rho_k * w_k */
          cblas_daxpy(n, -rho_k, w_k, 1, x, 1) ;

          /* xtmp <- ProjectionOnX(x), w <- F(xtmp) */
          problem->ProjectionOnX(problem, x, xtmp);
          problem->F(problem, n, xtmp, w);

          DEBUG_EXPR_WE(for(int i =0; i< 5 ; i++)
          {
            printf("xtmp[%i]=%12.8e\t",i,xtmp[i]);
            printf("w[%i]=F[%i]=%12.8e\n",i,i,w[i]);
          });

          /* w <- w - w_k */
          cblas_daxpy(n, -1.0, w_k, 1, w, 1) ;

          /* a1 = || F(xtmp) - w_k || */
          a1 = cblas_dnrm2(n, w, 1);
          DEBUG_PRINTF("a1 = %12.8e\n", a1);

          /* x <- xtmp (trial iterate), then xtmp <- x - x_k */
          cblas_dcopy(n, xtmp, 1, x, 1) ;
          cblas_daxpy(n, -1.0, x_k, 1, xtmp, 1) ;

          /* a2 = || x - x_k || */
          a2 = cblas_dnrm2(n, xtmp, 1) ;
          DEBUG_PRINTF("a2 = %12.8e\n", a2);

          DEBUG_PRINTF("test rho_k*a1 < L * a2 = %e < %e\n", rho_k*a1, L * a2) ;
          success = (rho_k*a1 < L * a2)?1:0;

          ls_iter++;
        }

        /* Extra-gradient step with the accepted rho_k:
         * x <- P_X(x - rho_k * F(x)). */
        problem->F(problem, n, x, wtmp);
        cblas_daxpy(n, -rho_k, wtmp, 1, x, 1) ;
        cblas_dcopy(n, x, 1, xtmp, 1);
        problem->ProjectionOnX(problem, xtmp, x);

        DEBUG_EXPR(NV_display(x,n););
        DEBUG_EXPR(NV_display(w,n););

        /* **** Criterium convergence ****
         * NOTE(review): a1 (= ||F-change|| from the last line-search trial)
         * is passed as the increment norm for the light error strategies —
         * confirm this is intended rather than ||x - x_k||. */
        error = compute_error(problem, x, w, a1, tolerance, options);
        hasNotConverged = determine_convergence(error, &tolerance, iter, options,
                                                problem, x, w, rho);

        DEBUG_PRINTF("error = %12.8e\t error_k = %12.8e\n",error,error_k);
        /* Update rho: enlarge the stepsize when the line search was not tight. */
        if((rho_k*a1 < Lmin * a2))
        {
          rho = rho_k*tauinv;
        }
        else
          rho = rho_k;
      }
    }// end iparam[SICONOS_VI_IPARAM_LINESEARCH_METHOD]==SICONOS_VI_LS_ARMIJO

    if(iparam[SICONOS_VI_IPARAM_LINESEARCH_METHOD]==SICONOS_VI_LS_SOLODOV) /* Armijo rule with Solodov.Tseng ratio */
    {
      while((iter < itermax) && (hasNotConverged > 0))
      {
        ++iter;

        /* Store the error */
        DEBUG_EXPR_WE(error_k = error;);

        /* x_k <-- x store the x at the beginning of the iteration */
        cblas_dcopy(n, x, 1, x_k, 1);
        /* w_k <- F(x_k) */
        problem->F(problem, n, x, w_k);

        ls_iter = 0;
        success = 0;
        rho_k = rho / tau;

        /* Line search with the Solodov-Tseng ratio: accept rho_k when
         * rho_k * <F(y) - F(x_k), y - x_k> < L * ||y - x_k||^2. */
        while(!success && (ls_iter < ls_itermax))
        {
          rho_k = rho_k * tau ;

          /* x <- x_k for the std approach */
          if(iparam[SICONOS_VI_IPARAM_ACTIVATE_UPDATE]==0) cblas_dcopy(n, x_k, 1, x, 1) ;

          /* x <- x - rho_k * w_k */
          cblas_daxpy(n, -rho_k, w_k, 1, x, 1) ;

          /* xtmp <- ProjectionOnX(x), w <- F(xtmp) */
          problem->ProjectionOnX(problem, x, xtmp);
          problem->F(problem, n, xtmp, w);

          DEBUG_EXPR_WE(for(int i =0; i< 5 ; i++)
          {
            printf("xtmp[%i]=%12.8e\t",i,xtmp[i]);
            printf("w[%i]=F[%i]=%12.8e\n",i,i,w[i]);
          });

          /* w <- w - w_k */
          cblas_daxpy(n, -1.0, w_k, 1, w, 1) ;

          /* x <- xtmp (trial iterate), then xtmp <- x - x_k */
          cblas_dcopy(n, xtmp, 1, x, 1) ;
          cblas_daxpy(n, -1.0, x_k, 1, xtmp, 1) ;

          /* a1 = (w - w_k)^T(x - x_k) */
          a1 = cblas_ddot(n, xtmp, 1, w, 1);
          DEBUG_PRINTF("a1 = %12.8e\n", a1);

          /* a2 = || x - x_k || */
          a2 = cblas_dnrm2(n, xtmp, 1) ;
          DEBUG_PRINTF("a2 = %12.8e\n", a2);

          DEBUG_PRINTF("test rho_k*a1 < L * a2 * a2 = %e < %e\n", rho_k*a1, L * a2 * a2) ;
          success = (rho_k*a1 < L * a2 * a2)?1:0;

          ls_iter++;
        }

        /* Extra-gradient step with the accepted rho_k:
         * x <- P_X(x - rho_k * F(x)). */
        problem->F(problem, n, x, wtmp);
        cblas_daxpy(n, -rho_k, wtmp, 1, x, 1) ;
        cblas_dcopy(n, x, 1, xtmp, 1);
        problem->ProjectionOnX(problem, xtmp, x);
        DEBUG_EXPR_WE(for(int i =0; i< 5 ; i++)
        {
          printf("x[%i]=%12.8e\t",i,x[i]);
          printf("w[%i]=F[%i]=%12.8e\n",i,i,w[i]);
        }
        );

        /* **** Criterium convergence ****
         * NOTE(review): a1 is a scalar product here (possibly negative) but
         * is passed to compute_error as an increment norm for the light
         * error strategies — confirm intent. */
        error = compute_error(problem, x, w, a1, tolerance, options);
        hasNotConverged = determine_convergence(error, &tolerance, iter, options,
                                                problem, x, w, rho);

        DEBUG_PRINTF("error = %12.8e\t error_k = %12.8e\n",error,error_k);
        /* Update rho: enlarge the stepsize when the line search was not tight. */
        if((rho_k*a1 < Lmin * a2*a2))
        {
          rho = rho_k*tauinv;
        }
        else
          rho = rho_k;
      }
    }// end iparam[SICONOS_VI_IPARAM_LINESEARCH_METHOD]==SICONOS_VI_LS_SOLODOV

    /* we return the negative value of rho for multiple call to the solver */
    dparam[SICONOS_VI_DPARAM_RHO] = -rho;
  }

  *info = hasNotConverged;
  if(verbose > 0)
  {
    printf("--------------- VI - Extra Gradient (EG) - #Iteration %i Final Residual = %14.7e\n", iter, error);
  }
  dparam[SICONOS_DPARAM_TOL] = tolerance;
  dparam[SICONOS_DPARAM_RESIDU] = error;
  iparam[SICONOS_IPARAM_ITER_DONE] = iter;
  free(x_k);
  free(w_k);
  free(xtmp);
  free(wtmp);
  DEBUG_END("variationalInequality_ExtraGradient(VariationalInequality* problem, ...)\n")
}
486
487
/** Install the default parameters of the Extra Gradient VI solver into
 *  the given SolverOptions structure.
 */
void variationalInequality_ExtraGradient_set_default(SolverOptions* options)
{
  int *ip = options->iparam;
  double *dp = options->dparam;

  /* Error evaluation: light criterion with a full final check.
   * (SICONOS_VI_ERROR_EVALUATION_FULL left disabled.) */
  ip[SICONOS_VI_IPARAM_ERROR_EVALUATION] = SICONOS_VI_ERROR_EVALUATION_LIGHT_WITH_FULL_FINAL;
  ip[SICONOS_VI_IPARAM_ERROR_EVALUATION_FREQUENCY] = 0; /* Rk FP : set but not used */

  /* Line search: Armijo rule with the Khotbotov ratio. */
  ip[SICONOS_VI_IPARAM_LINESEARCH_METHOD] = SICONOS_VI_LS_ARMIJO;
  /* rho decrease via rho_k * tau * min(1.0, a2/(rho_k*a1)) is commented
   * out in the code; keep the flag off. */
  ip[SICONOS_VI_IPARAM_DECREASE_RHO] = 0;
  /* Update of x inside the line-search loop (0: disabled, default). */
  ip[SICONOS_VI_IPARAM_ACTIVATE_UPDATE] = 0;

  /* Stepsize (in/out parameter); negative value selects the variable step. */
  dp[SICONOS_VI_DPARAM_RHO] = -1.0;

  /* Line-search constants. */
  dp[SICONOS_VI_DPARAM_LS_TAU] = 2.0/3.0;    /* tau */
  dp[SICONOS_VI_DPARAM_LS_TAUINV] = 3.0/2.0; /* tauinv */
  dp[SICONOS_VI_DPARAM_LS_L] = 0.9;          /* L */
  dp[SICONOS_VI_DPARAM_LS_LMIN] = 0.3;       /* Lmin */
}
502