1 /*************************************************************************
2 ALGLIB 3.15.0 (source code generated 2019-02-20)
3 Copyright (c) Sergey Bochkanov (ALGLIB project).
4 
5 >>> SOURCE LICENSE >>>
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation (www.fsf.org); either version 2 of the
9 License, or (at your option) any later version.
10 
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 A copy of the GNU General Public License is available at
17 http://www.fsf.org/licensing/licenses
18 >>> END OF LICENSE >>>
19 *************************************************************************/
20 #ifndef _optimization_pkg_h
21 #define _optimization_pkg_h
22 #include "ap.h"
23 #include "alglibinternal.h"
24 #include "linalg.h"
25 #include "alglibmisc.h"
26 #include "solvers.h"
27 
28 /////////////////////////////////////////////////////////////////////////
29 //
30 // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES)
31 //
32 /////////////////////////////////////////////////////////////////////////
33 namespace alglib_impl
34 {
35 #if defined(AE_COMPILE_CQMODELS) || !defined(AE_PARTIAL_BUILD)
36 typedef struct
37 {
38     ae_int_t n;
39     ae_int_t k;
40     double alpha;
41     double tau;
42     double theta;
43     ae_matrix a;
44     ae_matrix q;
45     ae_vector b;
46     ae_vector r;
47     ae_vector xc;
48     ae_vector d;
49     ae_vector activeset;
50     ae_matrix tq2dense;
51     ae_matrix tk2;
52     ae_vector tq2diag;
53     ae_vector tq1;
54     ae_vector tk1;
55     double tq0;
56     double tk0;
57     ae_vector txc;
58     ae_vector tb;
59     ae_int_t nfree;
60     ae_int_t ecakind;
61     ae_matrix ecadense;
62     ae_matrix eq;
63     ae_matrix eccm;
64     ae_vector ecadiag;
65     ae_vector eb;
66     double ec;
67     ae_vector tmp0;
68     ae_vector tmp1;
69     ae_vector tmpg;
70     ae_matrix tmp2;
71     ae_bool ismaintermchanged;
72     ae_bool issecondarytermchanged;
73     ae_bool islineartermchanged;
74     ae_bool isactivesetchanged;
75 } convexquadraticmodel;
76 #endif
77 #if defined(AE_COMPILE_OPTGUARDAPI) || !defined(AE_PARTIAL_BUILD)
78 typedef struct
79 {
80     ae_bool nonc0suspected;
81     ae_bool nonc0test0positive;
82     ae_int_t nonc0fidx;
83     double nonc0lipschitzc;
84     ae_bool nonc1suspected;
85     ae_bool nonc1test0positive;
86     ae_bool nonc1test1positive;
87     ae_int_t nonc1fidx;
88     double nonc1lipschitzc;
89     ae_bool badgradsuspected;
90     ae_int_t badgradfidx;
91     ae_int_t badgradvidx;
92     ae_vector badgradxbase;
93     ae_matrix badgraduser;
94     ae_matrix badgradnum;
95 } optguardreport;
96 typedef struct
97 {
98     ae_bool positive;
99     ae_int_t fidx;
100     ae_vector x0;
101     ae_vector d;
102     ae_int_t n;
103     ae_vector stp;
104     ae_vector f;
105     ae_int_t cnt;
106     ae_int_t stpidxa;
107     ae_int_t stpidxb;
108 } optguardnonc1test0report;
109 typedef struct
110 {
111     ae_bool positive;
112     ae_int_t fidx;
113     ae_int_t vidx;
114     ae_vector x0;
115     ae_vector d;
116     ae_int_t n;
117     ae_vector stp;
118     ae_vector g;
119     ae_int_t cnt;
120     ae_int_t stpidxa;
121     ae_int_t stpidxb;
122 } optguardnonc1test1report;
123 #endif
124 #if defined(AE_COMPILE_OPTSERV) || !defined(AE_PARTIAL_BUILD)
125 typedef struct
126 {
127     ae_vector norms;
128     ae_vector alpha;
129     ae_vector rho;
130     ae_matrix yk;
131     ae_vector idx;
132     ae_vector bufa;
133     ae_vector bufb;
134 } precbuflbfgs;
135 typedef struct
136 {
137     ae_int_t n;
138     ae_int_t k;
139     ae_vector d;
140     ae_matrix v;
141     ae_vector bufc;
142     ae_matrix bufz;
143     ae_matrix bufw;
144     ae_vector tmp;
145 } precbuflowrank;
146 typedef struct
147 {
148     ae_int_t n;
149     ae_int_t k;
150     ae_bool checksmoothness;
151     ae_vector dcur;
152     ae_int_t enqueuedcnt;
153     ae_vector enqueuedstp;
154     ae_vector enqueuedx;
155     ae_vector enqueuedfunc;
156     ae_matrix enqueuedjac;
157     ae_vector sortedstp;
158     ae_vector sortedidx;
159     ae_int_t sortedcnt;
160     ae_bool linesearchspoiled;
161     ae_bool linesearchstarted;
162     double nonc0currentrating;
163     double nonc1currentrating;
164     ae_bool badgradhasxj;
165     optguardreport rep;
166     double nonc1test0strrating;
167     double nonc1test0lngrating;
168     optguardnonc1test0report nonc1test0strrep;
169     optguardnonc1test0report nonc1test0lngrep;
170     double nonc1test1strrating;
171     double nonc1test1lngrating;
172     optguardnonc1test1report nonc1test1strrep;
173     optguardnonc1test1report nonc1test1lngrep;
174     ae_bool needfij;
175     ae_vector x;
176     ae_vector fi;
177     ae_matrix j;
178     rcommstate rstateg0;
179     ae_vector xbase;
180     ae_vector fbase;
181     ae_vector fm;
182     ae_vector fc;
183     ae_vector fp;
184     ae_vector jm;
185     ae_vector jc;
186     ae_vector jp;
187     ae_matrix jbaseusr;
188     ae_matrix jbasenum;
189     ae_vector stp;
190     ae_vector bufr;
191     ae_vector f;
192     ae_vector g;
193     ae_vector deltax;
194     ae_vector tmpidx;
195     ae_vector bufi;
196     ae_vector xu;
197     ae_vector du;
198     ae_vector f0;
199     ae_matrix j0;
200 } smoothnessmonitor;
201 #endif
202 #if defined(AE_COMPILE_SNNLS) || !defined(AE_PARTIAL_BUILD)
203 typedef struct
204 {
205     ae_int_t ns;
206     ae_int_t nd;
207     ae_int_t nr;
208     ae_matrix densea;
209     ae_vector b;
210     ae_vector nnc;
211     double debugflops;
212     ae_int_t debugmaxinnerits;
213     ae_vector xn;
214     ae_vector xp;
215     ae_matrix tmpca;
216     ae_matrix tmplq;
217     ae_matrix trda;
218     ae_vector trdd;
219     ae_vector crb;
220     ae_vector g;
221     ae_vector d;
222     ae_vector dx;
223     ae_vector diagaa;
224     ae_vector cb;
225     ae_vector cx;
226     ae_vector cborg;
227     ae_vector tmpcholesky;
228     ae_vector r;
229     ae_vector regdiag;
230     ae_vector tmp0;
231     ae_vector tmp1;
232     ae_vector tmp2;
233     ae_vector rdtmprowmap;
234 } snnlssolver;
235 #endif
236 #if defined(AE_COMPILE_SACTIVESETS) || !defined(AE_PARTIAL_BUILD)
237 typedef struct
238 {
239     ae_int_t n;
240     ae_int_t algostate;
241     ae_vector xc;
242     ae_bool hasxc;
243     ae_vector s;
244     ae_vector h;
245     ae_vector cstatus;
246     ae_bool basisisready;
247     ae_matrix sdensebatch;
248     ae_matrix pdensebatch;
249     ae_matrix idensebatch;
250     ae_int_t densebatchsize;
251     ae_vector sparsebatch;
252     ae_int_t sparsebatchsize;
253     ae_int_t basisage;
254     ae_bool feasinitpt;
255     ae_bool constraintschanged;
256     ae_vector hasbndl;
257     ae_vector hasbndu;
258     ae_vector bndl;
259     ae_vector bndu;
260     ae_matrix cleic;
261     ae_int_t nec;
262     ae_int_t nic;
263     ae_vector mtnew;
264     ae_vector mtx;
265     ae_vector mtas;
266     ae_vector cdtmp;
267     ae_vector corrtmp;
268     ae_vector unitdiagonal;
269     snnlssolver solver;
270     ae_vector scntmp;
271     ae_vector tmp0;
272     ae_vector tmpfeas;
273     ae_matrix tmpm0;
274     ae_vector rctmps;
275     ae_vector rctmpg;
276     ae_vector rctmprightpart;
277     ae_matrix rctmpdense0;
278     ae_matrix rctmpdense1;
279     ae_vector rctmpisequality;
280     ae_vector rctmpconstraintidx;
281     ae_vector rctmplambdas;
282     ae_matrix tmpbasis;
283     ae_vector tmpnormestimates;
284     ae_vector tmpreciph;
285     ae_vector tmpprodp;
286     ae_vector tmpprods;
287     ae_vector tmpcp;
288     ae_vector tmpcs;
289     ae_vector tmpci;
290 } sactiveset;
291 #endif
292 #if defined(AE_COMPILE_QQPSOLVER) || !defined(AE_PARTIAL_BUILD)
293 typedef struct
294 {
295     double epsg;
296     double epsf;
297     double epsx;
298     ae_int_t maxouterits;
299     ae_bool cgphase;
300     ae_bool cnphase;
301     ae_int_t cgminits;
302     ae_int_t cgmaxits;
303     ae_int_t cnmaxupdates;
304     ae_int_t sparsesolver;
305 } qqpsettings;
306 typedef struct
307 {
308     ae_int_t n;
309     ae_int_t akind;
310     ae_matrix densea;
311     sparsematrix sparsea;
312     ae_bool sparseupper;
313     double absamax;
314     double absasum;
315     double absasum2;
316     ae_vector b;
317     ae_vector bndl;
318     ae_vector bndu;
319     ae_vector havebndl;
320     ae_vector havebndu;
321     ae_vector xs;
322     ae_vector xf;
323     ae_vector gc;
324     ae_vector xp;
325     ae_vector dc;
326     ae_vector dp;
327     ae_vector cgc;
328     ae_vector cgp;
329     sactiveset sas;
330     ae_vector activated;
331     ae_int_t nfree;
332     ae_int_t cnmodelage;
333     ae_matrix densez;
334     sparsematrix sparsecca;
335     ae_vector yidx;
336     ae_vector regdiag;
337     ae_vector regx0;
338     ae_vector tmpcn;
339     ae_vector tmpcni;
340     ae_vector tmpcnb;
341     ae_vector tmp0;
342     ae_vector tmp1;
343     ae_vector stpbuf;
344     sparsebuffers sbuf;
345     ae_int_t repinneriterationscount;
346     ae_int_t repouteriterationscount;
347     ae_int_t repncholesky;
348     ae_int_t repncupdates;
349 } qqpbuffers;
350 #endif
351 #if defined(AE_COMPILE_MINLBFGS) || !defined(AE_PARTIAL_BUILD)
352 typedef struct
353 {
354     ae_int_t n;
355     ae_int_t m;
356     double epsg;
357     double epsf;
358     double epsx;
359     ae_int_t maxits;
360     ae_bool xrep;
361     double stpmax;
362     ae_vector s;
363     double diffstep;
364     ae_int_t nfev;
365     ae_int_t mcstage;
366     ae_int_t k;
367     ae_int_t q;
368     ae_int_t p;
369     ae_vector rho;
370     ae_matrix yk;
371     ae_matrix sk;
372     ae_vector xp;
373     ae_vector theta;
374     ae_vector d;
375     double stp;
376     ae_vector work;
377     double fold;
378     double trimthreshold;
379     ae_vector xbase;
380     ae_int_t prectype;
381     double gammak;
382     ae_matrix denseh;
383     ae_vector diagh;
384     ae_vector precc;
385     ae_vector precd;
386     ae_matrix precw;
387     ae_int_t preck;
388     precbuflbfgs precbuf;
389     precbuflowrank lowrankbuf;
390     double fbase;
391     double fm2;
392     double fm1;
393     double fp1;
394     double fp2;
395     ae_vector autobuf;
396     ae_vector invs;
397     ae_vector x;
398     double f;
399     ae_vector g;
400     ae_bool needf;
401     ae_bool needfg;
402     ae_bool xupdated;
403     ae_bool userterminationneeded;
404     double teststep;
405     rcommstate rstate;
406     ae_int_t repiterationscount;
407     ae_int_t repnfev;
408     ae_int_t repterminationtype;
409     linminstate lstate;
410     ae_int_t smoothnessguardlevel;
411     smoothnessmonitor smonitor;
412     ae_vector lastscaleused;
413 } minlbfgsstate;
414 typedef struct
415 {
416     ae_int_t iterationscount;
417     ae_int_t nfev;
418     ae_int_t terminationtype;
419 } minlbfgsreport;
420 #endif
421 #if defined(AE_COMPILE_QPDENSEAULSOLVER) || !defined(AE_PARTIAL_BUILD)
422 typedef struct
423 {
424     double epsx;
425     ae_int_t outerits;
426     double rho;
427 } qpdenseaulsettings;
428 typedef struct
429 {
430     ae_vector nulc;
431     ae_matrix sclsfta;
432     ae_vector sclsftb;
433     ae_vector sclsfthasbndl;
434     ae_vector sclsfthasbndu;
435     ae_vector sclsftbndl;
436     ae_vector sclsftbndu;
437     ae_vector sclsftxc;
438     ae_matrix sclsftcleic;
439     ae_matrix exa;
440     ae_vector exb;
441     ae_vector exxc;
442     ae_vector exbndl;
443     ae_vector exbndu;
444     ae_vector exscale;
445     ae_vector exxorigin;
446     qqpsettings qqpsettingsuser;
447     qqpbuffers qqpbuf;
448     ae_vector nulcest;
449     ae_vector tmp0;
450     ae_matrix tmp2;
451     ae_vector modelg;
452     ae_vector d;
453     ae_vector deltax;
454     convexquadraticmodel dummycqm;
455     sparsematrix dummysparse;
456     ae_matrix qrkkt;
457     ae_vector qrrightpart;
458     ae_vector qrtau;
459     ae_vector qrsv0;
460     ae_vector qrsvx1;
461     ae_vector nicerr;
462     ae_vector nicnact;
463     ae_int_t repinneriterationscount;
464     ae_int_t repouteriterationscount;
465     ae_int_t repncholesky;
466     ae_int_t repnwrkchanges;
467     ae_int_t repnwrk0;
468     ae_int_t repnwrk1;
469     ae_int_t repnwrkf;
470     ae_int_t repnmv;
471 } qpdenseaulbuffers;
472 #endif
473 #if defined(AE_COMPILE_MINBLEIC) || !defined(AE_PARTIAL_BUILD)
474 typedef struct
475 {
476     ae_int_t nmain;
477     ae_int_t nslack;
478     double epsg;
479     double epsf;
480     double epsx;
481     ae_int_t maxits;
482     ae_bool xrep;
483     ae_bool drep;
484     double stpmax;
485     double diffstep;
486     sactiveset sas;
487     ae_vector s;
488     ae_int_t prectype;
489     ae_vector diagh;
490     ae_vector x;
491     double f;
492     ae_vector g;
493     ae_bool needf;
494     ae_bool needfg;
495     ae_bool xupdated;
496     ae_bool lsstart;
497     ae_bool steepestdescentstep;
498     ae_bool boundedstep;
499     ae_bool userterminationneeded;
500     rcommstate rstate;
501     ae_vector ugc;
502     ae_vector cgc;
503     ae_vector xn;
504     ae_vector ugn;
505     ae_vector cgn;
506     ae_vector xp;
507     double fc;
508     double fn;
509     double fp;
510     ae_vector d;
511     ae_matrix cleic;
512     ae_int_t nec;
513     ae_int_t nic;
514     double lastgoodstep;
515     double lastscaledgoodstep;
516     double maxscaledgrad;
517     ae_vector hasbndl;
518     ae_vector hasbndu;
519     ae_vector bndl;
520     ae_vector bndu;
521     ae_int_t repinneriterationscount;
522     ae_int_t repouteriterationscount;
523     ae_int_t repnfev;
524     ae_int_t repvaridx;
525     ae_int_t repterminationtype;
526     double repdebugeqerr;
527     double repdebugfs;
528     double repdebugff;
529     double repdebugdx;
530     ae_int_t repdebugfeasqpits;
531     ae_int_t repdebugfeasgpaits;
532     ae_vector xstart;
533     snnlssolver solver;
534     double fbase;
535     double fm2;
536     double fm1;
537     double fp1;
538     double fp2;
539     double xm1;
540     double xp1;
541     double gm1;
542     double gp1;
543     ae_int_t cidx;
544     double cval;
545     ae_vector tmpprec;
546     ae_vector tmp0;
547     ae_int_t nfev;
548     ae_int_t mcstage;
549     double stp;
550     double curstpmax;
551     double activationstep;
552     ae_vector work;
553     linminstate lstate;
554     double trimthreshold;
555     ae_int_t nonmonotoniccnt;
556     ae_matrix bufyk;
557     ae_matrix bufsk;
558     ae_vector bufrho;
559     ae_vector buftheta;
560     ae_int_t bufsize;
561     double teststep;
562     ae_int_t smoothnessguardlevel;
563     smoothnessmonitor smonitor;
564     ae_vector lastscaleused;
565     ae_vector invs;
566 } minbleicstate;
567 typedef struct
568 {
569     ae_int_t iterationscount;
570     ae_int_t nfev;
571     ae_int_t varidx;
572     ae_int_t terminationtype;
573     double debugeqerr;
574     double debugfs;
575     double debugff;
576     double debugdx;
577     ae_int_t debugfeasqpits;
578     ae_int_t debugfeasgpaits;
579     ae_int_t inneriterationscount;
580     ae_int_t outeriterationscount;
581 } minbleicreport;
582 #endif
583 #if defined(AE_COMPILE_QPBLEICSOLVER) || !defined(AE_PARTIAL_BUILD)
584 typedef struct
585 {
586     double epsg;
587     double epsf;
588     double epsx;
589     ae_int_t maxits;
590 } qpbleicsettings;
591 typedef struct
592 {
593     minbleicstate solver;
594     minbleicreport solverrep;
595     ae_vector tmp0;
596     ae_vector tmp1;
597     ae_vector tmpi;
598     ae_int_t repinneriterationscount;
599     ae_int_t repouteriterationscount;
600 } qpbleicbuffers;
601 #endif
602 #if defined(AE_COMPILE_MINQP) || !defined(AE_PARTIAL_BUILD)
603 typedef struct
604 {
605     ae_int_t n;
606     qqpsettings qqpsettingsuser;
607     qpbleicsettings qpbleicsettingsuser;
608     qpdenseaulsettings qpdenseaulsettingsuser;
609     ae_bool dbgskipconstraintnormalization;
610     ae_int_t algokind;
611     ae_int_t akind;
612     convexquadraticmodel a;
613     sparsematrix sparsea;
614     ae_bool sparseaupper;
615     double absamax;
616     double absasum;
617     double absasum2;
618     ae_vector b;
619     ae_vector bndl;
620     ae_vector bndu;
621     ae_int_t stype;
622     ae_vector s;
623     ae_vector havebndl;
624     ae_vector havebndu;
625     ae_vector xorigin;
626     ae_vector startx;
627     ae_bool havex;
628     ae_matrix cleic;
629     ae_int_t nec;
630     ae_int_t nic;
631     sparsematrix scleic;
632     ae_int_t snec;
633     ae_int_t snic;
634     ae_vector xs;
635     ae_int_t repinneriterationscount;
636     ae_int_t repouteriterationscount;
637     ae_int_t repncholesky;
638     ae_int_t repnmv;
639     ae_int_t repterminationtype;
640     ae_vector effectives;
641     ae_vector tmp0;
642     ae_matrix ecleic;
643     ae_matrix dummyr2;
644     ae_bool qpbleicfirstcall;
645     qpbleicbuffers qpbleicbuf;
646     qqpbuffers qqpbuf;
647     qpdenseaulbuffers qpdenseaulbuf;
648 } minqpstate;
649 typedef struct
650 {
651     ae_int_t inneriterationscount;
652     ae_int_t outeriterationscount;
653     ae_int_t nmv;
654     ae_int_t ncholesky;
655     ae_int_t terminationtype;
656 } minqpreport;
657 #endif
658 #if defined(AE_COMPILE_REVISEDDUALSIMPLEX) || !defined(AE_PARTIAL_BUILD)
659 typedef struct
660 {
661     double pivottol;
662     double perturbmag;
663     ae_int_t maxtrfage;
664     ae_int_t trftype;
665     ae_int_t ratiotest;
666     ae_int_t pricing;
667     ae_int_t shifting;
668 } dualsimplexsettings;
669 typedef struct
670 {
671     ae_int_t ns;
672     ae_int_t m;
673     ae_vector idx;
674     ae_vector nidx;
675     ae_vector isbasic;
676     ae_int_t trftype;
677     ae_bool isvalidtrf;
678     ae_int_t trfage;
679     ae_matrix denselu;
680     sparsematrix sparsel;
681     sparsematrix sparseu;
682     sparsematrix sparseut;
683     ae_vector rowpermbwd;
684     ae_vector colpermbwd;
685     ae_vector densepfieta;
686     ae_vector densemu;
687     ae_vector rk;
688     ae_vector dk;
689     ae_vector dseweights;
690     ae_bool dsevalid;
691     double eminu;
692     ae_vector wtmp0;
693     ae_vector wtmp1;
694     ae_vector wtmp2;
695     ae_vector nrs;
696     ae_vector tcinvidx;
697     ae_matrix denselu2;
698     ae_vector densep2;
699     ae_vector densep2c;
700     sparsematrix sparselu1;
701     sparsematrix sparselu2;
702     sluv2buffer lubuf2;
703     ae_vector tmpi;
704     ae_vector utmp0;
705     ae_vector utmpi;
706     sparsematrix sparseludbg;
707 } dualsimplexbasis;
708 typedef struct
709 {
710     ae_int_t ns;
711     ae_int_t m;
712     ae_vector rawc;
713     ae_vector bndl;
714     ae_vector bndu;
715     ae_vector bndt;
716     ae_vector xa;
717     ae_vector d;
718     ae_int_t state;
719     ae_vector xb;
720     ae_vector bndlb;
721     ae_vector bndub;
722     ae_vector bndtb;
723     ae_vector effc;
724     ae_vector colscales;
725 } dualsimplexsubproblem;
726 typedef struct
727 {
728     ae_vector varscales;
729     ae_vector rowscales;
730     ae_vector rawbndl;
731     ae_vector rawbndu;
732     ae_int_t ns;
733     ae_int_t m;
734     sparsematrix a;
735     sparsematrix at;
736     dualsimplexbasis basis;
737     dualsimplexsubproblem primary;
738     dualsimplexsubproblem phase1;
739     dualsimplexsubproblem phase3;
740     ae_vector repx;
741     ae_vector repy;
742     ae_vector repdx;
743     ae_vector repstats;
744     double repf;
745     double repprimalerror;
746     double repdualerror;
747     ae_int_t repterminationtype;
748     ae_int_t repiterationscount;
749     ae_int_t repiterationscount1;
750     ae_int_t repiterationscount2;
751     ae_int_t repiterationscount3;
752     ae_vector possibleflips;
753     ae_int_t possibleflipscnt;
754     ae_vector dfctmp0;
755     ae_vector dfctmp1;
756     ae_vector dfctmp2;
757     ae_vector ustmpi;
758     ae_vector tmp0;
759     ae_vector tmp1;
760     ae_vector tmp2;
761     ae_vector alphar;
762     ae_vector rhor;
763     ae_vector tau;
764     ae_vector alphaq;
765     ae_vector alphaqim;
766     ae_vector eligibleset;
767     ae_vector harrisset;
768 } dualsimplexstate;
769 #endif
770 #if defined(AE_COMPILE_MINLP) || !defined(AE_PARTIAL_BUILD)
771 typedef struct
772 {
773     ae_int_t n;
774     ae_int_t algokind;
775     ae_vector s;
776     ae_vector c;
777     ae_vector bndl;
778     ae_vector bndu;
779     ae_int_t m;
780     sparsematrix a;
781     ae_vector al;
782     ae_vector au;
783     ae_vector xs;
784     ae_vector ys;
785     ae_vector cs;
786     double repf;
787     double repprimalerror;
788     double repdualerror;
789     ae_int_t repiterationscount;
790     ae_int_t repterminationtype;
791     dualsimplexstate dss;
792     ae_vector adddtmpi;
793     ae_vector adddtmpr;
794 } minlpstate;
795 typedef struct
796 {
797     double f;
798     ae_vector y;
799     ae_vector stats;
800     double primalerror;
801     double dualerror;
802     ae_int_t iterationscount;
803     ae_int_t terminationtype;
804 } minlpreport;
805 #endif
806 #if defined(AE_COMPILE_NLCSLP) || !defined(AE_PARTIAL_BUILD)
807 typedef struct
808 {
809     dualsimplexstate dss;
810     dualsimplexsettings dsssettings;
811     dualsimplexbasis lastbasis;
812     ae_bool basispresent;
813     ae_matrix curd;
814     ae_int_t curdcnt;
815     ae_vector curb;
816     ae_vector curbndl;
817     ae_vector curbndu;
818     ae_vector cural;
819     ae_vector curau;
820     sparsematrix sparserawlc;
821     sparsematrix sparseefflc;
822     ae_int_t hessiantype;
823     ae_matrix h;
824     ae_matrix curhd;
825     ae_matrix densedummy;
826     sparsematrix sparsedummy;
827     ae_vector tmp0;
828     ae_vector tmp1;
829     ae_vector sk;
830     ae_vector yk;
831 } minslpsubsolver;
832 typedef struct
833 {
834     ae_int_t n;
835     ae_int_t nec;
836     ae_int_t nic;
837     ae_int_t nlec;
838     ae_int_t nlic;
839     ae_matrix scaledcleic;
840     ae_vector lcsrcidx;
841     ae_vector hasbndl;
842     ae_vector hasbndu;
843     ae_vector scaledbndl;
844     ae_vector scaledbndu;
845     double epsx;
846     ae_int_t maxits;
847     ae_int_t hessiantype;
848     ae_vector x;
849     ae_vector fi;
850     ae_matrix j;
851     double f;
852     ae_bool needfij;
853     ae_bool xupdated;
854     double trustrad;
855     double deltamax;
856     minslpsubsolver subsolver;
857     ae_vector d;
858     ae_vector d0;
859     ae_vector d1;
860     linminstate mcstate;
861     ae_int_t xstagnationcnt;
862     ae_int_t fstagnationcnt;
863     ae_vector prevx;
864     ae_vector step0x;
865     ae_vector stepkx;
866     ae_vector stepkxc;
867     ae_vector stepkxn;
868     ae_vector step0fi;
869     ae_vector stepkfi;
870     ae_vector stepkfic;
871     ae_vector stepkfin;
872     ae_matrix step0j;
873     ae_matrix stepkj;
874     ae_matrix stepkjc;
875     ae_matrix stepkjn;
876     double stepklagval;
877     double stepkclagval;
878     double stepknlagval;
879     ae_vector stepklaggrad;
880     ae_vector stepknlaggrad;
881     ae_vector stepklagmult;
882     ae_vector stepknlagmult;
883     ae_vector rho;
884     ae_vector tmp0;
885     ae_vector sclagtmp0;
886     ae_vector sclagtmp1;
887     double lastlcerr;
888     ae_int_t lastlcidx;
889     double lastnlcerr;
890     ae_int_t lastnlcidx;
891     ae_vector mftmp0;
892     ae_int_t repsimplexiterations;
893     ae_int_t repsimplexiterations1;
894     ae_int_t repsimplexiterations2;
895     ae_int_t repsimplexiterations3;
896     ae_int_t repinneriterationscount;
897     ae_int_t repouteriterationscount;
898     ae_int_t repterminationtype;
899     double repbcerr;
900     ae_int_t repbcidx;
901     double replcerr;
902     ae_int_t replcidx;
903     double repnlcerr;
904     ae_int_t repnlcidx;
905     rcommstate rstate;
906     rcommstate rphase13state;
907     rcommstate rphase2state;
908 } minslpstate;
909 #endif
910 #if defined(AE_COMPILE_MINNLC) || !defined(AE_PARTIAL_BUILD)
911 typedef struct
912 {
913     double stabilizingpoint;
914     double initialinequalitymultiplier;
915     ae_int_t solvertype;
916     ae_int_t prectype;
917     ae_int_t updatefreq;
918     double rho;
919     ae_int_t n;
920     double epsx;
921     ae_int_t maxits;
922     ae_int_t aulitscnt;
923     ae_bool xrep;
924     double stpmax;
925     double diffstep;
926     double teststep;
927     ae_vector s;
928     ae_vector bndl;
929     ae_vector bndu;
930     ae_vector hasbndl;
931     ae_vector hasbndu;
932     ae_int_t nec;
933     ae_int_t nic;
934     ae_matrix cleic;
935     ae_vector lcsrcidx;
936     ae_int_t ng;
937     ae_int_t nh;
938     ae_vector x;
939     double f;
940     ae_vector fi;
941     ae_matrix j;
942     ae_bool needfij;
943     ae_bool needfi;
944     ae_bool xupdated;
945     rcommstate rstate;
946     rcommstate rstateaul;
947     rcommstate rstateslp;
948     ae_vector scaledbndl;
949     ae_vector scaledbndu;
950     ae_matrix scaledcleic;
951     ae_vector xc;
952     ae_vector xstart;
953     ae_vector xbase;
954     ae_vector fbase;
955     ae_vector dfbase;
956     ae_vector fm2;
957     ae_vector fm1;
958     ae_vector fp1;
959     ae_vector fp2;
960     ae_vector dfm1;
961     ae_vector dfp1;
962     ae_vector bufd;
963     ae_vector bufc;
964     ae_vector tmp0;
965     ae_matrix bufw;
966     ae_matrix bufz;
967     ae_vector xk;
968     ae_vector xk1;
969     ae_vector gk;
970     ae_vector gk1;
971     double gammak;
972     ae_bool xkpresent;
973     minlbfgsstate auloptimizer;
974     minlbfgsreport aulreport;
975     ae_vector nubc;
976     ae_vector nulc;
977     ae_vector nunlc;
978     ae_bool userterminationneeded;
979     minslpstate slpsolverstate;
980     ae_int_t smoothnessguardlevel;
981     smoothnessmonitor smonitor;
982     ae_vector lastscaleused;
983     ae_int_t repinneriterationscount;
984     ae_int_t repouteriterationscount;
985     ae_int_t repnfev;
986     ae_int_t repterminationtype;
987     double repbcerr;
988     ae_int_t repbcidx;
989     double replcerr;
990     ae_int_t replcidx;
991     double repnlcerr;
992     ae_int_t repnlcidx;
993     ae_int_t repdbgphase0its;
994 } minnlcstate;
995 typedef struct
996 {
997     ae_int_t iterationscount;
998     ae_int_t nfev;
999     ae_int_t terminationtype;
1000     double bcerr;
1001     ae_int_t bcidx;
1002     double lcerr;
1003     ae_int_t lcidx;
1004     double nlcerr;
1005     ae_int_t nlcidx;
1006     ae_int_t dbgphase0its;
1007 } minnlcreport;
1008 #endif
1009 #if defined(AE_COMPILE_MINBC) || !defined(AE_PARTIAL_BUILD)
1010 typedef struct
1011 {
1012     ae_int_t nmain;
1013     double epsg;
1014     double epsf;
1015     double epsx;
1016     ae_int_t maxits;
1017     ae_bool xrep;
1018     double stpmax;
1019     double diffstep;
1020     ae_vector s;
1021     ae_int_t prectype;
1022     ae_vector diagh;
1023     ae_vector x;
1024     double f;
1025     ae_vector g;
1026     ae_bool needf;
1027     ae_bool needfg;
1028     ae_bool xupdated;
1029     ae_bool userterminationneeded;
1030     rcommstate rstate;
1031     ae_vector xc;
1032     ae_vector ugc;
1033     ae_vector cgc;
1034     ae_vector xn;
1035     ae_vector ugn;
1036     ae_vector cgn;
1037     ae_vector xp;
1038     double fc;
1039     double fn;
1040     double fp;
1041     ae_vector d;
1042     double lastscaledgoodstep;
1043     ae_vector hasbndl;
1044     ae_vector hasbndu;
1045     ae_vector bndl;
1046     ae_vector bndu;
1047     ae_int_t repiterationscount;
1048     ae_int_t repnfev;
1049     ae_int_t repvaridx;
1050     ae_int_t repterminationtype;
1051     ae_vector xstart;
1052     double fbase;
1053     double fm2;
1054     double fm1;
1055     double fp1;
1056     double fp2;
1057     double xm1;
1058     double xp1;
1059     double gm1;
1060     double gp1;
1061     ae_vector tmpprec;
1062     ae_vector tmp0;
1063     ae_int_t nfev;
1064     ae_int_t mcstage;
1065     double stp;
1066     double curstpmax;
1067     ae_vector work;
1068     linminstate lstate;
1069     double trimthreshold;
1070     ae_int_t nonmonotoniccnt;
1071     ae_matrix bufyk;
1072     ae_matrix bufsk;
1073     ae_vector bufrho;
1074     ae_vector buftheta;
1075     ae_int_t bufsize;
1076     double teststep;
1077     ae_int_t smoothnessguardlevel;
1078     smoothnessmonitor smonitor;
1079     ae_vector lastscaleused;
1080     ae_vector invs;
1081 } minbcstate;
1082 typedef struct
1083 {
1084     ae_int_t iterationscount;
1085     ae_int_t nfev;
1086     ae_int_t varidx;
1087     ae_int_t terminationtype;
1088 } minbcreport;
1089 #endif
1090 #if defined(AE_COMPILE_MINNS) || !defined(AE_PARTIAL_BUILD)
1091 typedef struct
1092 {
1093     double fc;
1094     double fn;
1095     ae_vector xc;
1096     ae_vector xn;
1097     ae_vector x0;
1098     ae_vector gc;
1099     ae_vector d;
1100     ae_matrix uh;
1101     ae_matrix ch;
1102     ae_matrix rk;
1103     ae_vector invutc;
1104     ae_vector tmp0;
1105     ae_vector tmpidx;
1106     ae_vector tmpd;
1107     ae_vector tmpc;
1108     ae_vector tmplambdas;
1109     ae_matrix tmpc2;
1110     ae_vector tmpb;
1111     snnlssolver nnls;
1112 } minnsqp;
1113 typedef struct
1114 {
1115     ae_int_t solvertype;
1116     ae_int_t n;
1117     double epsx;
1118     ae_int_t maxits;
1119     ae_bool xrep;
1120     double diffstep;
1121     ae_vector s;
1122     ae_vector bndl;
1123     ae_vector bndu;
1124     ae_vector hasbndl;
1125     ae_vector hasbndu;
1126     ae_int_t nec;
1127     ae_int_t nic;
1128     ae_matrix cleic;
1129     ae_int_t ng;
1130     ae_int_t nh;
1131     ae_vector x;
1132     double f;
1133     ae_vector fi;
1134     ae_matrix j;
1135     ae_bool needfij;
1136     ae_bool needfi;
1137     ae_bool xupdated;
1138     rcommstate rstate;
1139     rcommstate rstateags;
1140     hqrndstate agsrs;
1141     double agsradius;
1142     ae_int_t agssamplesize;
1143     double agsraddecay;
1144     double agsalphadecay;
1145     double agsdecrease;
1146     double agsinitstp;
1147     double agsstattold;
1148     double agsshortstpabs;
1149     double agsshortstprel;
1150     double agsshortf;
1151     ae_int_t agsshortlimit;
1152     double agsrhononlinear;
1153     ae_int_t agsminupdate;
1154     ae_int_t agsmaxraddecays;
1155     ae_int_t agsmaxbacktrack;
1156     ae_int_t agsmaxbacktracknonfull;
1157     double agspenaltylevel;
1158     double agspenaltyincrease;
1159     ae_vector xstart;
1160     ae_vector xc;
1161     ae_vector xn;
1162     ae_vector grs;
1163     ae_vector d;
1164     ae_vector colmax;
1165     ae_vector diagh;
1166     ae_vector signmin;
1167     ae_vector signmax;
1168     ae_bool userterminationneeded;
1169     ae_vector scaledbndl;
1170     ae_vector scaledbndu;
1171     ae_matrix scaledcleic;
1172     ae_vector rholinear;
1173     ae_matrix samplex;
1174     ae_matrix samplegm;
1175     ae_matrix samplegmbc;
1176     ae_vector samplef;
1177     ae_vector samplef0;
1178     minnsqp nsqp;
1179     ae_vector tmp0;
1180     ae_vector tmp1;
1181     ae_matrix tmp2;
1182     ae_vector tmp3;
1183     ae_vector xbase;
1184     ae_vector fp;
1185     ae_vector fm;
1186     ae_int_t repinneriterationscount;
1187     ae_int_t repouteriterationscount;
1188     ae_int_t repnfev;
1189     ae_int_t repvaridx;
1190     ae_int_t repfuncidx;
1191     ae_int_t repterminationtype;
1192     double replcerr;
1193     double repnlcerr;
1194     ae_int_t dbgncholesky;
1195 } minnsstate;
1196 typedef struct
1197 {
1198     ae_int_t iterationscount;
1199     ae_int_t nfev;
1200     double cerr;
1201     double lcerr;
1202     double nlcerr;
1203     ae_int_t terminationtype;
1204     ae_int_t varidx;
1205     ae_int_t funcidx;
1206 } minnsreport;
1207 #endif
1208 #if defined(AE_COMPILE_MINCOMP) || !defined(AE_PARTIAL_BUILD)
1209 typedef struct
1210 {
1211     ae_int_t n;
1212     double epsg;
1213     double epsf;
1214     double epsx;
1215     ae_int_t maxits;
1216     ae_bool xrep;
1217     double stpmax;
1218     ae_int_t cgtype;
1219     ae_int_t k;
1220     ae_int_t nfev;
1221     ae_int_t mcstage;
1222     ae_vector bndl;
1223     ae_vector bndu;
1224     ae_int_t curalgo;
1225     ae_int_t acount;
1226     double mu;
1227     double finit;
1228     double dginit;
1229     ae_vector ak;
1230     ae_vector xk;
1231     ae_vector dk;
1232     ae_vector an;
1233     ae_vector xn;
1234     ae_vector dn;
1235     ae_vector d;
1236     double fold;
1237     double stp;
1238     ae_vector work;
1239     ae_vector yk;
1240     ae_vector gc;
1241     double laststep;
1242     ae_vector x;
1243     double f;
1244     ae_vector g;
1245     ae_bool needfg;
1246     ae_bool xupdated;
1247     rcommstate rstate;
1248     ae_int_t repiterationscount;
1249     ae_int_t repnfev;
1250     ae_int_t repterminationtype;
1251     ae_int_t debugrestartscount;
1252     linminstate lstate;
1253     double betahs;
1254     double betady;
1255 } minasastate;
1256 typedef struct
1257 {
1258     ae_int_t iterationscount;
1259     ae_int_t nfev;
1260     ae_int_t terminationtype;
1261     ae_int_t activeconstraints;
1262 } minasareport;
1263 #endif
1264 #if defined(AE_COMPILE_MINCG) || !defined(AE_PARTIAL_BUILD)
1265 typedef struct
1266 {
1267     ae_int_t n;
1268     double epsg;
1269     double epsf;
1270     double epsx;
1271     ae_int_t maxits;
1272     double stpmax;
1273     double suggestedstep;
1274     ae_bool xrep;
1275     ae_bool drep;
1276     ae_int_t cgtype;
1277     ae_int_t prectype;
1278     ae_vector diagh;
1279     ae_vector diaghl2;
1280     ae_matrix vcorr;
1281     ae_int_t vcnt;
1282     ae_vector s;
1283     double diffstep;
1284     ae_int_t nfev;
1285     ae_int_t mcstage;
1286     ae_int_t k;
1287     ae_vector xk;
1288     ae_vector dk;
1289     ae_vector xn;
1290     ae_vector dn;
1291     ae_vector d;
1292     double fold;
1293     double stp;
1294     double curstpmax;
1295     ae_vector yk;
1296     double lastgoodstep;
1297     double lastscaledstep;
1298     ae_int_t mcinfo;
1299     ae_bool innerresetneeded;
1300     ae_bool terminationneeded;
1301     double trimthreshold;
1302     ae_vector xbase;
1303     ae_int_t rstimer;
1304     ae_vector x;
1305     double f;
1306     ae_vector g;
1307     ae_bool needf;
1308     ae_bool needfg;
1309     ae_bool xupdated;
1310     ae_bool algpowerup;
1311     ae_bool lsstart;
1312     ae_bool lsend;
1313     ae_bool userterminationneeded;
1314     rcommstate rstate;
1315     ae_int_t repiterationscount;
1316     ae_int_t repnfev;
1317     ae_int_t repterminationtype;
1318     ae_int_t debugrestartscount;
1319     linminstate lstate;
1320     double fbase;
1321     double fm2;
1322     double fm1;
1323     double fp1;
1324     double fp2;
1325     double betahs;
1326     double betady;
1327     ae_vector work0;
1328     ae_vector work1;
1329     ae_vector invs;
1330     double teststep;
1331     ae_int_t smoothnessguardlevel;
1332     smoothnessmonitor smonitor;
1333     ae_vector lastscaleused;
1334 } mincgstate;
1335 typedef struct
1336 {
1337     ae_int_t iterationscount;
1338     ae_int_t nfev;
1339     ae_int_t terminationtype;
1340 } mincgreport;
1341 #endif
1342 #if defined(AE_COMPILE_MINLM) || !defined(AE_PARTIAL_BUILD)
1343 typedef struct
1344 {
1345     ae_int_t n;
1346     ae_int_t m;
1347     double stpmax;
1348     ae_int_t modelage;
1349     ae_int_t maxmodelage;
1350     ae_bool hasfi;
1351     double epsx;
1352     ae_vector x;
1353     double f;
1354     ae_vector fi;
1355     ae_bool needf;
1356     ae_bool needfi;
1357     double fbase;
1358     ae_vector modeldiag;
1359     ae_vector xbase;
1360     ae_vector fibase;
1361     ae_vector bndl;
1362     ae_vector bndu;
1363     ae_vector havebndl;
1364     ae_vector havebndu;
1365     ae_vector s;
1366     rcommstate rstate;
1367     ae_vector xdir;
1368     ae_vector choleskybuf;
1369     ae_vector tmp0;
1370     ae_vector tmpct;
1371     double actualdecrease;
1372     double predicteddecrease;
1373     minqpstate qpstate;
1374     minqpreport qprep;
1375     sparsematrix tmpsp;
1376 } minlmstepfinder;
1377 typedef struct
1378 {
1379     ae_int_t n;
1380     ae_int_t m;
1381     double diffstep;
1382     double epsx;
1383     ae_int_t maxits;
1384     ae_bool xrep;
1385     double stpmax;
1386     ae_int_t maxmodelage;
1387     ae_bool makeadditers;
1388     ae_vector x;
1389     double f;
1390     ae_vector fi;
1391     ae_matrix j;
1392     ae_matrix h;
1393     ae_vector g;
1394     ae_bool needf;
1395     ae_bool needfg;
1396     ae_bool needfgh;
1397     ae_bool needfij;
1398     ae_bool needfi;
1399     ae_bool xupdated;
1400     ae_bool userterminationneeded;
1401     ae_int_t algomode;
1402     ae_bool hasf;
1403     ae_bool hasfi;
1404     ae_bool hasg;
1405     ae_vector xbase;
1406     double fbase;
1407     ae_vector fibase;
1408     ae_vector gbase;
1409     ae_matrix quadraticmodel;
1410     ae_vector bndl;
1411     ae_vector bndu;
1412     ae_vector havebndl;
1413     ae_vector havebndu;
1414     ae_vector s;
1415     ae_matrix cleic;
1416     ae_int_t nec;
1417     ae_int_t nic;
1418     double lambdav;
1419     double nu;
1420     ae_int_t modelage;
1421     ae_vector xnew;
1422     ae_vector xdir;
1423     ae_vector deltax;
1424     ae_vector deltaf;
1425     ae_bool deltaxready;
1426     ae_bool deltafready;
1427     smoothnessmonitor smonitor;
1428     double teststep;
1429     ae_vector lastscaleused;
1430     ae_int_t repiterationscount;
1431     ae_int_t repterminationtype;
1432     ae_int_t repnfunc;
1433     ae_int_t repnjac;
1434     ae_int_t repngrad;
1435     ae_int_t repnhess;
1436     ae_int_t repncholesky;
1437     rcommstate rstate;
1438     ae_vector choleskybuf;
1439     ae_vector tmp0;
1440     double actualdecrease;
1441     double predicteddecrease;
1442     double xm1;
1443     double xp1;
1444     ae_vector fm1;
1445     ae_vector fp1;
1446     ae_vector fc1;
1447     ae_vector gm1;
1448     ae_vector gp1;
1449     ae_vector gc1;
1450     minlbfgsstate internalstate;
1451     minlbfgsreport internalrep;
1452     minqpstate qpstate;
1453     minqpreport qprep;
1454     minlmstepfinder finderstate;
1455 } minlmstate;
1456 typedef struct
1457 {
1458     ae_int_t iterationscount;
1459     ae_int_t terminationtype;
1460     ae_int_t nfunc;
1461     ae_int_t njac;
1462     ae_int_t ngrad;
1463     ae_int_t nhess;
1464     ae_int_t ncholesky;
1465 } minlmreport;
1466 #endif
1467 
1468 }
1469 
1470 /////////////////////////////////////////////////////////////////////////
1471 //
1472 // THIS SECTION CONTAINS C++ INTERFACE
1473 //
1474 /////////////////////////////////////////////////////////////////////////
1475 namespace alglib
1476 {
1477 
1478 #if defined(AE_COMPILE_CQMODELS) || !defined(AE_PARTIAL_BUILD)
1479 
1480 #endif
1481 
1482 #if defined(AE_COMPILE_OPTGUARDAPI) || !defined(AE_PARTIAL_BUILD)
1483 /*************************************************************************
1484 This structure is used to store  OptGuard  report,  i.e.  report  on   the
1485 properties of the nonlinear function being optimized with ALGLIB.
1486 
1487 After you tell your optimizer to activate OptGuard  this technology starts
1488 to silently monitor function values and gradients/Jacobians  being  passed
1489 all around during your optimization session. Depending on specific set  of
1490 checks enabled OptGuard may perform additional function evaluations  (say,
1491 about 3*N evaluations if you want to check analytic gradient for errors).
1492 
1493 Upon discovering that something strange happens  (function  values  and/or
1494 gradient components change too sharply and/or unexpectedly) OptGuard  sets
1495 one of the "suspicion  flags" (without interrupting optimization session).
1496 After optimization is done, you can examine OptGuard report.
1497 
1498 Following report fields can be set:
1499 * nonc0suspected
1500 * nonc1suspected
1501 * badgradsuspected
1502 
1503 
1504 === WHAT CAN BE DETECTED WITH OptGuard INTEGRITY CHECKER =================
1505 
1506 Following  types  of  errors  in your target function (constraints) can be
1507 caught:
1508 a) discontinuous functions ("non-C0" part of the report)
1509 b) functions with discontinuous derivative ("non-C1" part of the report)
1510 c) errors in the analytic gradient provided by user
1511 
1512 These types of errors result in optimizer  stopping  well  before reaching
1513 solution (most often - right after encountering discontinuity).
1514 
1515 Type A errors are usually  coding  errors  during  implementation  of  the
1516 target function. Most "normal" problems involve continuous functions,  and
anyway you can't reliably optimize a discontinuous function.
1518 
1519 Type B errors are either coding errors or (in case code itself is correct)
1520 evidence of the fact  that  your  problem  is  an  "incorrect"  one.  Most
1521 optimizers (except for ones provided by MINNS subpackage) do  not  support
1522 nonsmooth problems.
1523 
1524 Type C errors are coding errors which often prevent optimizer from  making
even one step, or result in the optimizer stopping too early, as  soon  as
1526 actual descent direction becomes too different from one suggested by user-
1527 supplied gradient.
1528 
1529 
1530 === WHAT IS REPORTED =====================================================
1531 
1532 Following set of report fields deals with discontinuous  target functions,
1533 ones not belonging to C0 continuity class:
1534 
1535 * nonc0suspected - is a flag which is set upon discovering some indication
1536   of the discontinuity. If this flag is false, the rest of "non-C0" fields
1537   should be ignored
1538 * nonc0fidx - is an index of the function (0 for  target  function,  1  or
1539   higher for nonlinear constraints) which is suspected of being "non-C0"
* nonc0lipschitzc - a Lipschitz constant for the function which  was
  suspected of being discontinuous.
1542 * nonc0test0positive -  set  to  indicate  specific  test  which  detected
1543   continuity violation (test #0)
1544 
1545 Following set of report fields deals with discontinuous gradient/Jacobian,
1546 i.e. with functions violating C1 continuity:
1547 
1548 * nonc1suspected - is a flag which is set upon discovering some indication
1549   of the discontinuity. If this flag is false, the rest of "non-C1" fields
1550   should be ignored
1551 * nonc1fidx - is an index of the function (0 for  target  function,  1  or
1552   higher for nonlinear constraints) which is suspected of being "non-C1"
* nonc1lipschitzc - a Lipschitz constant for the function  gradient
  which was suspected of being non-smooth.
1555 * nonc1test0positive -  set  to  indicate  specific  test  which  detected
1556   continuity violation (test #0)
1557 * nonc1test1positive -  set  to  indicate  specific  test  which  detected
1558   continuity violation (test #1)
1559 
1560 Following set of report fields deals with errors in the gradient:
* badgradsuspected - is a flag which is set upon discovering an  error  in
1562   the analytic gradient supplied by user
1563 * badgradfidx - index  of   the  function  with bad gradient (0 for target
1564   function, 1 or higher for nonlinear constraints)
1565 * badgradvidx - index of the variable
1566 * badgradxbase - location where Jacobian is tested
1567 * following  matrices  store  user-supplied  Jacobian  and  its  numerical
1568   differentiation version (which is assumed to be  free  from  the  coding
1569   errors), both of them computed near the initial point:
1570   * badgraduser, an array[K,N], analytic Jacobian supplied by user
1571   * badgradnum,  an array[K,N], numeric  Jacobian computed by ALGLIB
  Here K is the total number of nonlinear  functions  (target + nonlinear
  constraints) and N is the number of variables.
1574   The  element  of  badgraduser[] with index [badgradfidx,badgradvidx]  is
1575   assumed to be wrong.
1576 
1577 More detailed error log can  be  obtained  from  optimizer  by  explicitly
1578 requesting reports for tests C0.0, C1.0, C1.1.
1579 
1580   -- ALGLIB --
1581      Copyright 19.11.2018 by Bochkanov Sergey
1582 *************************************************************************/
1583 class _optguardreport_owner
1584 {
1585 public:
1586     _optguardreport_owner();
1587     _optguardreport_owner(const _optguardreport_owner &rhs);
1588     _optguardreport_owner& operator=(const _optguardreport_owner &rhs);
1589     virtual ~_optguardreport_owner();
1590     alglib_impl::optguardreport* c_ptr();
1591     alglib_impl::optguardreport* c_ptr() const;
1592 protected:
1593     alglib_impl::optguardreport *p_struct;
1594 };
1595 class optguardreport : public _optguardreport_owner
1596 {
1597 public:
1598     optguardreport();
1599     optguardreport(const optguardreport &rhs);
1600     optguardreport& operator=(const optguardreport &rhs);
1601     virtual ~optguardreport();
1602     ae_bool &nonc0suspected;
1603     ae_bool &nonc0test0positive;
1604     ae_int_t &nonc0fidx;
1605     double &nonc0lipschitzc;
1606     ae_bool &nonc1suspected;
1607     ae_bool &nonc1test0positive;
1608     ae_bool &nonc1test1positive;
1609     ae_int_t &nonc1fidx;
1610     double &nonc1lipschitzc;
1611     ae_bool &badgradsuspected;
1612     ae_int_t &badgradfidx;
1613     ae_int_t &badgradvidx;
1614     real_1d_array badgradxbase;
1615     real_2d_array badgraduser;
1616     real_2d_array badgradnum;
1617 
1618 };
1619 
1620 
1621 /*************************************************************************
1622 This  structure  is  used  for  detailed   reporting  about  suspected  C1
1623 continuity violation as flagged by C1 test #0 (OptGuard  has several tests
1624 for C1 continuity, this report is used by #0).
1625 
1626 === WHAT IS TESTED =======================================================
1627 
1628 C1 test #0 studies function values (not gradient!)  obtained  during  line
1629 searches and monitors behavior of directional  derivative  estimate.  This
1630 test is less powerful than test #1, but it does  not  depend  on  gradient
1631 values  and  thus  it  is  more  robust  against  artifacts  introduced by
1632 numerical differentiation.
1633 
1634 
1635 === WHAT IS REPORTED =====================================================
1636 
1637 Actually, report retrieval function returns TWO report structures:
1638 
1639 * one for most suspicious point found so far (one with highest  change  in
1640   the directional derivative), so called "strongest" report
1641 * another one for most detailed line search (more function  evaluations  =
1642   easier to understand what's going on) which triggered  test #0 criteria,
1643   so called "longest" report
1644 
1645 In both cases following fields are returned:
1646 
1647 * positive - is TRUE  when test flagged suspicious point;  FALSE  if  test
  did not notice anything (in the latter case the fields below are empty).
1649 * fidx - is an index of the function (0 for  target  function, 1 or higher
1650   for nonlinear constraints) which is suspected of being "non-C1"
1651 * x0[], d[] - arrays of length N which store initial point  and  direction
1652   for line search (d[] can be normalized, but does not have to)
1653 * stp[], f[] - arrays of length CNT which store step lengths and  function
  values at these points; f[i] is evaluated at x0+stp[i]*d.
1655 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
1656   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
1657   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
1659 
You can plot the function values stored in the f[] array against the step
lengths in stp[] and study the behavior of your function with  your  own
eyes, just to be sure that the test correctly reported a C1 violation.
1663 
1664   -- ALGLIB --
1665      Copyright 19.11.2018 by Bochkanov Sergey
1666 *************************************************************************/
1667 class _optguardnonc1test0report_owner
1668 {
1669 public:
1670     _optguardnonc1test0report_owner();
1671     _optguardnonc1test0report_owner(const _optguardnonc1test0report_owner &rhs);
1672     _optguardnonc1test0report_owner& operator=(const _optguardnonc1test0report_owner &rhs);
1673     virtual ~_optguardnonc1test0report_owner();
1674     alglib_impl::optguardnonc1test0report* c_ptr();
1675     alglib_impl::optguardnonc1test0report* c_ptr() const;
1676 protected:
1677     alglib_impl::optguardnonc1test0report *p_struct;
1678 };
1679 class optguardnonc1test0report : public _optguardnonc1test0report_owner
1680 {
1681 public:
1682     optguardnonc1test0report();
1683     optguardnonc1test0report(const optguardnonc1test0report &rhs);
1684     optguardnonc1test0report& operator=(const optguardnonc1test0report &rhs);
1685     virtual ~optguardnonc1test0report();
1686     ae_bool &positive;
1687     ae_int_t &fidx;
1688     real_1d_array x0;
1689     real_1d_array d;
1690     ae_int_t &n;
1691     real_1d_array stp;
1692     real_1d_array f;
1693     ae_int_t &cnt;
1694     ae_int_t &stpidxa;
1695     ae_int_t &stpidxb;
1696 
1697 };
1698 
1699 
1700 /*************************************************************************
1701 This  structure  is  used  for  detailed   reporting  about  suspected  C1
1702 continuity violation as flagged by C1 test #1 (OptGuard  has several tests
1703 for C1 continuity, this report is used by #1).
1704 
1705 === WHAT IS TESTED =======================================================
1706 
1707 C1 test #1 studies individual  components  of  the  gradient  as  recorded
1708 during line searches. Upon discovering discontinuity in the gradient  this
1709 test records specific component which was suspected (or  one  with  highest
1710 indication of discontinuity if multiple components are suspected).
1711 
1712 When precise analytic gradient is provided this test is more powerful than
1713 test #0  which  works  with  function  values  and  ignores  user-provided
1714 gradient.  However,  test  #0  becomes  more   powerful   when   numerical
1715 differentiation is employed (in such cases test #1 detects  higher  levels
1716 of numerical noise and becomes too conservative).
1717 
1718 This test also tells specific components of the gradient which violate  C1
1719 continuity, which makes it more informative than #0, which just tells that
1720 continuity is violated.
1721 
1722 
1723 === WHAT IS REPORTED =====================================================
1724 
1725 Actually, report retrieval function returns TWO report structures:
1726 
1727 * one for most suspicious point found so far (one with highest  change  in
1728   the directional derivative), so called "strongest" report
1729 * another one for most detailed line search (more function  evaluations  =
1730   easier to understand what's going on) which triggered  test #1 criteria,
1731   so called "longest" report
1732 
1733 In both cases following fields are returned:
1734 
1735 * positive - is TRUE  when test flagged suspicious point;  FALSE  if  test
  did not notice anything (in the latter case the fields below are empty).
1737 * fidx - is an index of the function (0 for  target  function, 1 or higher
1738   for nonlinear constraints) which is suspected of being "non-C1"
1739 * vidx - is an index of the variable in [0,N) with nonsmooth derivative
1740 * x0[], d[] - arrays of length N which store initial point  and  direction
1741   for line search (d[] can be normalized, but does not have to)
1742 * stp[], g[] - arrays of length CNT which store step lengths and  gradient
  values at these points; g[i] is evaluated at x0+stp[i]*d  and  contains
1744   vidx-th component of the gradient.
1745 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
1746   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
1747   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
1749 
You can plot the gradient values stored in the g[] array against the step
lengths in stp[] and study the behavior of your function with  your  own
eyes, just to be sure that the test correctly reported a C1 violation.
1753 
1754   -- ALGLIB --
1755      Copyright 19.11.2018 by Bochkanov Sergey
1756 *************************************************************************/
1757 class _optguardnonc1test1report_owner
1758 {
1759 public:
1760     _optguardnonc1test1report_owner();
1761     _optguardnonc1test1report_owner(const _optguardnonc1test1report_owner &rhs);
1762     _optguardnonc1test1report_owner& operator=(const _optguardnonc1test1report_owner &rhs);
1763     virtual ~_optguardnonc1test1report_owner();
1764     alglib_impl::optguardnonc1test1report* c_ptr();
1765     alglib_impl::optguardnonc1test1report* c_ptr() const;
1766 protected:
1767     alglib_impl::optguardnonc1test1report *p_struct;
1768 };
1769 class optguardnonc1test1report : public _optguardnonc1test1report_owner
1770 {
1771 public:
1772     optguardnonc1test1report();
1773     optguardnonc1test1report(const optguardnonc1test1report &rhs);
1774     optguardnonc1test1report& operator=(const optguardnonc1test1report &rhs);
1775     virtual ~optguardnonc1test1report();
1776     ae_bool &positive;
1777     ae_int_t &fidx;
1778     ae_int_t &vidx;
1779     real_1d_array x0;
1780     real_1d_array d;
1781     ae_int_t &n;
1782     real_1d_array stp;
1783     real_1d_array g;
1784     ae_int_t &cnt;
1785     ae_int_t &stpidxa;
1786     ae_int_t &stpidxb;
1787 
1788 };
1789 #endif
1790 
1791 #if defined(AE_COMPILE_OPTSERV) || !defined(AE_PARTIAL_BUILD)
1792 
1793 #endif
1794 
1795 #if defined(AE_COMPILE_SNNLS) || !defined(AE_PARTIAL_BUILD)
1796 
1797 #endif
1798 
1799 #if defined(AE_COMPILE_SACTIVESETS) || !defined(AE_PARTIAL_BUILD)
1800 
1801 #endif
1802 
1803 #if defined(AE_COMPILE_QQPSOLVER) || !defined(AE_PARTIAL_BUILD)
1804 
1805 #endif
1806 
1807 #if defined(AE_COMPILE_MINLBFGS) || !defined(AE_PARTIAL_BUILD)
1808 /*************************************************************************
This object stores the state of the L-BFGS nonlinear optimizer.
You should use functions provided by MinLBFGS subpackage to work with this
object
1810 *************************************************************************/
1811 class _minlbfgsstate_owner
1812 {
1813 public:
1814     _minlbfgsstate_owner();
1815     _minlbfgsstate_owner(const _minlbfgsstate_owner &rhs);
1816     _minlbfgsstate_owner& operator=(const _minlbfgsstate_owner &rhs);
1817     virtual ~_minlbfgsstate_owner();
1818     alglib_impl::minlbfgsstate* c_ptr();
1819     alglib_impl::minlbfgsstate* c_ptr() const;
1820 protected:
1821     alglib_impl::minlbfgsstate *p_struct;
1822 };
1823 class minlbfgsstate : public _minlbfgsstate_owner
1824 {
1825 public:
1826     minlbfgsstate();
1827     minlbfgsstate(const minlbfgsstate &rhs);
1828     minlbfgsstate& operator=(const minlbfgsstate &rhs);
1829     virtual ~minlbfgsstate();
1830     ae_bool &needf;
1831     ae_bool &needfg;
1832     ae_bool &xupdated;
1833     double &f;
1834     real_1d_array g;
1835     real_1d_array x;
1836 
1837 };
1838 
1839 
1840 /*************************************************************************
1841 This structure stores optimization report:
1842 * IterationsCount           total number of inner iterations
1843 * NFEV                      number of gradient evaluations
1844 * TerminationType           termination type (see below)
1845 
1846 TERMINATION CODES
1847 
1848 TerminationType field contains completion code, which can be:
1849   -8    internal integrity control detected  infinite  or  NAN  values  in
1850         function/gradient. Abnormal termination signalled.
1851    1    relative function improvement is no more than EpsF.
1852    2    relative step is no more than EpsX.
1853    4    gradient norm is no more than EpsG
   5    MaxIts steps were taken
1855    7    stopping conditions are too stringent,
1856         further improvement is impossible,
1857         X contains best point found so far.
1858    8    terminated    by  user  who  called  minlbfgsrequesttermination().
1859         X contains point which was   "current accepted"  when  termination
1860         request was submitted.
1861 
1862 Other fields of this structure are not documented and should not be used!
1863 *************************************************************************/
1864 class _minlbfgsreport_owner
1865 {
1866 public:
1867     _minlbfgsreport_owner();
1868     _minlbfgsreport_owner(const _minlbfgsreport_owner &rhs);
1869     _minlbfgsreport_owner& operator=(const _minlbfgsreport_owner &rhs);
1870     virtual ~_minlbfgsreport_owner();
1871     alglib_impl::minlbfgsreport* c_ptr();
1872     alglib_impl::minlbfgsreport* c_ptr() const;
1873 protected:
1874     alglib_impl::minlbfgsreport *p_struct;
1875 };
1876 class minlbfgsreport : public _minlbfgsreport_owner
1877 {
1878 public:
1879     minlbfgsreport();
1880     minlbfgsreport(const minlbfgsreport &rhs);
1881     minlbfgsreport& operator=(const minlbfgsreport &rhs);
1882     virtual ~minlbfgsreport();
1883     ae_int_t &iterationscount;
1884     ae_int_t &nfev;
1885     ae_int_t &terminationtype;
1886 
1887 };
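/*************************************************************************
A minimal sketch of inspecting this report. It assumes an L-BFGS session
has already been run on some "state" object and that the report was filled
by minlbfgsresults(); the (state, x, rep) signature used below is an
assumption (see the MinLBFGSResults() reference in the usage notes further
down this header):

    alglib::real_1d_array xsol;
    alglib::minlbfgsreport rep;
    alglib::minlbfgsresults(state, xsol, rep);   // assumed signature
    if( rep.terminationtype>0 )
    {
        // 1/2/4 = tolerance-based convergence, 5 = MaxIts reached,
        // 7 = stopping conditions too stringent, 8 = user-requested stop
        printf("done: %d iterations, %d gradient evaluations\n",
               (int)rep.iterationscount, (int)rep.nfev);
    }
    else
    {
        // negative codes are failures, e.g. -8 = INF/NAN detected
        printf("abnormal termination, code %d\n", (int)rep.terminationtype);
    }
*************************************************************************/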
1888 #endif
1889 
1890 #if defined(AE_COMPILE_QPDENSEAULSOLVER) || !defined(AE_PARTIAL_BUILD)
1891 
1892 #endif
1893 
1894 #if defined(AE_COMPILE_MINBLEIC) || !defined(AE_PARTIAL_BUILD)
1895 /*************************************************************************
1896 This object stores nonlinear optimizer state.
1897 You should use functions provided by MinBLEIC subpackage to work with this
1898 object
1899 *************************************************************************/
1900 class _minbleicstate_owner
1901 {
1902 public:
1903     _minbleicstate_owner();
1904     _minbleicstate_owner(const _minbleicstate_owner &rhs);
1905     _minbleicstate_owner& operator=(const _minbleicstate_owner &rhs);
1906     virtual ~_minbleicstate_owner();
1907     alglib_impl::minbleicstate* c_ptr();
1908     alglib_impl::minbleicstate* c_ptr() const;
1909 protected:
1910     alglib_impl::minbleicstate *p_struct;
1911 };
1912 class minbleicstate : public _minbleicstate_owner
1913 {
1914 public:
1915     minbleicstate();
1916     minbleicstate(const minbleicstate &rhs);
1917     minbleicstate& operator=(const minbleicstate &rhs);
1918     virtual ~minbleicstate();
1919     ae_bool &needf;
1920     ae_bool &needfg;
1921     ae_bool &xupdated;
1922     double &f;
1923     real_1d_array g;
1924     real_1d_array x;
1925 
1926 };
1927 
1928 
1929 /*************************************************************************
1930 This structure stores optimization report:
1931 * IterationsCount           number of iterations
1932 * NFEV                      number of gradient evaluations
1933 * TerminationType           termination type (see below)
1934 
1935 TERMINATION CODES
1936 
1937 TerminationType field contains completion code, which can be:
1938   -8    internal integrity control detected  infinite  or  NAN  values  in
1939         function/gradient. Abnormal termination signalled.
1940   -3    inconsistent constraints. Feasible point is
1941         either nonexistent or too hard to find. Try to
1942         restart optimizer with better initial approximation
1943    1    relative function improvement is no more than EpsF.
1944    2    relative step is no more than EpsX.
1945    4    gradient norm is no more than EpsG
   5    MaxIts steps were taken
1947    7    stopping conditions are too stringent,
1948         further improvement is impossible,
1949         X contains best point found so far.
1950    8    terminated by user who called minbleicrequesttermination(). X contains
1951         point which was "current accepted" when  termination  request  was
1952         submitted.
1953 
1954 ADDITIONAL FIELDS
1955 
1956 There are additional fields which can be used for debugging:
1957 * DebugEqErr                error in the equality constraints (2-norm)
1958 * DebugFS                   f, calculated at projection of initial point
1959                             to the feasible set
1960 * DebugFF                   f, calculated at the final point
1961 * DebugDX                   |X_start-X_final|
1962 *************************************************************************/
1963 class _minbleicreport_owner
1964 {
1965 public:
1966     _minbleicreport_owner();
1967     _minbleicreport_owner(const _minbleicreport_owner &rhs);
1968     _minbleicreport_owner& operator=(const _minbleicreport_owner &rhs);
1969     virtual ~_minbleicreport_owner();
1970     alglib_impl::minbleicreport* c_ptr();
1971     alglib_impl::minbleicreport* c_ptr() const;
1972 protected:
1973     alglib_impl::minbleicreport *p_struct;
1974 };
1975 class minbleicreport : public _minbleicreport_owner
1976 {
1977 public:
1978     minbleicreport();
1979     minbleicreport(const minbleicreport &rhs);
1980     minbleicreport& operator=(const minbleicreport &rhs);
1981     virtual ~minbleicreport();
1982     ae_int_t &iterationscount;
1983     ae_int_t &nfev;
1984     ae_int_t &varidx;
1985     ae_int_t &terminationtype;
1986     double &debugeqerr;
1987     double &debugfs;
1988     double &debugff;
1989     double &debugdx;
1990     ae_int_t &debugfeasqpits;
1991     ae_int_t &debugfeasgpaits;
1992     ae_int_t &inneriterationscount;
1993     ae_int_t &outeriterationscount;
1994 
1995 };
1996 #endif
1997 
1998 #if defined(AE_COMPILE_QPBLEICSOLVER) || !defined(AE_PARTIAL_BUILD)
1999 
2000 #endif
2001 
2002 #if defined(AE_COMPILE_MINQP) || !defined(AE_PARTIAL_BUILD)
2003 /*************************************************************************
2004 This object stores nonlinear optimizer state.
2005 You should use functions provided by MinQP subpackage to work with this
2006 object
2007 *************************************************************************/
2008 class _minqpstate_owner
2009 {
2010 public:
2011     _minqpstate_owner();
2012     _minqpstate_owner(const _minqpstate_owner &rhs);
2013     _minqpstate_owner& operator=(const _minqpstate_owner &rhs);
2014     virtual ~_minqpstate_owner();
2015     alglib_impl::minqpstate* c_ptr();
2016     alglib_impl::minqpstate* c_ptr() const;
2017 protected:
2018     alglib_impl::minqpstate *p_struct;
2019 };
2020 class minqpstate : public _minqpstate_owner
2021 {
2022 public:
2023     minqpstate();
2024     minqpstate(const minqpstate &rhs);
2025     minqpstate& operator=(const minqpstate &rhs);
2026     virtual ~minqpstate();
2027 
2028 };
2029 
2030 
2031 /*************************************************************************
2032 This structure stores optimization report:
2033 * InnerIterationsCount      number of inner iterations
2034 * OuterIterationsCount      number of outer iterations
* NCholesky                 number of Cholesky decompositions
2036 * NMV                       number of matrix-vector products
2037                             (only products calculated as part of iterative
2038                             process are counted)
2039 * TerminationType           completion code (see below)
2040 
2041 Completion codes:
2042 * -9    failure of the automatic scale evaluation:  one  of  the  diagonal
2043         elements of the quadratic term is non-positive.  Specify  variable
2044         scales manually!
2045 * -5    inappropriate solver was used:
2046         * QuickQP solver for problem with general linear constraints (dense/sparse)
2047 * -4    BLEIC-QP or QuickQP solver found unconstrained direction
2048         of negative curvature (function is unbounded from
2049         below  even  under  constraints),  no  meaningful
2050         minimum can be found.
2051 * -3    inconsistent constraints (or, maybe, feasible point is
2052         too hard to find). If you are sure that constraints are feasible,
2053         try to restart optimizer with better initial approximation.
2054 * -1    solver error
2055 *  1..4 successful completion
*  5    MaxIts steps were taken
2057 *  7    stopping conditions are too stringent,
2058         further improvement is impossible,
2059         X contains best point found so far.
2060 *************************************************************************/
2061 class _minqpreport_owner
2062 {
2063 public:
2064     _minqpreport_owner();
2065     _minqpreport_owner(const _minqpreport_owner &rhs);
2066     _minqpreport_owner& operator=(const _minqpreport_owner &rhs);
2067     virtual ~_minqpreport_owner();
2068     alglib_impl::minqpreport* c_ptr();
2069     alglib_impl::minqpreport* c_ptr() const;
2070 protected:
2071     alglib_impl::minqpreport *p_struct;
2072 };
2073 class minqpreport : public _minqpreport_owner
2074 {
2075 public:
2076     minqpreport();
2077     minqpreport(const minqpreport &rhs);
2078     minqpreport& operator=(const minqpreport &rhs);
2079     virtual ~minqpreport();
2080     ae_int_t &inneriterationscount;
2081     ae_int_t &outeriterationscount;
2082     ae_int_t &nmv;
2083     ae_int_t &ncholesky;
2084     ae_int_t &terminationtype;
2085 
2086 };
2087 #endif
2088 
2089 #if defined(AE_COMPILE_REVISEDDUALSIMPLEX) || !defined(AE_PARTIAL_BUILD)
2090 
2091 #endif
2092 
2093 #if defined(AE_COMPILE_MINLP) || !defined(AE_PARTIAL_BUILD)
2094 /*************************************************************************
2095 This object stores linear solver state.
2096 You should use functions provided by MinLP subpackage to work with this
2097 object
2098 *************************************************************************/
2099 class _minlpstate_owner
2100 {
2101 public:
2102     _minlpstate_owner();
2103     _minlpstate_owner(const _minlpstate_owner &rhs);
2104     _minlpstate_owner& operator=(const _minlpstate_owner &rhs);
2105     virtual ~_minlpstate_owner();
2106     alglib_impl::minlpstate* c_ptr();
2107     alglib_impl::minlpstate* c_ptr() const;
2108 protected:
2109     alglib_impl::minlpstate *p_struct;
2110 };
2111 class minlpstate : public _minlpstate_owner
2112 {
2113 public:
2114     minlpstate();
2115     minlpstate(const minlpstate &rhs);
2116     minlpstate& operator=(const minlpstate &rhs);
2117     virtual ~minlpstate();
2118 
2119 };
2120 
2121 
2122 /*************************************************************************
2123 This structure stores optimization report:
2124 * f                         target function value
2125 * y                         dual variables
2126 * stats                     array[N+M], statuses of box (N) and linear (M)
2127                             constraints:
2128                             * stats[i]>0  =>  constraint at upper bound
2129                                               (also used for free non-basic
2130                                               variables set to zero)
2131                             * stats[i]<0  =>  constraint at lower bound
2132                             * stats[i]=0  =>  constraint is inactive, basic
2133                                               variable
2134 * primalerror               primal feasibility error
2135 * dualerror                 dual feasibility error
2136 * iterationscount           iteration count
2137 * terminationtype           completion code (see below)
2138 
2139 Completion codes:
2140 * -4    LP problem is primal unbounded (dual infeasible)
2141 * -3    LP problem is primal infeasible (dual unbounded)
2142 *  1..4 successful completion
*  5    MaxIts steps were taken
2144 *  7    stopping conditions are too stringent,
2145         further improvement is impossible,
2146         X contains best point found so far.
2147 *************************************************************************/
2148 class _minlpreport_owner
2149 {
2150 public:
2151     _minlpreport_owner();
2152     _minlpreport_owner(const _minlpreport_owner &rhs);
2153     _minlpreport_owner& operator=(const _minlpreport_owner &rhs);
2154     virtual ~_minlpreport_owner();
2155     alglib_impl::minlpreport* c_ptr();
2156     alglib_impl::minlpreport* c_ptr() const;
2157 protected:
2158     alglib_impl::minlpreport *p_struct;
2159 };
2160 class minlpreport : public _minlpreport_owner
2161 {
2162 public:
2163     minlpreport();
2164     minlpreport(const minlpreport &rhs);
2165     minlpreport& operator=(const minlpreport &rhs);
2166     virtual ~minlpreport();
2167     double &f;
2168     real_1d_array y;
2169     integer_1d_array stats;
2170     double &primalerror;
2171     double &dualerror;
2172     ae_int_t &iterationscount;
2173     ae_int_t &terminationtype;
2174 
2175 };
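/*************************************************************************
A minimal sketch of interpreting the stats[] array described above. It
assumes "state" holds a solved LP and that the report was filled by
minlpresults(); the (state, x, rep) signature used below is an assumption:

    alglib::real_1d_array xlp;
    alglib::minlpreport lprep;
    alglib::minlpresults(state, xlp, lprep);     // assumed signature
    if( lprep.terminationtype>0 )
    {
        // first N entries describe box constraints, the next M entries
        // describe general linear constraints
        for(int i=0; i<lprep.stats.length(); i++)
        {
            if( lprep.stats[i]>0 )
                printf("constraint %d: at upper bound\n", i);
            else if( lprep.stats[i]<0 )
                printf("constraint %d: at lower bound\n", i);
            else
                printf("constraint %d: inactive, basic variable\n", i);
        }
    }
*************************************************************************/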
2176 #endif
2177 
2178 #if defined(AE_COMPILE_NLCSLP) || !defined(AE_PARTIAL_BUILD)
2179 
2180 #endif
2181 
2182 #if defined(AE_COMPILE_MINNLC) || !defined(AE_PARTIAL_BUILD)
2183 /*************************************************************************
2184 This object stores nonlinear optimizer state.
2185 You should use functions provided by MinNLC subpackage to work  with  this
2186 object
2187 *************************************************************************/
2188 class _minnlcstate_owner
2189 {
2190 public:
2191     _minnlcstate_owner();
2192     _minnlcstate_owner(const _minnlcstate_owner &rhs);
2193     _minnlcstate_owner& operator=(const _minnlcstate_owner &rhs);
2194     virtual ~_minnlcstate_owner();
2195     alglib_impl::minnlcstate* c_ptr();
2196     alglib_impl::minnlcstate* c_ptr() const;
2197 protected:
2198     alglib_impl::minnlcstate *p_struct;
2199 };
2200 class minnlcstate : public _minnlcstate_owner
2201 {
2202 public:
2203     minnlcstate();
2204     minnlcstate(const minnlcstate &rhs);
2205     minnlcstate& operator=(const minnlcstate &rhs);
2206     virtual ~minnlcstate();
2207     ae_bool &needfi;
2208     ae_bool &needfij;
2209     ae_bool &xupdated;
2210     double &f;
2211     real_1d_array fi;
2212     real_2d_array j;
2213     real_1d_array x;
2214 
2215 };
2216 
2217 
2218 /*************************************************************************
2219 These fields store optimization report:
2220 * iterationscount           total number of inner iterations
2221 * nfev                      number of gradient evaluations
2222 * terminationtype           termination type (see below)
2223 
2224 Scaled constraint violations are reported:
2225 * bcerr                     maximum violation of the box constraints
2226 * bcidx                     index of the most violated box  constraint (or
2227                             -1, if all box constraints  are  satisfied  or
2228                             there is no box constraint)
2229 * lcerr                     maximum violation of the  linear  constraints,
2230                             computed as maximum  scaled  distance  between
2231                             final point and constraint boundary.
2232 * lcidx                     index of the most violated  linear  constraint
2233                             (or -1, if all constraints  are  satisfied  or
                            there are no general linear constraints)
2235 * nlcerr                    maximum violation of the nonlinear constraints
2236 * nlcidx                    index of the most violated nonlinear constraint
2237                             (or -1, if all constraints  are  satisfied  or
                            there are no nonlinear constraints)
2239 
2240 Violations of box constraints are scaled on per-component basis  according
2241 to  the  scale  vector s[] as specified by minnlcsetscale(). Violations of
2242 the general linear  constraints  are  also  computed  using  user-supplied
variable scaling. Violations of nonlinear constraints are computed "as is".
2244 
2245 TERMINATION CODES
2246 
2247 TerminationType field contains completion code, which can be either:
2248 
2249 === FAILURE CODE ===
2250   -8    internal integrity control detected  infinite  or  NAN  values  in
2251         function/gradient. Abnormal termination signaled.
2252   -3    box  constraints  are  infeasible.  Note: infeasibility of non-box
2253         constraints does NOT trigger emergency  completion;  you  have  to
2254         examine  bcerr/lcerr/nlcerr   to  detect   possibly   inconsistent
2255         constraints.
2256 
2257 === SUCCESS CODE ===
2258    2    relative step is no more than EpsX.
   5    MaxIts steps were taken
2260    7    stopping conditions are too stringent,
2261         further improvement is impossible,
2262         X contains best point found so far.
2263    8    user requested algorithm termination via minnlcrequesttermination(),
2264         last accepted point is returned
2265 
2266 Other fields of this structure are not documented and should not be used!
2267 *************************************************************************/
2268 class _minnlcreport_owner
2269 {
2270 public:
2271     _minnlcreport_owner();
2272     _minnlcreport_owner(const _minnlcreport_owner &rhs);
2273     _minnlcreport_owner& operator=(const _minnlcreport_owner &rhs);
2274     virtual ~_minnlcreport_owner();
2275     alglib_impl::minnlcreport* c_ptr();
2276     alglib_impl::minnlcreport* c_ptr() const;
2277 protected:
2278     alglib_impl::minnlcreport *p_struct;
2279 };
2280 class minnlcreport : public _minnlcreport_owner
2281 {
2282 public:
2283     minnlcreport();
2284     minnlcreport(const minnlcreport &rhs);
2285     minnlcreport& operator=(const minnlcreport &rhs);
2286     virtual ~minnlcreport();
2287     ae_int_t &iterationscount;
2288     ae_int_t &nfev;
2289     ae_int_t &terminationtype;
2290     double &bcerr;
2291     ae_int_t &bcidx;
2292     double &lcerr;
2293     ae_int_t &lcidx;
2294     double &nlcerr;
2295     ae_int_t &nlcidx;
2296     ae_int_t &dbgphase0its;
2297 
2298 };
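/*************************************************************************
A minimal sketch of checking the scaled constraint violations described
above. It assumes "state" holds a finished MinNLC session, that the report
was filled by minnlcresults() (the (state, x, rep) signature is an
assumption), and uses an illustrative feasibility tolerance of 1.0E-6:

    alglib::real_1d_array xnlc;
    alglib::minnlcreport nlcrep;
    alglib::minnlcresults(state, xnlc, nlcrep);  // assumed signature
    const double tol = 1.0E-6;                   // problem-dependent
    if( nlcrep.bcerr>tol )
        printf("box constraint %d violated by %g\n",
               (int)nlcrep.bcidx, nlcrep.bcerr);
    if( nlcrep.lcerr>tol )
        printf("linear constraint %d violated by %g\n",
               (int)nlcrep.lcidx, nlcrep.lcerr);
    if( nlcrep.nlcerr>tol )
        printf("nonlinear constraint %d violated by %g\n",
               (int)nlcrep.nlcidx, nlcrep.nlcerr);
*************************************************************************/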
2299 #endif
2300 
2301 #if defined(AE_COMPILE_MINBC) || !defined(AE_PARTIAL_BUILD)
2302 /*************************************************************************
2303 This object stores nonlinear optimizer state.
2304 You should use functions provided by MinBC subpackage to work with this
2305 object
2306 *************************************************************************/
2307 class _minbcstate_owner
2308 {
2309 public:
2310     _minbcstate_owner();
2311     _minbcstate_owner(const _minbcstate_owner &rhs);
2312     _minbcstate_owner& operator=(const _minbcstate_owner &rhs);
2313     virtual ~_minbcstate_owner();
2314     alglib_impl::minbcstate* c_ptr();
2315     alglib_impl::minbcstate* c_ptr() const;
2316 protected:
2317     alglib_impl::minbcstate *p_struct;
2318 };
2319 class minbcstate : public _minbcstate_owner
2320 {
2321 public:
2322     minbcstate();
2323     minbcstate(const minbcstate &rhs);
2324     minbcstate& operator=(const minbcstate &rhs);
2325     virtual ~minbcstate();
2326     ae_bool &needf;
2327     ae_bool &needfg;
2328     ae_bool &xupdated;
2329     double &f;
2330     real_1d_array g;
2331     real_1d_array x;
2332 
2333 };
2334 
2335 
2336 /*************************************************************************
2337 This structure stores optimization report:
2338 * iterationscount           number of iterations
2339 * nfev                      number of gradient evaluations
2340 * terminationtype           termination type (see below)
2341 
2342 TERMINATION CODES
2343 
2344 terminationtype field contains completion code, which can be:
2345   -8    internal integrity control detected  infinite  or  NAN  values  in
2346         function/gradient. Abnormal termination signalled.
2347   -3    inconsistent constraints.
2348    1    relative function improvement is no more than EpsF.
2349    2    relative step is no more than EpsX.
2350    4    gradient norm is no more than EpsG
   5    MaxIts steps were taken
2352    7    stopping conditions are too stringent,
2353         further improvement is impossible,
2354         X contains best point found so far.
2355    8    terminated by user who called minbcrequesttermination(). X contains
2356         point which was "current accepted" when  termination  request  was
2357         submitted.
2358 *************************************************************************/
2359 class _minbcreport_owner
2360 {
2361 public:
2362     _minbcreport_owner();
2363     _minbcreport_owner(const _minbcreport_owner &rhs);
2364     _minbcreport_owner& operator=(const _minbcreport_owner &rhs);
2365     virtual ~_minbcreport_owner();
2366     alglib_impl::minbcreport* c_ptr();
2367     alglib_impl::minbcreport* c_ptr() const;
2368 protected:
2369     alglib_impl::minbcreport *p_struct;
2370 };
2371 class minbcreport : public _minbcreport_owner
2372 {
2373 public:
2374     minbcreport();
2375     minbcreport(const minbcreport &rhs);
2376     minbcreport& operator=(const minbcreport &rhs);
2377     virtual ~minbcreport();
2378     ae_int_t &iterationscount;
2379     ae_int_t &nfev;
2380     ae_int_t &varidx;
2381     ae_int_t &terminationtype;
2382 
2383 };
2384 #endif
2385 
2386 #if defined(AE_COMPILE_MINNS) || !defined(AE_PARTIAL_BUILD)
2387 /*************************************************************************
2388 This object stores nonlinear optimizer state.
2389 You should use functions provided by MinNS subpackage to work  with  this
2390 object
2391 *************************************************************************/
2392 class _minnsstate_owner
2393 {
2394 public:
2395     _minnsstate_owner();
2396     _minnsstate_owner(const _minnsstate_owner &rhs);
2397     _minnsstate_owner& operator=(const _minnsstate_owner &rhs);
2398     virtual ~_minnsstate_owner();
2399     alglib_impl::minnsstate* c_ptr();
2400     alglib_impl::minnsstate* c_ptr() const;
2401 protected:
2402     alglib_impl::minnsstate *p_struct;
2403 };
2404 class minnsstate : public _minnsstate_owner
2405 {
2406 public:
2407     minnsstate();
2408     minnsstate(const minnsstate &rhs);
2409     minnsstate& operator=(const minnsstate &rhs);
2410     virtual ~minnsstate();
2411     ae_bool &needfi;
2412     ae_bool &needfij;
2413     ae_bool &xupdated;
2414     double &f;
2415     real_1d_array fi;
2416     real_2d_array j;
2417     real_1d_array x;
2418 
2419 };
2420 
2421 
2422 /*************************************************************************
2423 This structure stores optimization report:
2424 * IterationsCount           total number of inner iterations
2425 * NFEV                      number of gradient evaluations
2426 * TerminationType           termination type (see below)
2427 * CErr                      maximum violation of all types of constraints
2428 * LCErr                     maximum violation of linear constraints
2429 * NLCErr                    maximum violation of nonlinear constraints
2430 
2431 TERMINATION CODES
2432 
2433 TerminationType field contains completion code, which can be:
2434   -8    internal integrity control detected  infinite  or  NAN  values  in
2435         function/gradient. Abnormal termination signalled.
2436   -3    box constraints are inconsistent
2437   -1    inconsistent parameters were passed:
2438         * penalty parameter for minnssetalgoags() is zero,
2439           but we have nonlinear constraints set by minnssetnlc()
2440    2    sampling radius decreased below epsx
   5    MaxIts steps were taken
2442    7    stopping conditions are too stringent,
2443         further improvement is impossible,
2444         X contains best point found so far.
2445    8    User requested termination via MinNSRequestTermination()
2446 
2447 Other fields of this structure are not documented and should not be used!
2448 *************************************************************************/
2449 class _minnsreport_owner
2450 {
2451 public:
2452     _minnsreport_owner();
2453     _minnsreport_owner(const _minnsreport_owner &rhs);
2454     _minnsreport_owner& operator=(const _minnsreport_owner &rhs);
2455     virtual ~_minnsreport_owner();
2456     alglib_impl::minnsreport* c_ptr();
2457     alglib_impl::minnsreport* c_ptr() const;
2458 protected:
2459     alglib_impl::minnsreport *p_struct;
2460 };
2461 class minnsreport : public _minnsreport_owner
2462 {
2463 public:
2464     minnsreport();
2465     minnsreport(const minnsreport &rhs);
2466     minnsreport& operator=(const minnsreport &rhs);
2467     virtual ~minnsreport();
2468     ae_int_t &iterationscount;
2469     ae_int_t &nfev;
2470     double &cerr;
2471     double &lcerr;
2472     double &nlcerr;
2473     ae_int_t &terminationtype;
2474     ae_int_t &varidx;
2475     ae_int_t &funcidx;
2476 
2477 };
2478 #endif
2479 
2480 #if defined(AE_COMPILE_MINCOMP) || !defined(AE_PARTIAL_BUILD)
2481 /*************************************************************************
2482 
2483 *************************************************************************/
2484 class _minasastate_owner
2485 {
2486 public:
2487     _minasastate_owner();
2488     _minasastate_owner(const _minasastate_owner &rhs);
2489     _minasastate_owner& operator=(const _minasastate_owner &rhs);
2490     virtual ~_minasastate_owner();
2491     alglib_impl::minasastate* c_ptr();
2492     alglib_impl::minasastate* c_ptr() const;
2493 protected:
2494     alglib_impl::minasastate *p_struct;
2495 };
2496 class minasastate : public _minasastate_owner
2497 {
2498 public:
2499     minasastate();
2500     minasastate(const minasastate &rhs);
2501     minasastate& operator=(const minasastate &rhs);
2502     virtual ~minasastate();
2503     ae_bool &needfg;
2504     ae_bool &xupdated;
2505     double &f;
2506     real_1d_array g;
2507     real_1d_array x;
2508 
2509 };
2510 
2511 
2512 /*************************************************************************
2513 
2514 *************************************************************************/
2515 class _minasareport_owner
2516 {
2517 public:
2518     _minasareport_owner();
2519     _minasareport_owner(const _minasareport_owner &rhs);
2520     _minasareport_owner& operator=(const _minasareport_owner &rhs);
2521     virtual ~_minasareport_owner();
2522     alglib_impl::minasareport* c_ptr();
2523     alglib_impl::minasareport* c_ptr() const;
2524 protected:
2525     alglib_impl::minasareport *p_struct;
2526 };
2527 class minasareport : public _minasareport_owner
2528 {
2529 public:
2530     minasareport();
2531     minasareport(const minasareport &rhs);
2532     minasareport& operator=(const minasareport &rhs);
2533     virtual ~minasareport();
2534     ae_int_t &iterationscount;
2535     ae_int_t &nfev;
2536     ae_int_t &terminationtype;
2537     ae_int_t &activeconstraints;
2538 
2539 };
2540 #endif
2541 
2542 #if defined(AE_COMPILE_MINCG) || !defined(AE_PARTIAL_BUILD)
2543 /*************************************************************************
2544 This object stores state of the nonlinear CG optimizer.
2545 
2546 You should use ALGLIB functions to work with this object.
2547 *************************************************************************/
2548 class _mincgstate_owner
2549 {
2550 public:
2551     _mincgstate_owner();
2552     _mincgstate_owner(const _mincgstate_owner &rhs);
2553     _mincgstate_owner& operator=(const _mincgstate_owner &rhs);
2554     virtual ~_mincgstate_owner();
2555     alglib_impl::mincgstate* c_ptr();
2556     alglib_impl::mincgstate* c_ptr() const;
2557 protected:
2558     alglib_impl::mincgstate *p_struct;
2559 };
2560 class mincgstate : public _mincgstate_owner
2561 {
2562 public:
2563     mincgstate();
2564     mincgstate(const mincgstate &rhs);
2565     mincgstate& operator=(const mincgstate &rhs);
2566     virtual ~mincgstate();
2567     ae_bool &needf;
2568     ae_bool &needfg;
2569     ae_bool &xupdated;
2570     double &f;
2571     real_1d_array g;
2572     real_1d_array x;
2573 
2574 };
2575 
2576 
2577 /*************************************************************************
2578 This structure stores optimization report:
2579 * IterationsCount           total number of inner iterations
2580 * NFEV                      number of gradient evaluations
2581 * TerminationType           termination type (see below)
2582 
2583 TERMINATION CODES
2584 
2585 TerminationType field contains completion code, which can be:
2586   -8    internal integrity control detected  infinite  or  NAN  values  in
2587         function/gradient. Abnormal termination signalled.
2588    1    relative function improvement is no more than EpsF.
2589    2    relative step is no more than EpsX.
2590    4    gradient norm is no more than EpsG
   5    MaxIts steps were taken
2592    7    stopping conditions are too stringent,
2593         further improvement is impossible,
2594         X contains best point found so far.
2595    8    terminated by user who called mincgrequesttermination(). X contains
2596         point which was "current accepted" when  termination  request  was
2597         submitted.
2598 
2599 Other fields of this structure are not documented and should not be used!
2600 *************************************************************************/
2601 class _mincgreport_owner
2602 {
2603 public:
2604     _mincgreport_owner();
2605     _mincgreport_owner(const _mincgreport_owner &rhs);
2606     _mincgreport_owner& operator=(const _mincgreport_owner &rhs);
2607     virtual ~_mincgreport_owner();
2608     alglib_impl::mincgreport* c_ptr();
2609     alglib_impl::mincgreport* c_ptr() const;
2610 protected:
2611     alglib_impl::mincgreport *p_struct;
2612 };
2613 class mincgreport : public _mincgreport_owner
2614 {
2615 public:
2616     mincgreport();
2617     mincgreport(const mincgreport &rhs);
2618     mincgreport& operator=(const mincgreport &rhs);
2619     virtual ~mincgreport();
2620     ae_int_t &iterationscount;
2621     ae_int_t &nfev;
2622     ae_int_t &terminationtype;
2623 
2624 };
2625 #endif
2626 
2627 #if defined(AE_COMPILE_MINLM) || !defined(AE_PARTIAL_BUILD)
2628 /*************************************************************************
2629 Levenberg-Marquardt optimizer.
2630 
2631 This structure should be created using one of the MinLMCreate???()
2632 functions. You should not access its fields directly; use ALGLIB functions
2633 to work with it.
2634 *************************************************************************/
2635 class _minlmstate_owner
2636 {
2637 public:
2638     _minlmstate_owner();
2639     _minlmstate_owner(const _minlmstate_owner &rhs);
2640     _minlmstate_owner& operator=(const _minlmstate_owner &rhs);
2641     virtual ~_minlmstate_owner();
2642     alglib_impl::minlmstate* c_ptr();
2643     alglib_impl::minlmstate* c_ptr() const;
2644 protected:
2645     alglib_impl::minlmstate *p_struct;
2646 };
2647 class minlmstate : public _minlmstate_owner
2648 {
2649 public:
2650     minlmstate();
2651     minlmstate(const minlmstate &rhs);
2652     minlmstate& operator=(const minlmstate &rhs);
2653     virtual ~minlmstate();
2654     ae_bool &needf;
2655     ae_bool &needfg;
2656     ae_bool &needfgh;
2657     ae_bool &needfi;
2658     ae_bool &needfij;
2659     ae_bool &xupdated;
2660     double &f;
2661     real_1d_array fi;
2662     real_1d_array g;
2663     real_2d_array h;
2664     real_2d_array j;
2665     real_1d_array x;
2666 
2667 };
2668 
2669 
2670 /*************************************************************************
2671 Optimization report, filled by MinLMResults() function
2672 
2673 FIELDS:
* TerminationType, completion code:
2675     * -8    optimizer detected NAN/INF values either in the function itself,
2676             or in its Jacobian
2677     * -5    inappropriate solver was used:
2678             * solver created with minlmcreatefgh() used  on  problem  with
2679               general linear constraints (set with minlmsetlc() call).
2680     * -3    constraints are inconsistent
2681     *  2    relative step is no more than EpsX.
    *  5    MaxIts steps were taken
2683     *  7    stopping conditions are too stringent,
2684             further improvement is impossible
2685     *  8    terminated   by  user  who  called  MinLMRequestTermination().
2686             X contains point which was "current accepted" when termination
2687             request was submitted.
2688 * IterationsCount, contains iterations count
2689 * NFunc, number of function calculations
* NJac, number of Jacobian matrix calculations
2691 * NGrad, number of gradient calculations
2692 * NHess, number of Hessian calculations
2693 * NCholesky, number of Cholesky decomposition calculations
2694 *************************************************************************/
2695 class _minlmreport_owner
2696 {
2697 public:
2698     _minlmreport_owner();
2699     _minlmreport_owner(const _minlmreport_owner &rhs);
2700     _minlmreport_owner& operator=(const _minlmreport_owner &rhs);
2701     virtual ~_minlmreport_owner();
2702     alglib_impl::minlmreport* c_ptr();
2703     alglib_impl::minlmreport* c_ptr() const;
2704 protected:
2705     alglib_impl::minlmreport *p_struct;
2706 };
2707 class minlmreport : public _minlmreport_owner
2708 {
2709 public:
2710     minlmreport();
2711     minlmreport(const minlmreport &rhs);
2712     minlmreport& operator=(const minlmreport &rhs);
2713     virtual ~minlmreport();
2714     ae_int_t &iterationscount;
2715     ae_int_t &terminationtype;
2716     ae_int_t &nfunc;
2717     ae_int_t &njac;
2718     ae_int_t &ngrad;
2719     ae_int_t &nhess;
2720     ae_int_t &ncholesky;
2721 
2722 };
2723 #endif
2724 
2725 #if defined(AE_COMPILE_CQMODELS) || !defined(AE_PARTIAL_BUILD)
2726 
2727 #endif
2728 
2729 #if defined(AE_COMPILE_OPTGUARDAPI) || !defined(AE_PARTIAL_BUILD)
2730 
2731 #endif
2732 
2733 #if defined(AE_COMPILE_OPTSERV) || !defined(AE_PARTIAL_BUILD)
2734 
2735 #endif
2736 
2737 #if defined(AE_COMPILE_SNNLS) || !defined(AE_PARTIAL_BUILD)
2738 
2739 #endif
2740 
2741 #if defined(AE_COMPILE_SACTIVESETS) || !defined(AE_PARTIAL_BUILD)
2742 
2743 #endif
2744 
2745 #if defined(AE_COMPILE_QQPSOLVER) || !defined(AE_PARTIAL_BUILD)
2746 
2747 #endif
2748 
2749 #if defined(AE_COMPILE_MINLBFGS) || !defined(AE_PARTIAL_BUILD)
2750 /*************************************************************************
2751         LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION
2752 
2753 DESCRIPTION:
The subroutine minimizes a function F(x) of N arguments by using a  quasi-
Newton method (L-BFGS scheme) which is optimized to use a  minimum  amount
of memory.
The subroutine generates an approximation of the inverse Hessian matrix by
using information about the last M steps of the algorithm (instead of  all
N). This reduces the required amount of memory from order N^2 to order
2*N*M.
2761 
2762 
2763 REQUIREMENTS:
2764 Algorithm will request following information during its operation:
2765 * function value F and its gradient G (simultaneously) at given point X
2766 
2767 
2768 USAGE:
2769 1. User initializes algorithm state with MinLBFGSCreate() call
2. User tunes solver parameters with MinLBFGSSetCond(), MinLBFGSSetStpMax()
2771    and other functions
2772 3. User calls MinLBFGSOptimize() function which takes algorithm  state and
2773    pointer (delegate, etc.) to callback function which calculates F/G.
2774 4. User calls MinLBFGSResults() to get solution
2775 5. Optionally user may call MinLBFGSRestartFrom() to solve another problem
2776    with same N/M but another starting point and/or another function.
   MinLBFGSRestartFrom() allows you to reuse an already initialized structure.
2778 
2779 
2780 INPUT PARAMETERS:
2781     N       -   problem dimension. N>0
    M       -   number of corrections in the BFGS scheme of Hessian
                approximation update. Recommended value: 3<=M<=7. Smaller
                values lead to worse convergence; larger values do not give
                considerably better convergence, but do degrade
                performance. M<=N.
2787     X       -   initial solution approximation, array[0..N-1].
2788 
2789 
2790 OUTPUT PARAMETERS:
2791     State   -   structure which stores algorithm state
2792 
2793 
2794 NOTES:
1. you may tune stopping conditions with the MinLBFGSSetCond() function
2. if the target function contains exp() or other fast-growing  functions,
   and the optimization algorithm makes steps so large that they  lead  to
   overflow, use the MinLBFGSSetStpMax() function to bound the algorithm's
   steps. However, L-BFGS rarely needs such tuning.
2800 
2801 
2802   -- ALGLIB --
2803      Copyright 02.04.2010 by Bochkanov Sergey
2804 *************************************************************************/
2805 void minlbfgscreate(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
2806 void minlbfgscreate(const ae_int_t m, const real_1d_array &x, minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
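/*************************************************************************
A minimal creation sketch for a 2-dimensional problem (the dimensions and
M value are illustrative; remember that M<=N):

    alglib::real_1d_array x = "[0.0, 0.0]";   // initial approximation, N=2
    alglib::minlbfgsstate state;
    alglib::minlbfgscreate(2, 2, x, state);   // explicit N=2, M=2
    // equivalently, let N be inferred from the length of x:
    // alglib::minlbfgscreate(2, x, state);
*************************************************************************/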
2807 
2808 
2809 /*************************************************************************
2810 The subroutine is finite difference variant of MinLBFGSCreate().  It  uses
2811 finite differences in order to differentiate target function.
2812 
The description below contains only information specific to this function.
We recommend reading the comments on MinLBFGSCreate() for more details
about creating the L-BFGS optimizer.
2816 
2817 INPUT PARAMETERS:
2818     N       -   problem dimension, N>0:
2819                 * if given, only leading N elements of X are used
2820                 * if not given, automatically determined from size of X
    M       -   number of corrections in the BFGS scheme of Hessian
                approximation update. Recommended value: 3<=M<=7. Smaller
                values lead to worse convergence; larger values do not give
                considerably better convergence, but do degrade
                performance. M<=N.
2826     X       -   starting point, array[0..N-1].
2827     DiffStep-   differentiation step, >0
2828 
2829 OUTPUT PARAMETERS:
2830     State   -   structure which stores algorithm state
2831 
2832 NOTES:
2833 1. algorithm uses 4-point central formula for differentiation.
2834 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
2835    S[] is scaling vector which can be set by MinLBFGSSetScale() call.
3. we recommend moderate values of the differentiation step. Too large a
   step results in large truncation errors, while too small a step results
   in large numerical (rounding) errors. 1.0E-6 is a good value to start
   with.
4. Numerical differentiation is very inefficient - one gradient
   calculation needs 4*N function evaluations. This function will work for
   any N - small (1...10), moderate (10...100) or large (100...).
   However, the performance penalty will be severe for all but small N.
   Note also that code which relies on numerical differentiation is less
   robust and less precise. L-BFGS needs exact gradient values; an
   imprecise gradient may slow down convergence, especially on highly
   nonlinear problems.
   Thus we recommend using this function only for fast prototyping on
   small-dimensional problems, and implementing an analytical gradient as
   soon as possible.
2852 
2853   -- ALGLIB --
2854      Copyright 16.05.2011 by Bochkanov Sergey
2855 *************************************************************************/
2856 void minlbfgscreatef(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
2857 void minlbfgscreatef(const ae_int_t m, const real_1d_array &x, const double diffstep, minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
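/*************************************************************************
A minimal sketch of the finite-difference variant; the 1.0E-6 step follows
note 3 above and, like everything else here, is illustrative:

    alglib::real_1d_array x = "[0.0, 0.0]";
    alglib::minlbfgsstate state;
    alglib::minlbfgscreatef(2, 2, x, 1.0E-6, state);  // N=2, M=2, DiffStep
    // a state created this way must be driven by the function-only
    // variant of minlbfgsoptimize() (see the callback table below)
*************************************************************************/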
2858 
2859 
2860 /*************************************************************************
2861 This function sets stopping conditions for L-BFGS optimization algorithm.
2862 
2863 INPUT PARAMETERS:
2864     State   -   structure which stores algorithm state
2865     EpsG    -   >=0
2866                 The  subroutine  finishes  its  work   if   the  condition
2867                 |v|<EpsG is satisfied, where:
                * |.| means Euclidean norm
2869                 * v - scaled gradient vector, v[i]=g[i]*s[i]
2870                 * g - gradient
2871                 * s - scaling coefficients set by MinLBFGSSetScale()
2872     EpsF    -   >=0
2873                 The  subroutine  finishes  its work if on k+1-th iteration
2874                 the  condition  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
2875                 is satisfied.
2876     EpsX    -   >=0
2877                 The subroutine finishes its work if  on  k+1-th  iteration
2878                 the condition |v|<=EpsX is fulfilled, where:
                * |.| means Euclidean norm
                * v - scaled step vector, v[i]=dx[i]/s[i]
                * dx - step vector, dx=X(k+1)-X(k)
2882                 * s - scaling coefficients set by MinLBFGSSetScale()
2883     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
2884                 iterations is unlimited.
2885 
2886 Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
2887 automatic stopping criterion selection (small EpsX).
2888 
2889   -- ALGLIB --
2890      Copyright 02.04.2010 by Bochkanov Sergey
2891 *************************************************************************/
2892 void minlbfgssetcond(const minlbfgsstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
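/*************************************************************************
A minimal sketch, assuming "state" was created as shown above; the
tolerance and iteration limit are illustrative and should be adapted to
the scaling of the problem:

    // stop when the scaled gradient norm drops below 1.0E-6 or after at
    // most 100 iterations; EpsF/EpsX criteria are left disabled
    alglib::minlbfgssetcond(state, 1.0E-6, 0.0, 0.0, 100);
    // passing four zeros selects the automatic criterion (small EpsX):
    // alglib::minlbfgssetcond(state, 0.0, 0.0, 0.0, 0);
*************************************************************************/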
2893 
2894 
2895 /*************************************************************************
2896 This function turns on/off reporting.
2897 
2898 INPUT PARAMETERS:
2899     State   -   structure which stores algorithm state
2900     NeedXRep-   whether iteration reports are needed or not
2901 
2902 If NeedXRep is True, algorithm will call rep() callback function if  it is
2903 provided to MinLBFGSOptimize().
2904 
2905 
2906   -- ALGLIB --
2907      Copyright 02.04.2010 by Bochkanov Sergey
2908 *************************************************************************/
2909 void minlbfgssetxrep(const minlbfgsstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
2910 
2911 
2912 /*************************************************************************
2913 This function sets maximum step length
2914 
2915 INPUT PARAMETERS:
2916     State   -   structure which stores algorithm state
2917     StpMax  -   maximum step length, >=0. Set StpMax to 0.0 (default),  if
2918                 you don't want to limit step length.
2919 
Use this subroutine when you optimize a target function  which  contains
exp() or other fast-growing functions, and the  optimization  algorithm
makes steps so large that they lead to overflow. This function allows us
to reject steps that are too large (and therefore expose us to  possible
overflow) without actually calculating the function value at x+stp*d.
2925 
2926   -- ALGLIB --
2927      Copyright 02.04.2010 by Bochkanov Sergey
2928 *************************************************************************/
2929 void minlbfgssetstpmax(const minlbfgsstate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
2930 
2931 
2932 /*************************************************************************
2933 This function sets scaling coefficients for LBFGS optimizer.
2934 
2935 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
2936 size and gradient are scaled before comparison with tolerances).  Scale of
2937 the I-th variable is a translation invariant measure of:
2938 a) "how large" the variable is
2939 b) how large the step should be to make significant changes in the function
2940 
2941 Scaling is also used by finite difference variant of the optimizer  - step
2942 along I-th axis is equal to DiffStep*S[I].
2943 
2944 In  most  optimizers  (and  in  the  LBFGS  too)  scaling is NOT a form of
2945 preconditioning. It just  affects  stopping  conditions.  You  should  set
2946 preconditioner  by  separate  call  to  one  of  the  MinLBFGSSetPrec...()
2947 functions.
2948 
There is, however, a special preconditioning mode which uses  the  scaling
coefficients to form a diagonal preconditioning matrix. You can turn  this
mode on if you want, but you should understand that scaling  is  not  the
same thing as preconditioning - these are two different, although related,
ways of tuning the solver.
2954 
2955 INPUT PARAMETERS:
    State   -   structure which stores algorithm state
2957     S       -   array[N], non-zero scaling coefficients
2958                 S[i] may be negative, sign doesn't matter.
2959 
2960   -- ALGLIB --
2961      Copyright 14.01.2011 by Bochkanov Sergey
2962 *************************************************************************/
2963 void minlbfgssetscale(const minlbfgsstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
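/*************************************************************************
A minimal sketch, assuming "state" is an already created minlbfgsstate for
a problem whose two variables have very different magnitudes (the concrete
scales are illustrative):

    // first variable is of order 1, second is of order 1.0E+4
    alglib::real_1d_array s = "[1.0, 1.0E+4]";
    alglib::minlbfgssetscale(state, s);
    // optionally reuse the same scales as a diagonal preconditioner:
    // alglib::minlbfgssetprecscale(state);
*************************************************************************/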
2964 
2965 
2966 /*************************************************************************
2967 Modification  of  the  preconditioner:  default  preconditioner    (simple
2968 scaling, same for all elements of X) is used.
2969 
2970 INPUT PARAMETERS:
2971     State   -   structure which stores algorithm state
2972 
2973 NOTE:  you  can  change  preconditioner  "on  the  fly",  during algorithm
2974 iterations.
2975 
2976   -- ALGLIB --
2977      Copyright 13.10.2010 by Bochkanov Sergey
2978 *************************************************************************/
2979 void minlbfgssetprecdefault(const minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
2980 
2981 
2982 /*************************************************************************
2983 Modification of the preconditioner: Cholesky factorization of  approximate
2984 Hessian is used.
2985 
2986 INPUT PARAMETERS:
2987     State   -   structure which stores algorithm state
2988     P       -   triangular preconditioner, Cholesky factorization of
2989                 the approximate Hessian. array[0..N-1,0..N-1],
2990                 (if larger, only leading N elements are used).
2991     IsUpper -   whether upper or lower triangle of P is given
2992                 (other triangle is not referenced)
2993 
2994 After call to this function preconditioner is changed to P  (P  is  copied
2995 into the internal buffer).
2996 
2997 NOTE:  you  can  change  preconditioner  "on  the  fly",  during algorithm
2998 iterations.
2999 
3000 NOTE 2:  P  should  be nonsingular. Exception will be thrown otherwise.
3001 
3002   -- ALGLIB --
3003      Copyright 13.10.2010 by Bochkanov Sergey
3004 *************************************************************************/
3005 void minlbfgssetpreccholesky(const minlbfgsstate &state, const real_2d_array &p, const bool isupper, const xparams _xparams = alglib::xdefault);
3006 
3007 
3008 /*************************************************************************
3009 Modification  of  the  preconditioner:  diagonal of approximate Hessian is
3010 used.
3011 
3012 INPUT PARAMETERS:
3013     State   -   structure which stores algorithm state
3014     D       -   diagonal of the approximate Hessian, array[0..N-1],
3015                 (if larger, only leading N elements are used).
3016 
3017 NOTE:  you  can  change  preconditioner  "on  the  fly",  during algorithm
3018 iterations.
3019 
3020 NOTE 2: D[i] should be positive. Exception will be thrown otherwise.
3021 
3022 NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
3023 
3024   -- ALGLIB --
3025      Copyright 13.10.2010 by Bochkanov Sergey
3026 *************************************************************************/
3027 void minlbfgssetprecdiag(const minlbfgsstate &state, const real_1d_array &d, const xparams _xparams = alglib::xdefault);
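/*************************************************************************
A minimal sketch, assuming "state" is an already created minlbfgsstate and
a rough estimate of the Hessian diagonal is available for a 2-dimensional
problem; note that the diagonal itself is passed, not its inverse (see
note 3 above):

    // approximate Hessian diagonal, all entries strictly positive
    alglib::real_1d_array d = "[2.0, 200.0]";
    alglib::minlbfgssetprecdiag(state, d);
*************************************************************************/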
3028 
3029 
3030 /*************************************************************************
3031 Modification of the preconditioner: scale-based diagonal preconditioning.
3032 
3033 This preconditioning mode can be useful when you  don't  have  approximate
3034 diagonal of Hessian, but you know that your  variables  are  badly  scaled
3035 (for  example,  one  variable is in [1,10], and another in [1000,100000]),
and most of the ill-conditioning comes from the different variable scales.

In this case a simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
can greatly improve convergence.

IMPORTANT: you should set the scale of your variables with a
MinLBFGSSetScale() call (before or after the MinLBFGSSetPrecScale() call).
Without knowledge of the scale of your variables the scale-based
preconditioner will be just a unit matrix.
3045 
3046 INPUT PARAMETERS:
3047     State   -   structure which stores algorithm state
3048 
3049   -- ALGLIB --
3050      Copyright 13.10.2010 by Bochkanov Sergey
3051 *************************************************************************/
3052 void minlbfgssetprecscale(const minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
3053 
3054 
3055 /*************************************************************************
This function provides reverse communication interface.
Reverse communication interface is not documented or recommended for use.
See below for functions which provide better documented API.
3059 *************************************************************************/
3060 bool minlbfgsiteration(const minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
3061 
3062 
3063 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear optimizer
3065 
3066 These functions accept following parameters:
3067     state   -   algorithm state
3068     func    -   callback which calculates function (or merit function)
3069                 value func at given point x
3070     grad    -   callback which calculates function (or merit function)
3071                 value func and gradient grad at given point x
3072     rep     -   optional callback which is called after each iteration
3073                 can be NULL
3074     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
3075                 can be NULL
3076 
3077 NOTES:
3078 
3079 1. This function has two different implementations: one which  uses  exact
3080    (analytical) user-supplied gradient,  and one which uses function value
3081    only  and  numerically  differentiates  function  in  order  to  obtain
3082    gradient.
3083 
3084    Depending  on  the  specific  function  used to create optimizer object
3085    (either MinLBFGSCreate() for analytical gradient  or  MinLBFGSCreateF()
3086    for numerical differentiation) you should choose appropriate variant of
3087    MinLBFGSOptimize() - one  which  accepts  function  AND gradient or one
3088    which accepts function ONLY.
3089 
3090    Be careful to choose variant of MinLBFGSOptimize() which corresponds to
3091    your optimization scheme! Table below lists different  combinations  of
3092    callback (function/gradient) passed to MinLBFGSOptimize()  and specific
3093    function used to create optimizer.
3094 
3095 
3096                      |         USER PASSED TO MinLBFGSOptimize()
3097    CREATED WITH      |  function only   |  function and gradient
3098    ------------------------------------------------------------
3099    MinLBFGSCreateF() |     work                FAIL
3100    MinLBFGSCreate()  |     FAIL                work
3101 
   Here "FAIL" denotes inappropriate combinations of optimizer creation
   function and MinLBFGSOptimize() version. Attempts to use such a
   combination (for example, to create an optimizer with MinLBFGSCreateF()
   and to pass gradient information to MinLBFGSOptimize()) will lead to an
   exception being thrown. Either you did not pass the gradient when it
   WAS needed, or you passed the gradient when it was NOT needed.
3108 
3109   -- ALGLIB --
3110      Copyright 20.03.2009 by Bochkanov Sergey
3111 
3112 *************************************************************************/
3113 void minlbfgsoptimize(minlbfgsstate &state,
3114     void (*func)(const real_1d_array &x, double &func, void *ptr),
3115     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
3116     void *ptr = NULL,
3117     const xparams _xparams = alglib::xdefault);
3118 void minlbfgsoptimize(minlbfgsstate &state,
3119     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
3120     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
3121     void *ptr = NULL,
3122     const xparams _xparams = alglib::xdefault);
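/*************************************************************************
A complete minimal driver using the analytic-gradient variant. The target
f(x) = (x0-2)^2 + (x1+3)^2, the tolerances and the "optimization.h" header
name follow common ALGLIB usage, and the (state, x, rep) signature of
minlbfgsresults() is an assumption:

    #include <cstdio>
    #include "optimization.h"

    // callback: computes f(x) and its gradient at the given point
    static void demo_grad(const alglib::real_1d_array &x, double &func,
                          alglib::real_1d_array &grad, void *ptr)
    {
        func    = (x[0]-2.0)*(x[0]-2.0)+(x[1]+3.0)*(x[1]+3.0);
        grad[0] = 2.0*(x[0]-2.0);
        grad[1] = 2.0*(x[1]+3.0);
    }

    int main()
    {
        alglib::real_1d_array x = "[0.0, 0.0]";
        alglib::minlbfgsstate state;
        alglib::minlbfgsreport rep;
        alglib::minlbfgscreate(2, 2, x, state);
        alglib::minlbfgssetcond(state, 1.0E-8, 0.0, 0.0, 0);
        alglib::minlbfgsoptimize(state, demo_grad);  // gradient variant
        alglib::minlbfgsresults(state, x, rep);      // assumed signature
        printf("code %d, solution (%g, %g)\n",
               (int)rep.terminationtype, x[0], x[1]);
        return 0;
    }
*************************************************************************/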
3123 
3124 
3125 /*************************************************************************
3126 This  function  activates/deactivates verification  of  the  user-supplied
3127 analytic gradient.
3128 
3129 Upon  activation  of  this  option  OptGuard  integrity  checker  performs
3130 numerical differentiation of your target function  at  the  initial  point
3131 (note: future versions may also perform check  at  the  final  point)  and
3132 compares numerical gradient with analytic one provided by you.
3133 
3134 If difference is too large, an error flag is set and optimization  session
3135 continues. After optimization session is over, you can retrieve the report
3136 which  stores  both  gradients  and  specific  components  highlighted  as
3137 suspicious by the OptGuard.
3138 
3139 The primary OptGuard report can be retrieved with minlbfgsoptguardresults().
3140 
3141 IMPORTANT: gradient check is a high-overhead option which  will  cost  you
3142            about 3*N additional function evaluations. In many cases it may
3143            cost as much as the rest of the optimization session.
3144 
3145            YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO
3146            CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY.
3147 
3148 NOTE: unlike previous incarnation of the gradient checking code,  OptGuard
3149       does NOT interrupt optimization even if it discovers bad gradient.
3150 
3151 INPUT PARAMETERS:
3152     State       -   structure used to store algorithm state
3153     TestStep    -   verification step used for numerical differentiation:
3154                     * TestStep=0 turns verification off
3155                     * TestStep>0 activates verification
3156                     You should carefully choose TestStep. Value  which  is
3157                     too large (so large that  function  behavior  is  non-
3158                     cubic at this scale) will lead  to  false  alarms. Too
3159                     short step will result in rounding  errors  dominating
3160                     numerical derivative.
3161 
3162                     You may use different step for different parameters by
3163                     means of setting scale with minlbfgssetscale().
3164 
3165 === EXPLANATION ==========================================================
3166 
In order to verify the gradient, the algorithm performs the following steps:
3168   * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
3169     where X[i] is i-th component of the initial point and S[i] is a  scale
3170     of i-th parameter
3171   * F(X) is evaluated at these trial points
3172   * we perform one more evaluation in the middle point of the interval
3173   * we  build  cubic  model using function values and derivatives at trial
3174     points and we compare its prediction with actual value in  the  middle
3175     point
3176 
3177   -- ALGLIB --
3178      Copyright 15.06.2014 by Bochkanov Sergey
3179 *************************************************************************/
3180 void minlbfgsoptguardgradient(const minlbfgsstate &state, const double teststep, const xparams _xparams = alglib::xdefault);
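/*************************************************************************
A minimal sketch, assuming "state" is an already created minlbfgsstate;
the 1.0E-4 test step is illustrative and should respect the variable
scales set with minlbfgssetscale():

    // enable gradient verification at the initial point
    alglib::minlbfgsoptguardgradient(state, 1.0E-4);
    // ...run minlbfgsoptimize() as usual, then retrieve the OptGuard
    // report with minlbfgsoptguardresults() and inspect badgradsuspected
*************************************************************************/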
3181 
3182 
3183 /*************************************************************************
3184 This  function  activates/deactivates nonsmoothness monitoring  option  of
3185 the  OptGuard  integrity  checker. Smoothness  monitor  silently  observes
3186 solution process and tries to detect ill-posed problems, i.e. ones with:
3187 a) discontinuous target function (non-C0)
3188 b) nonsmooth     target function (non-C1)
3189 
3190 Smoothness monitoring does NOT interrupt optimization  even if it suspects
3191 that your problem is nonsmooth. It just sets corresponding  flags  in  the
3192 OptGuard report which can be retrieved after optimization is over.
3193 
3194 Smoothness monitoring is a moderate overhead option which often adds  less
3195 than 1% to the optimizer running time. Thus, you can use it even for large
3196 scale problems.
3197 
3198 NOTE: OptGuard does  NOT  guarantee  that  it  will  always  detect  C0/C1
3199       continuity violations.
3200 
3201       First, minor errors are hard to  catch - say, a 0.0001 difference in
3202       the model values at two sides of the gap may be due to discontinuity
3203       of the model - or simply because the model has changed.
3204 
3205       Second, C1-violations  are  especially  difficult  to  detect  in  a
3206       noninvasive way. The optimizer usually  performs  very  short  steps
3207       near the nonsmoothness, and differentiation  usually   introduces  a
3208       lot of numerical noise.  It  is  hard  to  tell  whether  some  tiny
3209       discontinuity in the slope is due to real nonsmoothness or just  due
3210       to numerical noise alone.
3211 
      Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases  they  can  be
      spotted by restarting from a different initial point).
3215 
3216 INPUT PARAMETERS:
3217     state   -   algorithm state
3218     level   -   monitoring level:
3219                 * 0 - monitoring is disabled
3220                 * 1 - noninvasive low-overhead monitoring; function values
3221                       and/or gradients are recorded, but OptGuard does not
3222                       try to perform additional evaluations  in  order  to
3223                       get more information about suspicious locations.
3224 
3225 === EXPLANATION ==========================================================
3226 
3227 One major source of headache during optimization  is  the  possibility  of
3228 the coding errors in the target function/constraints (or their gradients).
3229 Such  errors   most   often   manifest   themselves  as  discontinuity  or
3230 nonsmoothness of the target/constraints.
3231 
3232 Another frequent situation is when you try to optimize something involving
3233 lots of min() and max() operations, i.e. nonsmooth target. Although not  a
3234 coding error, it is nonsmoothness anyway - and smooth  optimizers  usually
3235 stop right after encountering nonsmoothness, well before reaching solution.
3236 
OptGuard integrity checker helps you to catch such situations: it monitors
function values/gradients being passed to the  optimizer  and  tries   to
detect errors. Upon discovering a suspicious pair of points it raises  the
appropriate flag (and allows you to continue optimization).  When  optimi-
zation is done, you can study the OptGuard result.
3242 
3243   -- ALGLIB --
3244      Copyright 21.11.2018 by Bochkanov Sergey
3245 *************************************************************************/
3246 void minlbfgsoptguardsmoothness(const minlbfgsstate &state, const ae_int_t level, const xparams _xparams = alglib::xdefault);
3247 void minlbfgsoptguardsmoothness(const minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
3248 
3249 
3250 /*************************************************************************
Results of the OptGuard integrity check; this function should  be  called
after the optimization session is over.
3253 
3254 === PRIMARY REPORT =======================================================
3255 
3256 OptGuard performs several checks which are intended to catch common errors
3257 in the implementation of nonlinear function/gradient:
3258 * incorrect analytic gradient
3259 * discontinuous (non-C0) target functions (constraints)
3260 * nonsmooth     (non-C1) target functions (constraints)
3261 
3262 Each of these checks is activated with appropriate function:
3263 * minlbfgsoptguardgradient() for gradient verification
3264 * minlbfgsoptguardsmoothness() for C0/C1 checks
3265 
3266 Following flags are set when these errors are suspected:
3267 * rep.badgradsuspected, and additionally:
3268   * rep.badgradvidx for specific variable (gradient element) suspected
3269   * rep.badgradxbase, a point where gradient is tested
3270   * rep.badgraduser, user-provided gradient  (stored  as  2D  matrix  with
3271     single row in order to make  report  structure  compatible  with  more
3272     complex optimizers like MinNLC or MinLM)
3273   * rep.badgradnum,   reference    gradient    obtained    via   numerical
3274     differentiation (stored as  2D matrix with single row in order to make
3275     report structure compatible with more complex optimizers  like  MinNLC
3276     or MinLM)
3277 * rep.nonc0suspected
3278 * rep.nonc1suspected
3279 
3280 === ADDITIONAL REPORTS/LOGS ==============================================
3281 
Several different tests are performed to catch C0/C1 errors; you can  find
out which specific test signaled the error by looking at:
3284 * rep.nonc0test0positive, for non-C0 test #0
3285 * rep.nonc1test0positive, for non-C1 test #0
3286 * rep.nonc1test1positive, for non-C1 test #1
3287 
3288 Additional information (including line search logs)  can  be  obtained  by
3289 means of:
3290 * minlbfgsoptguardnonc1test0results()
3291 * minlbfgsoptguardnonc1test1results()
3292 which return detailed error reports, specific points where discontinuities
3293 were found, and so on.
3294 
3295 ==========================================================================
3296 
3297 INPUT PARAMETERS:
3298     state   -   algorithm state
3299 
3300 OUTPUT PARAMETERS:
3301     rep     -   generic OptGuard report;  more  detailed  reports  can  be
3302                 retrieved with other functions.
3303 
3304 NOTE: false negatives (nonsmooth problems are not identified as  nonsmooth
3305       ones) are possible although unlikely.
3306 
3307       The reason  is  that  you  need  to  make several evaluations around
3308       nonsmoothness  in  order  to  accumulate  enough  information  about
3309       function curvature. Say, if you start right from the nonsmooth point,
3310       optimizer simply won't get enough data to understand what  is  going
3311       wrong before it terminates due to abrupt changes in the  derivative.
3312       It is also  possible  that  "unlucky"  step  will  move  us  to  the
3313       termination too quickly.
3314 
3315       Our current approach is to have less than 0.1%  false  negatives  in
3316       our test examples  (measured  with  multiple  restarts  from  random
3317       points), and to have exactly 0% false positives.
3318 
3319   -- ALGLIB --
3320      Copyright 21.11.2018 by Bochkanov Sergey
3321 *************************************************************************/
3322 void minlbfgsoptguardresults(const minlbfgsstate &state, optguardreport &rep, const xparams _xparams = alglib::xdefault);
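

/*************************************************************************
The sketch below shows one way of combining the OptGuard calls  described
above into a complete L-BFGS session. It is an informal illustration  and
assumes the user-side header optimization.h; the target function and  the
callback name quadratic_grad are purely illustrative.

    #include <cstdio>
    #include "optimization.h"
    using namespace alglib;

    // f(x) = x0^2 + 4*x1^2, with analytic gradient
    static void quadratic_grad(const real_1d_array &x, double &func,
                               real_1d_array &grad, void *ptr)
    {
        func    = x[0]*x[0] + 4*x[1]*x[1];
        grad[0] = 2*x[0];
        grad[1] = 8*x[1];
    }

    int main()
    {
        real_1d_array x = "[3,3]";
        minlbfgsstate state;
        minlbfgsreport rep;
        optguardreport ogrep;

        minlbfgscreate(1, x, state);
        minlbfgssetcond(state, 0, 0, 0, 0);

        // OptGuard must be activated BEFORE the optimization session
        minlbfgsoptguardgradient(state, 0.001); // verify analytic gradient
        minlbfgsoptguardsmoothness(state);      // monitor C0/C1 continuity

        minlbfgsoptimize(state, quadratic_grad);
        minlbfgsresults(state, x, rep);

        // retrieve the OptGuard verdict after the session is over
        minlbfgsoptguardresults(state, ogrep);
        if( ogrep.badgradsuspected || ogrep.nonc0suspected || ogrep.nonc1suspected )
            printf("OptGuard flagged the target/gradient as suspicious\n");
        return 0;
    }
*************************************************************************/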
3323 
3324 
3325 /*************************************************************************
3326 Detailed results of the OptGuard integrity check for nonsmoothness test #0
3327 
3328 Nonsmoothness (non-C1) test #0 studies  function  values  (not  gradient!)
3329 obtained during line searches and monitors  behavior  of  the  directional
3330 derivative estimate.
3331 
3332 This test is less powerful than test #1, but it does  not  depend  on  the
3333 gradient values and thus it is more robust against artifacts introduced by
3334 numerical differentiation.
3335 
3336 Two reports are returned:
3337 * a "strongest" one, corresponding  to  line   search  which  had  highest
3338   value of the nonsmoothness indicator
3339 * a "longest" one, corresponding to line search which  had  more  function
3340   evaluations, and thus is more detailed
3341 
In both cases the following fields are returned:

* positive - is TRUE when the test flagged a suspicious point; FALSE if the
  test did not notice anything (in the latter case the fields  below  are
  empty).
3346 * x0[], d[] - arrays of length N which store initial point  and  direction
3347   for line search (d[] can be normalized, but does not have to)
3348 * stp[], f[] - arrays of length CNT which store step lengths and  function
3349   values at these points; f[i] is evaluated in x0+stp[i]*d.
3350 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
3351   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
3352   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
3354 
3355 ==========================================================================
3356 = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it -  you  will
3357 =                   see where C1 continuity is violated.
3358 ==========================================================================
3359 
3360 INPUT PARAMETERS:
3361     state   -   algorithm state
3362 
3363 OUTPUT PARAMETERS:
3364     strrep  -   C1 test #0 "strong" report
3365     lngrep  -   C1 test #0 "long" report
3366 
3367   -- ALGLIB --
3368      Copyright 21.11.2018 by Bochkanov Sergey
3369 *************************************************************************/
3370 void minlbfgsoptguardnonc1test0results(const minlbfgsstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams = alglib::xdefault);
3371 
3372 
3373 /*************************************************************************
3374 Detailed results of the OptGuard integrity check for nonsmoothness test #1
3375 
3376 Nonsmoothness (non-C1)  test  #1  studies  individual  components  of  the
3377 gradient computed during line search.
3378 
3379 When precise analytic gradient is provided this test is more powerful than
3380 test #0  which  works  with  function  values  and  ignores  user-provided
3381 gradient.  However,  test  #0  becomes  more   powerful   when   numerical
3382 differentiation is employed (in such cases test #1 detects  higher  levels
3383 of numerical noise and becomes too conservative).
3384 
3385 This test also tells specific components of the gradient which violate  C1
3386 continuity, which makes it more informative than #0, which just tells that
3387 continuity is violated.
3388 
3389 Two reports are returned:
3390 * a "strongest" one, corresponding  to  line   search  which  had  highest
3391   value of the nonsmoothness indicator
3392 * a "longest" one, corresponding to line search which  had  more  function
3393   evaluations, and thus is more detailed
3394 
In both cases the following fields are returned:

* positive - is TRUE when the test flagged a suspicious point; FALSE if the
  test did not notice anything (in the latter case the fields  below  are
  empty).
3399 * vidx - is an index of the variable in [0,N) with nonsmooth derivative
3400 * x0[], d[] - arrays of length N which store initial point  and  direction
3401   for line search (d[] can be normalized, but does not have to)
3402 * stp[], g[] - arrays of length CNT which store step lengths and  gradient
3403   values at these points; g[i] is evaluated in  x0+stp[i]*d  and  contains
3404   vidx-th component of the gradient.
3405 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
3406   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
3407   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
3409 
3410 ==========================================================================
= SHORTLY SPEAKING: build a 2D plot of (stp,g) and look at it -  you  will
3412 =                   see where C1 continuity is violated.
3413 ==========================================================================
3414 
3415 INPUT PARAMETERS:
3416     state   -   algorithm state
3417 
3418 OUTPUT PARAMETERS:
3419     strrep  -   C1 test #1 "strong" report
3420     lngrep  -   C1 test #1 "long" report
3421 
3422   -- ALGLIB --
3423      Copyright 21.11.2018 by Bochkanov Sergey
3424 *************************************************************************/
3425 void minlbfgsoptguardnonc1test1results(const minlbfgsstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams = alglib::xdefault);
3426 
3427 
3428 /*************************************************************************
3429 L-BFGS algorithm results
3430 
3431 INPUT PARAMETERS:
3432     State   -   algorithm state
3433 
3434 OUTPUT PARAMETERS:
3435     X       -   array[0..N-1], solution
3436     Rep     -   optimization report:
                * Rep.TerminationType, completion code:
3438                     * -8    internal integrity control  detected  infinite
3439                             or NAN values in  function/gradient.  Abnormal
3440                             termination signalled.
3441                     * -2    rounding errors prevent further improvement.
3442                             X contains best point found.
3443                     * -1    incorrect parameters were specified
3444                     *  1    relative function improvement is no more than
3445                             EpsF.
3446                     *  2    relative step is no more than EpsX.
3447                     *  4    gradient norm is no more than EpsG
                    *  5    MaxIts steps were taken
3449                     *  7    stopping conditions are too stringent,
3450                             further improvement is impossible
3451                     *  8    terminated by user who called minlbfgsrequesttermination().
3452                             X contains point which was "current accepted" when
3453                             termination request was submitted.
3454                 * Rep.IterationsCount contains iterations count
                * Rep.NFEV contains the number of function evaluations
3456 
3457   -- ALGLIB --
3458      Copyright 02.04.2010 by Bochkanov Sergey
3459 *************************************************************************/
3460 void minlbfgsresults(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep, const xparams _xparams = alglib::xdefault);
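

/*************************************************************************
A minimal sketch of reading the report fields listed above; it assumes that
state has already gone through minlbfgsoptimize() and that <cstdio> is
available for printf().

    real_1d_array xsol;
    minlbfgsreport rep;
    minlbfgsresults(state, xsol, rep);
    if( rep.terminationtype>0 )
        printf("converged: %d iterations, %d function evaluations\n",
               (int)rep.iterationscount, (int)rep.nfev);
    else
        printf("abnormal termination, code %d\n", (int)rep.terminationtype);
*************************************************************************/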
3461 
3462 
3463 /*************************************************************************
3464 L-BFGS algorithm results
3465 
3466 Buffered implementation of MinLBFGSResults which uses pre-allocated buffer
to store X[]. If the buffer size is too small, it resizes the buffer. It  is
3468 intended to be used in the inner cycles of performance critical algorithms
3469 where array reallocation penalty is too large to be ignored.
3470 
3471   -- ALGLIB --
3472      Copyright 20.08.2010 by Bochkanov Sergey
3473 *************************************************************************/
3474 void minlbfgsresultsbuf(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep, const xparams _xparams = alglib::xdefault);
3475 
3476 
3477 /*************************************************************************
3478 This  subroutine restarts LBFGS algorithm from new point. All optimization
3479 parameters are left unchanged.
3480 
This function allows you to solve  multiple  optimization  problems  (which
must have the same number of dimensions) without the  object  reallocation
penalty.
3483 
3484 INPUT PARAMETERS:
3485     State   -   structure used to store algorithm state
3486     X       -   new starting point.
3487 
3488   -- ALGLIB --
3489      Copyright 30.07.2010 by Bochkanov Sergey
3490 *************************************************************************/
3491 void minlbfgsrestartfrom(const minlbfgsstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
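

/*************************************************************************
A short sketch of reusing the same optimizer object for a second  problem
of the same dimensionality (state, quadratic_grad and rep  refer  to  the
illustrative names used in the earlier sketches):

    real_1d_array x2 = "[10,-10]";
    minlbfgsrestartfrom(state, x2);           // keep settings, change start
    minlbfgsoptimize(state, quadratic_grad);  // same callback as before
    minlbfgsresults(state, x2, rep);
*************************************************************************/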
3492 
3493 
3494 /*************************************************************************
3495 This subroutine submits request for termination of running  optimizer.  It
3496 should be called from user-supplied callback when user decides that it  is
3497 time to "smoothly" terminate optimization process.  As  result,  optimizer
3498 stops at point which was "current accepted" when termination  request  was
3499 submitted and returns error code 8 (successful termination).
3500 
3501 INPUT PARAMETERS:
3502     State   -   optimizer structure
3503 
3504 NOTE: after  request  for  termination  optimizer  may   perform   several
3505       additional calls to user-supplied callbacks. It does  NOT  guarantee
3506       to stop immediately - it just guarantees that these additional calls
3507       will be discarded later.
3508 
3509 NOTE: calling this function on optimizer which is NOT running will have no
3510       effect.
3511 
3512 NOTE: multiple calls to this function are possible. First call is counted,
3513       subsequent calls are silently ignored.
3514 
3515   -- ALGLIB --
3516      Copyright 08.10.2014 by Bochkanov Sergey
3517 *************************************************************************/
3518 void minlbfgsrequesttermination(const minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
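

/*************************************************************************
One possible way to issue such a request is from the rep() callback,  with
a pointer to the optimizer passed through ptr. This is only a sketch;  the
stopping rule and the callback name are illustrative.

    static void report_and_maybe_stop(const real_1d_array &x, double func, void *ptr)
    {
        minlbfgsstate *s = (minlbfgsstate*)ptr;
        if( func<1.0E-6 )                       // user-defined stopping rule
            minlbfgsrequesttermination(*s);     // smooth stop, completion code 8
    }

    // ...
    // minlbfgssetxrep(state, true);
    // minlbfgsoptimize(state, quadratic_grad, report_and_maybe_stop, &state);
*************************************************************************/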
3519 #endif
3520 
3521 #if defined(AE_COMPILE_QPDENSEAULSOLVER) || !defined(AE_PARTIAL_BUILD)
3522 
3523 #endif
3524 
3525 #if defined(AE_COMPILE_MINBLEIC) || !defined(AE_PARTIAL_BUILD)
3526 /*************************************************************************
3527                      BOUND CONSTRAINED OPTIMIZATION
3528        WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS
3529 
3530 DESCRIPTION:
3531 The  subroutine  minimizes  function   F(x)  of N arguments subject to any
3532 combination of:
3533 * bound constraints
3534 * linear inequality constraints
3535 * linear equality constraints
3536 
3537 REQUIREMENTS:
3538 * user must provide function value and gradient
3539 * starting point X0 must be feasible or
3540   not too far away from the feasible set
3541 * grad(f) must be Lipschitz continuous on a level set:
3542   L = { x : f(x)<=f(x0) }
3543 * function must be defined everywhere on the feasible set F
3544 
3545 USAGE:
3546 
Constrained optimization is far more complex than unconstrained optimization.
Here we give a very brief outline of the BLEIC optimizer; a compact code
sketch of this workflow is given after the declarations below. We strongly
recommend that you read the examples in the ALGLIB Reference Manual and the
ALGLIB User Guide on optimization, which is available at
http://www.alglib.net/optimization/
3551 
3552 1. User initializes algorithm state with MinBLEICCreate() call
3553 
2. User adds boundary and/or linear constraints by calling the
   MinBLEICSetBC() and MinBLEICSetLC() functions.
3556 
3557 3. User sets stopping conditions with MinBLEICSetCond().
3558 
3559 4. User calls MinBLEICOptimize() function which takes algorithm  state and
3560    pointer (delegate, etc.) to callback function which calculates F/G.
3561 
3562 5. User calls MinBLEICResults() to get solution
3563 
6. Optionally, user may call MinBLEICRestartFrom() to solve another problem
   with the same N but another starting point.
   MinBLEICRestartFrom() allows you to reuse the already initialized structure.
3567 
3568 NOTE: if you have box-only constraints (no  general  linear  constraints),
3569       then MinBC optimizer can be better option. It uses  special,  faster
3570       constraint activation method, which performs better on problems with
3571       multiple constraints active at the solution.
3572 
3573       On small-scale problems performance of MinBC is similar to  that  of
3574       MinBLEIC, but on large-scale ones (hundreds and thousands of  active
3575       constraints) it can be several times faster than MinBLEIC.
3576 
3577 INPUT PARAMETERS:
3578     N       -   problem dimension, N>0:
3579                 * if given, only leading N elements of X are used
                * if not given, automatically determined from size of X
3581     X       -   starting point, array[N]:
3582                 * it is better to set X to a feasible point
                * but X can be infeasible, in which case the algorithm will
                  try to find a feasible point first, using X as the initial
                  approximation.
3586 
3587 OUTPUT PARAMETERS:
3588     State   -   structure stores algorithm state
3589 
3590   -- ALGLIB --
3591      Copyright 28.11.2010 by Bochkanov Sergey
3592 *************************************************************************/
3593 void minbleiccreate(const ae_int_t n, const real_1d_array &x, minbleicstate &state, const xparams _xparams = alglib::xdefault);
3594 void minbleiccreate(const real_1d_array &x, minbleicstate &state, const xparams _xparams = alglib::xdefault);
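

/*************************************************************************
The sketch below walks through the six-step workflow outlined above. It is
an informal illustration assuming the user-side header optimization.h; the
target f(x) = (x0-1)^2 + (x1-1)^2 and the callback name  bleic_grad  are
illustrative only.

    #include "optimization.h"
    using namespace alglib;

    static void bleic_grad(const real_1d_array &x, double &func,
                           real_1d_array &grad, void *ptr)
    {
        func    = (x[0]-1)*(x[0]-1) + (x[1]-1)*(x[1]-1);
        grad[0] = 2*(x[0]-1);
        grad[1] = 2*(x[1]-1);
    }

    int main()
    {
        real_1d_array x    = "[0,0]";              // starting point
        real_1d_array bndl = "[0.0,0.0]";
        real_1d_array bndu = "[0.5,0.5]";
        minbleicstate state;
        minbleicreport rep;

        minbleiccreate(x, state);                  // 1. create optimizer
        minbleicsetbc(state, bndl, bndu);          // 2. add box constraints
        minbleicsetcond(state, 0, 0, 0.000001, 0); // 3. stopping conditions
        minbleicoptimize(state, bleic_grad);       // 4. run optimization
        minbleicresults(state, x, rep);            // 5. retrieve solution
        // 6. optionally: minbleicrestartfrom(state, xnew) for the next problem
        return 0;
    }
*************************************************************************/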
3595 
3596 
3597 /*************************************************************************
3598 The subroutine is finite difference variant of MinBLEICCreate().  It  uses
3599 finite differences in order to differentiate target function.
3600 
The description below contains information which is specific to this function
only. We recommend reading the comments on MinBLEICCreate() in order to  get
more information about creation of the BLEIC optimizer.
3604 
3605 INPUT PARAMETERS:
3606     N       -   problem dimension, N>0:
3607                 * if given, only leading N elements of X are used
3608                 * if not given, automatically determined from size of X
3609     X       -   starting point, array[0..N-1].
3610     DiffStep-   differentiation step, >0
3611 
3612 OUTPUT PARAMETERS:
3613     State   -   structure which stores algorithm state
3614 
3615 NOTES:
3616 1. algorithm uses 4-point central formula for differentiation.
3617 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
3618    S[] is scaling vector which can be set by MinBLEICSetScale() call.
3619 3. we recommend you to use moderate values of  differentiation  step.  Too
3620    large step will result in too large truncation  errors, while too small
3621    step will result in too large numerical  errors.  1.0E-6  can  be  good
3622    value to start with.
3623 4. Numerical  differentiation  is   very   inefficient  -   one   gradient
3624    calculation needs 4*N function evaluations. This function will work for
3625    any N - either small (1...10), moderate (10...100) or  large  (100...).
3626    However, performance penalty will be too severe for any N's except  for
3627    small ones.
3628    We should also say that code which relies on numerical  differentiation
3629    is  less  robust and precise. CG needs exact gradient values. Imprecise
3630    gradient may slow  down  convergence, especially  on  highly  nonlinear
3631    problems.
3632    Thus  we  recommend to use this function for fast prototyping on small-
3633    dimensional problems only, and to implement analytical gradient as soon
3634    as possible.
3635 
3636   -- ALGLIB --
3637      Copyright 16.05.2011 by Bochkanov Sergey
3638 *************************************************************************/
3639 void minbleiccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbleicstate &state, const xparams _xparams = alglib::xdefault);
3640 void minbleiccreatef(const real_1d_array &x, const double diffstep, minbleicstate &state, const xparams _xparams = alglib::xdefault);
3641 
3642 
3643 /*************************************************************************
3644 This function sets boundary constraints for BLEIC optimizer.
3645 
3646 Boundary constraints are inactive by default (after initial creation).
3647 They are preserved after algorithm restart with MinBLEICRestartFrom().
3648 
3649 NOTE: if you have box-only constraints (no  general  linear  constraints),
3650       then MinBC optimizer can be better option. It uses  special,  faster
3651       constraint activation method, which performs better on problems with
3652       multiple constraints active at the solution.
3653 
3654       On small-scale problems performance of MinBC is similar to  that  of
3655       MinBLEIC, but on large-scale ones (hundreds and thousands of  active
3656       constraints) it can be several times faster than MinBLEIC.
3657 
3658 INPUT PARAMETERS:
3659     State   -   structure stores algorithm state
3660     BndL    -   lower bounds, array[N].
3661                 If some (all) variables are unbounded, you may specify
3662                 very small number or -INF.
3663     BndU    -   upper bounds, array[N].
3664                 If some (all) variables are unbounded, you may specify
3665                 very large number or +INF.
3666 
3667 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
3668 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
3669 
3670 NOTE 2: this solver has following useful properties:
3671 * bound constraints are always satisfied exactly
3672 * function is evaluated only INSIDE area specified by  bound  constraints,
3673   even  when  numerical  differentiation is used (algorithm adjusts  nodes
3674   according to boundary constraints)
3675 
3676   -- ALGLIB --
3677      Copyright 28.11.2010 by Bochkanov Sergey
3678 *************************************************************************/
3679 void minbleicsetbc(const minbleicstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
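

/*************************************************************************
A minimal sketch of mixed bounds on a 3-dimensional problem. The values are
illustrative; the constant alglib::fp_posinf (assumed to  be  provided  by
ap.h) is used to mark the absent upper bound.

    // x0 in [0,1],  x1 >= -2 (no upper bound),  x2 frozen at 0.5
    real_1d_array bndl = "[0.0, -2.0, 0.5]";
    real_1d_array bndu = "[1.0,  0.0, 0.5]";
    bndu[1] = alglib::fp_posinf;        // overwrite placeholder with +INF
    minbleicsetbc(state, bndl, bndu);
*************************************************************************/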
3680 
3681 
3682 /*************************************************************************
3683 This function sets linear constraints for BLEIC optimizer.
3684 
3685 Linear constraints are inactive by default (after initial creation).
3686 They are preserved after algorithm restart with MinBLEICRestartFrom().
3687 
3688 INPUT PARAMETERS:
3689     State   -   structure previously allocated with MinBLEICCreate call.
3690     C       -   linear constraints, array[K,N+1].
3691                 Each row of C represents one constraint, either equality
3692                 or inequality (see below):
3693                 * first N elements correspond to coefficients,
3694                 * last element corresponds to the right part.
3695                 All elements of C (including right part) must be finite.
3696     CT      -   type of constraints, array[K]:
3697                 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n]
3698                 * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n]
3699                 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n]
3700     K       -   number of equality/inequality constraints, K>=0:
3701                 * if given, only leading K elements of C/CT are used
3702                 * if not given, automatically determined from sizes of C/CT
3703 
3704 NOTE 1: linear (non-bound) constraints are satisfied only approximately:
3705 * there always exists some minor violation (about Epsilon in magnitude)
3706   due to rounding errors
3707 * numerical differentiation, if used, may  lead  to  function  evaluations
3708   outside  of the feasible  area,   because   algorithm  does  NOT  change
3709   numerical differentiation formula according to linear constraints.
3710 If you want constraints to be  satisfied  exactly, try to reformulate your
3711 problem  in  such  manner  that  all constraints will become boundary ones
3712 (this kind of constraints is always satisfied exactly, both in  the  final
3713 solution and in all intermediate points).
3714 
3715   -- ALGLIB --
3716      Copyright 28.11.2010 by Bochkanov Sergey
3717 *************************************************************************/
3718 void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
3719 void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
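

/*************************************************************************
A compact sketch of the C/CT encoding described above, for two constraints
on a 2-dimensional problem (the values are illustrative):

    // constraint 0:  x0 + x1 <= 2      ->  CT[0] = -1
    // constraint 1:  x0 - x1  = 0      ->  CT[1] =  0
    real_2d_array c = "[[1, 1, 2], [1, -1, 0]]"; // last column is the right part
    integer_1d_array ct = "[-1, 0]";
    minbleicsetlc(state, c, ct);                 // K deduced from sizes of C/CT
*************************************************************************/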
3720 
3721 
3722 /*************************************************************************
3723 This function sets stopping conditions for the optimizer.
3724 
3725 INPUT PARAMETERS:
3726     State   -   structure which stores algorithm state
3727     EpsG    -   >=0
3728                 The  subroutine  finishes  its  work   if   the  condition
3729                 |v|<EpsG is satisfied, where:
                * |.| means Euclidean norm
3731                 * v - scaled gradient vector, v[i]=g[i]*s[i]
3732                 * g - gradient
3733                 * s - scaling coefficients set by MinBLEICSetScale()
3734     EpsF    -   >=0
3735                 The  subroutine  finishes  its work if on k+1-th iteration
3736                 the  condition  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
3737                 is satisfied.
3738     EpsX    -   >=0
3739                 The subroutine finishes its work if  on  k+1-th  iteration
3740                 the condition |v|<=EpsX is fulfilled, where:
                * |.| means Euclidean norm
3742                 * v - scaled step vector, v[i]=dx[i]/s[i]
3743                 * dx - step vector, dx=X(k+1)-X(k)
3744                 * s - scaling coefficients set by MinBLEICSetScale()
3745     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
3746                 iterations is unlimited.
3747 
3748 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
3749 to automatic stopping criterion selection.
3750 
3751 NOTE: when SetCond() called with non-zero MaxIts, BLEIC solver may perform
3752       slightly more than MaxIts iterations. I.e., MaxIts  sets  non-strict
3753       limit on iterations count.
3754 
3755   -- ALGLIB --
3756      Copyright 28.11.2010 by Bochkanov Sergey
3757 *************************************************************************/
3758 void minbleicsetcond(const minbleicstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
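

/*************************************************************************
A one-line sketch of typical usage (the tolerance is illustrative):  stop
when the scaled step becomes smaller than 1.0E-6, with no explicit  limit
on the iteration count.

    minbleicsetcond(state, 0, 0, 0.000001, 0); // EpsG=0, EpsF=0, EpsX=1e-6, MaxIts=0
*************************************************************************/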
3759 
3760 
3761 /*************************************************************************
3762 This function sets scaling coefficients for BLEIC optimizer.
3763 
3764 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
3765 size and gradient are scaled before comparison with tolerances).  Scale of
3766 the I-th variable is a translation invariant measure of:
3767 a) "how large" the variable is
3768 b) how large the step should be to make significant changes in the function
3769 
3770 Scaling is also used by finite difference variant of the optimizer  - step
3771 along I-th axis is equal to DiffStep*S[I].
3772 
3773 In  most  optimizers  (and  in  the  BLEIC  too)  scaling is NOT a form of
3774 preconditioning. It just  affects  stopping  conditions.  You  should  set
3775 preconditioner  by  separate  call  to  one  of  the  MinBLEICSetPrec...()
3776 functions.
3777 
3778 There is a special  preconditioning  mode, however,  which  uses   scaling
3779 coefficients to form diagonal preconditioning matrix. You  can  turn  this
3780 mode on, if you want.   But  you should understand that scaling is not the
3781 same thing as preconditioning - these are two different, although  related
3782 forms of tuning solver.
3783 
3784 INPUT PARAMETERS:
3785     State   -   structure stores algorithm state
3786     S       -   array[N], non-zero scaling coefficients
3787                 S[i] may be negative, sign doesn't matter.
3788 
3789   -- ALGLIB --
3790      Copyright 14.01.2011 by Bochkanov Sergey
3791 *************************************************************************/
3792 void minbleicsetscale(const minbleicstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
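

/*************************************************************************
A short sketch for a problem whose variables live  on  rather  different
scales (the magnitudes are illustrative):

    // x0 is of order 1, x1 is of order 1.0E+4
    real_1d_array s = "[1.0, 10000.0]";
    minbleicsetscale(state, s);
    // optionally reuse the same scales as a diagonal preconditioner:
    // minbleicsetprecscale(state);
*************************************************************************/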
3793 
3794 
3795 /*************************************************************************
3796 Modification of the preconditioner: preconditioning is turned off.
3797 
3798 INPUT PARAMETERS:
3799     State   -   structure which stores algorithm state
3800 
3801   -- ALGLIB --
3802      Copyright 13.10.2010 by Bochkanov Sergey
3803 *************************************************************************/
3804 void minbleicsetprecdefault(const minbleicstate &state, const xparams _xparams = alglib::xdefault);
3805 
3806 
3807 /*************************************************************************
3808 Modification  of  the  preconditioner:  diagonal of approximate Hessian is
3809 used.
3810 
3811 INPUT PARAMETERS:
3812     State   -   structure which stores algorithm state
3813     D       -   diagonal of the approximate Hessian, array[0..N-1],
3814                 (if larger, only leading N elements are used).
3815 
3816 NOTE 1: D[i] should be positive. Exception will be thrown otherwise.
3817 
3818 NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
3819 
3820   -- ALGLIB --
3821      Copyright 13.10.2010 by Bochkanov Sergey
3822 *************************************************************************/
3823 void minbleicsetprecdiag(const minbleicstate &state, const real_1d_array &d, const xparams _xparams = alglib::xdefault);
3824 
3825 
3826 /*************************************************************************
3827 Modification of the preconditioner: scale-based diagonal preconditioning.
3828 
3829 This preconditioning mode can be useful when you  don't  have  approximate
3830 diagonal of Hessian, but you know that your  variables  are  badly  scaled
3831 (for  example,  one  variable is in [1,10], and another in [1000,100000]),
3832 and most part of the ill-conditioning comes from different scales of vars.
3833 
3834 In this case simple  scale-based  preconditioner,  with H[i] = 1/(s[i]^2),
3835 can greatly improve convergence.
3836 
IMPORTANT: you should set the scale of your variables with a MinBLEICSetScale()
call (before or after the MinBLEICSetPrecScale() call). Without knowledge  of
the scale of your variables the scale-based preconditioner will be  just  the
unit matrix.
3841 
3842 INPUT PARAMETERS:
3843     State   -   structure which stores algorithm state
3844 
3845   -- ALGLIB --
3846      Copyright 13.10.2010 by Bochkanov Sergey
3847 *************************************************************************/
3848 void minbleicsetprecscale(const minbleicstate &state, const xparams _xparams = alglib::xdefault);
3849 
3850 
3851 /*************************************************************************
3852 This function turns on/off reporting.
3853 
3854 INPUT PARAMETERS:
3855     State   -   structure which stores algorithm state
3856     NeedXRep-   whether iteration reports are needed or not
3857 
3858 If NeedXRep is True, algorithm will call rep() callback function if  it is
3859 provided to MinBLEICOptimize().
3860 
3861   -- ALGLIB --
3862      Copyright 28.11.2010 by Bochkanov Sergey
3863 *************************************************************************/
3864 void minbleicsetxrep(const minbleicstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
3865 
3866 
3867 /*************************************************************************
3868 This function sets maximum step length
3869 
3870 IMPORTANT: this feature is hard to combine with preconditioning. You can't
3871 set upper limit on step length, when you solve optimization  problem  with
3872 linear (non-boundary) constraints AND preconditioner turned on.
3873 
When non-boundary constraints are present, you have  to  either  a)  use  a
preconditioner, or b) use an upper limit on the step length. YOU CAN'T  USE
BOTH! If you do, the algorithm will terminate with the appropriate error code.
3877 
3878 INPUT PARAMETERS:
3879     State   -   structure which stores algorithm state
3880     StpMax  -   maximum step length, >=0. Set StpMax to 0.0,  if you don't
3881                 want to limit step length.
3882 
3883 Use this subroutine when you optimize target function which contains exp()
3884 or  other  fast  growing  functions,  and optimization algorithm makes too
3885 large  steps  which  lead   to overflow. This function allows us to reject
3886 steps  that  are  too  large  (and  therefore  expose  us  to the possible
3887 overflow) without actually calculating function value at the x+stp*d.
3888 
3889   -- ALGLIB --
3890      Copyright 02.04.2010 by Bochkanov Sergey
3891 *************************************************************************/
3892 void minbleicsetstpmax(const minbleicstate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
3893 
3894 
3895 /*************************************************************************
3896 This function provides reverse communication interface
3897 Reverse communication interface is not documented or recommended to use.
3898 See below for functions which provide better documented API
3899 *************************************************************************/
3900 bool minbleiciteration(const minbleicstate &state, const xparams _xparams = alglib::xdefault);
3901 
3902 
3903 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear optimizer
3905 
3906 These functions accept following parameters:
3907     state   -   algorithm state
3908     func    -   callback which calculates function (or merit function)
3909                 value func at given point x
3910     grad    -   callback which calculates function (or merit function)
3911                 value func and gradient grad at given point x
3912     rep     -   optional callback which is called after each iteration
3913                 can be NULL
3914     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
3915                 can be NULL
3916 
3917 NOTES:
3918 
3919 1. This function has two different implementations: one which  uses  exact
3920    (analytical) user-supplied gradient,  and one which uses function value
3921    only  and  numerically  differentiates  function  in  order  to  obtain
3922    gradient.
3923 
3924    Depending  on  the  specific  function  used to create optimizer object
3925    (either  MinBLEICCreate() for analytical gradient or  MinBLEICCreateF()
3926    for numerical differentiation) you should choose appropriate variant of
3927    MinBLEICOptimize() - one  which  accepts  function  AND gradient or one
3928    which accepts function ONLY.
3929 
3930    Be careful to choose variant of MinBLEICOptimize() which corresponds to
3931    your optimization scheme! Table below lists different  combinations  of
3932    callback (function/gradient) passed to MinBLEICOptimize()  and specific
3933    function used to create optimizer.
3934 
3935 
3936                      |         USER PASSED TO MinBLEICOptimize()
3937    CREATED WITH      |  function only   |  function and gradient
3938    ------------------------------------------------------------
3939    MinBLEICCreateF() |     work                FAIL
3940    MinBLEICCreate()  |     FAIL                work
3941 
3942    Here "FAIL" denotes inappropriate combinations  of  optimizer  creation
   function and the MinBLEICOptimize() version.  Attempts  to  use  such  a
   combination (for example, to create the optimizer with MinBLEICCreateF()
   and to pass gradient information to MinBLEICOptimize()) will lead to  an
   exception being thrown. Either you did not pass the gradient when it WAS
   needed, or you passed the gradient when it was NOT needed.
3948 
3949   -- ALGLIB --
3950      Copyright 28.11.2010 by Bochkanov Sergey
3951 
3952 *************************************************************************/
3953 void minbleicoptimize(minbleicstate &state,
3954     void (*func)(const real_1d_array &x, double &func, void *ptr),
3955     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
3956     void *ptr = NULL,
3957     const xparams _xparams = alglib::xdefault);
3958 void minbleicoptimize(minbleicstate &state,
3959     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
3960     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
3961     void *ptr = NULL,
3962     const xparams _xparams = alglib::xdefault);
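

/*************************************************************************
A minimal sketch matching the table above. The callback names  bleic_grad
(see the sketch after minbleiccreate()) and bleic_func are illustrative.

    // function-only callback, for use with an optimizer created by
    // minbleiccreatef() (numerical differentiation):
    static void bleic_func(const real_1d_array &x, double &func, void *ptr)
    {
        func = (x[0]-1)*(x[0]-1) + (x[1]-1)*(x[1]-1);
    }

    // analytic gradient scheme:  minbleiccreate()  + function-and-gradient callback
    // minbleicoptimize(state, bleic_grad);

    // numerical diff scheme:     minbleiccreatef() + function-only callback
    // minbleicoptimize(fstate, bleic_func);
*************************************************************************/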
3963 
3964 
3965 /*************************************************************************
3966 This  function  activates/deactivates verification  of  the  user-supplied
3967 analytic gradient.
3968 
3969 Upon  activation  of  this  option  OptGuard  integrity  checker  performs
3970 numerical differentiation of your target function  at  the  initial  point
3971 (note: future versions may also perform check  at  the  final  point)  and
3972 compares numerical gradient with analytic one provided by you.
3973 
3974 If difference is too large, an error flag is set and optimization  session
3975 continues. After optimization session is over, you can retrieve the report
3976 which  stores  both  gradients  and  specific  components  highlighted  as
3977 suspicious by the OptGuard.
3978 
3979 The primary OptGuard report can be retrieved with minbleicoptguardresults().
3980 
3981 IMPORTANT: gradient check is a high-overhead option which  will  cost  you
3982            about 3*N additional function evaluations. In many cases it may
3983            cost as much as the rest of the optimization session.
3984 
3985            YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO
3986            CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY.
3987 
NOTE: unlike the previous incarnation of the gradient checking code, OptGuard
      does NOT interrupt optimization even if it discovers a bad gradient.
3990 
3991 INPUT PARAMETERS:
3992     State       -   structure used to store algorithm state
3993     TestStep    -   verification step used for numerical differentiation:
3994                     * TestStep=0 turns verification off
3995                     * TestStep>0 activates verification
                    You should choose TestStep carefully. A value which  is
                    too large (so large that the function behavior is  non-
                    cubic at this scale) will lead to  false  alarms.   Too
                    short a step will result in rounding errors  dominating
                    the numerical derivative.
4001 
4002                     You may use different step for different parameters by
4003                     means of setting scale with minbleicsetscale().
4004 
4005 === EXPLANATION ==========================================================
4006 
In order to verify the gradient, the algorithm performs the following steps:
4008   * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
4009     where X[i] is i-th component of the initial point and S[i] is a  scale
4010     of i-th parameter
4011   * F(X) is evaluated at these trial points
4012   * we perform one more evaluation in the middle point of the interval
4013   * we  build  cubic  model using function values and derivatives at trial
4014     points and we compare its prediction with actual value in  the  middle
4015     point
4016 
4017   -- ALGLIB --
4018      Copyright 15.06.2014 by Bochkanov Sergey
4019 *************************************************************************/
4020 void minbleicoptguardgradient(const minbleicstate &state, const double teststep, const xparams _xparams = alglib::xdefault);
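

/*************************************************************************
A short sketch of activating the check before the session starts; TestStep
value is illustrative, and any scales should  already  have  been  chosen
with minbleicsetscale().

    minbleicoptguardgradient(state, 0.001);  // TestStep=0.001, in units of S[i]
    // ... minbleicoptimize(...), then minbleicoptguardresults(state, ogrep)
*************************************************************************/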
4021 
4022 
4023 /*************************************************************************
4024 This  function  activates/deactivates nonsmoothness monitoring  option  of
4025 the  OptGuard  integrity  checker. Smoothness  monitor  silently  observes
4026 solution process and tries to detect ill-posed problems, i.e. ones with:
4027 a) discontinuous target function (non-C0)
4028 b) nonsmooth     target function (non-C1)
4029 
4030 Smoothness monitoring does NOT interrupt optimization  even if it suspects
4031 that your problem is nonsmooth. It just sets corresponding  flags  in  the
4032 OptGuard report which can be retrieved after optimization is over.
4033 
4034 Smoothness monitoring is a moderate overhead option which often adds  less
4035 than 1% to the optimizer running time. Thus, you can use it even for large
4036 scale problems.
4037 
4038 NOTE: OptGuard does  NOT  guarantee  that  it  will  always  detect  C0/C1
4039       continuity violations.
4040 
4041       First, minor errors are hard to  catch - say, a 0.0001 difference in
4042       the model values at two sides of the gap may be due to discontinuity
4043       of the model - or simply because the model has changed.
4044 
4045       Second, C1-violations  are  especially  difficult  to  detect  in  a
4046       noninvasive way. The optimizer usually  performs  very  short  steps
4047       near the nonsmoothness, and differentiation  usually   introduces  a
4048       lot of numerical noise.  It  is  hard  to  tell  whether  some  tiny
4049       discontinuity in the slope is due to real nonsmoothness or just  due
4050       to numerical noise alone.
4051 
      Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases  they  can  be
      spotted by restarting from a different initial point).
4055 
4056 INPUT PARAMETERS:
4057     state   -   algorithm state
4058     level   -   monitoring level:
4059                 * 0 - monitoring is disabled
4060                 * 1 - noninvasive low-overhead monitoring; function values
4061                       and/or gradients are recorded, but OptGuard does not
4062                       try to perform additional evaluations  in  order  to
4063                       get more information about suspicious locations.
4064 
4065 === EXPLANATION ==========================================================
4066 
4067 One major source of headache during optimization  is  the  possibility  of
4068 the coding errors in the target function/constraints (or their gradients).
4069 Such  errors   most   often   manifest   themselves  as  discontinuity  or
4070 nonsmoothness of the target/constraints.
4071 
4072 Another frequent situation is when you try to optimize something involving
4073 lots of min() and max() operations, i.e. nonsmooth target. Although not  a
4074 coding error, it is nonsmoothness anyway - and smooth  optimizers  usually
4075 stop right after encountering nonsmoothness, well before reaching solution.
4076 
OptGuard integrity checker helps you to catch such situations: it monitors
function values/gradients being passed to the  optimizer  and  tries   to
detect errors. Upon discovering a suspicious pair of points it raises  the
appropriate flag (and allows you to continue optimization).  When  optimi-
zation is done, you can study the OptGuard result.
4082 
4083   -- ALGLIB --
4084      Copyright 21.11.2018 by Bochkanov Sergey
4085 *************************************************************************/
4086 void minbleicoptguardsmoothness(const minbleicstate &state, const ae_int_t level, const xparams _xparams = alglib::xdefault);
4087 void minbleicoptguardsmoothness(const minbleicstate &state, const xparams _xparams = alglib::xdefault);
4088 
4089 
4090 /*************************************************************************
Results of the OptGuard integrity check; this function should  be  called
after the optimization session is over.
4093 
4094 === PRIMARY REPORT =======================================================
4095 
4096 OptGuard performs several checks which are intended to catch common errors
4097 in the implementation of nonlinear function/gradient:
4098 * incorrect analytic gradient
4099 * discontinuous (non-C0) target functions (constraints)
4100 * nonsmooth     (non-C1) target functions (constraints)
4101 
4102 Each of these checks is activated with appropriate function:
4103 * minbleicoptguardgradient() for gradient verification
4104 * minbleicoptguardsmoothness() for C0/C1 checks
4105 
4106 Following flags are set when these errors are suspected:
4107 * rep.badgradsuspected, and additionally:
4108   * rep.badgradvidx for specific variable (gradient element) suspected
4109   * rep.badgradxbase, a point where gradient is tested
4110   * rep.badgraduser, user-provided gradient  (stored  as  2D  matrix  with
4111     single row in order to make  report  structure  compatible  with  more
4112     complex optimizers like MinNLC or MinLM)
4113   * rep.badgradnum,   reference    gradient    obtained    via   numerical
4114     differentiation (stored as  2D matrix with single row in order to make
4115     report structure compatible with more complex optimizers  like  MinNLC
4116     or MinLM)
4117 * rep.nonc0suspected
4118 * rep.nonc1suspected
4119 
4120 === ADDITIONAL REPORTS/LOGS ==============================================
4121 
Several different tests are performed to catch C0/C1 errors; you can  find
out which specific test signaled the error by looking at:
4124 * rep.nonc0test0positive, for non-C0 test #0
4125 * rep.nonc1test0positive, for non-C1 test #0
4126 * rep.nonc1test1positive, for non-C1 test #1
4127 
4128 Additional information (including line search logs)  can  be  obtained  by
4129 means of:
4130 * minbleicoptguardnonc1test0results()
4131 * minbleicoptguardnonc1test1results()
4132 which return detailed error reports, specific points where discontinuities
4133 were found, and so on.
4134 
4135 ==========================================================================
4136 
4137 INPUT PARAMETERS:
4138     state   -   algorithm state
4139 
4140 OUTPUT PARAMETERS:
4141     rep     -   generic OptGuard report;  more  detailed  reports  can  be
4142                 retrieved with other functions.
4143 
4144 NOTE: false negatives (nonsmooth problems are not identified as  nonsmooth
4145       ones) are possible although unlikely.
4146 
4147       The reason  is  that  you  need  to  make several evaluations around
4148       nonsmoothness  in  order  to  accumulate  enough  information  about
4149       function curvature. Say, if you start right from the nonsmooth point,
4150       optimizer simply won't get enough data to understand what  is  going
4151       wrong before it terminates due to abrupt changes in the  derivative.
4152       It is also  possible  that  "unlucky"  step  will  move  us  to  the
4153       termination too quickly.
4154 
4155       Our current approach is to have less than 0.1%  false  negatives  in
4156       our test examples  (measured  with  multiple  restarts  from  random
4157       points), and to have exactly 0% false positives.
4158 
4159   -- ALGLIB --
4160      Copyright 21.11.2018 by Bochkanov Sergey
4161 *************************************************************************/
4162 void minbleicoptguardresults(const minbleicstate &state, optguardreport &rep, const xparams _xparams = alglib::xdefault);
4163 
4164 
4165 /*************************************************************************
4166 Detailed results of the OptGuard integrity check for nonsmoothness test #0
4167 
4168 Nonsmoothness (non-C1) test #0 studies  function  values  (not  gradient!)
4169 obtained during line searches and monitors  behavior  of  the  directional
4170 derivative estimate.
4171 
4172 This test is less powerful than test #1, but it does  not  depend  on  the
4173 gradient values and thus it is more robust against artifacts introduced by
4174 numerical differentiation.
4175 
4176 Two reports are returned:
4177 * a "strongest" one, corresponding  to  line   search  which  had  highest
4178   value of the nonsmoothness indicator
4179 * a "longest" one, corresponding to line search which  had  more  function
4180   evaluations, and thus is more detailed
4181 
In both cases the following fields are returned:

* positive - is TRUE when the test flagged a suspicious point; FALSE if the
  test did not notice anything (in the latter case the fields  below  are
  empty).
4186 * x0[], d[] - arrays of length N which store initial point  and  direction
4187   for line search (d[] can be normalized, but does not have to)
4188 * stp[], f[] - arrays of length CNT which store step lengths and  function
4189   values at these points; f[i] is evaluated in x0+stp[i]*d.
4190 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
4191   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
4192   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
4194 
4195 ==========================================================================
4196 = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it -  you  will
4197 =                   see where C1 continuity is violated.
4198 ==========================================================================
4199 
4200 INPUT PARAMETERS:
4201     state   -   algorithm state
4202 
4203 OUTPUT PARAMETERS:
4204     strrep  -   C1 test #0 "strong" report
4205     lngrep  -   C1 test #0 "long" report
4206 
4207   -- ALGLIB --
4208      Copyright 21.11.2018 by Bochkanov Sergey
4209 *************************************************************************/
4210 void minbleicoptguardnonc1test0results(const minbleicstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams = alglib::xdefault);
4211 
4212 
4213 /*************************************************************************
4214 Detailed results of the OptGuard integrity check for nonsmoothness test #1
4215 
4216 Nonsmoothness (non-C1)  test  #1  studies  individual  components  of  the
4217 gradient computed during line search.
4218 
4219 When precise analytic gradient is provided this test is more powerful than
4220 test #0  which  works  with  function  values  and  ignores  user-provided
4221 gradient.  However,  test  #0  becomes  more   powerful   when   numerical
4222 differentiation is employed (in such cases test #1 detects  higher  levels
4223 of numerical noise and becomes too conservative).
4224 
4225 This test also tells specific components of the gradient which violate  C1
4226 continuity, which makes it more informative than #0, which just tells that
4227 continuity is violated.
4228 
4229 Two reports are returned:
4230 * a "strongest" one, corresponding  to  line   search  which  had  highest
4231   value of the nonsmoothness indicator
4232 * a "longest" one, corresponding to line search which  had  more  function
4233   evaluations, and thus is more detailed
4234 
In both cases the following fields are returned:

* positive - is TRUE when the test flagged a suspicious point; FALSE if the
  test did not notice anything (in the latter case the fields  below  are
  empty).
4239 * vidx - is an index of the variable in [0,N) with nonsmooth derivative
4240 * x0[], d[] - arrays of length N which store initial point  and  direction
4241   for line search (d[] can be normalized, but does not have to)
4242 * stp[], g[] - arrays of length CNT which store step lengths and  gradient
4243   values at these points; g[i] is evaluated in  x0+stp[i]*d  and  contains
4244   vidx-th component of the gradient.
4245 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
4246   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
4247   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
4249 
4250 ==========================================================================
= SHORTLY SPEAKING: build a 2D plot of (stp,g) and look at it -  you  will
4252 =                   see where C1 continuity is violated.
4253 ==========================================================================
4254 
4255 INPUT PARAMETERS:
4256     state   -   algorithm state
4257 
4258 OUTPUT PARAMETERS:
4259     strrep  -   C1 test #1 "strong" report
4260     lngrep  -   C1 test #1 "long" report
4261 
4262   -- ALGLIB --
4263      Copyright 21.11.2018 by Bochkanov Sergey
4264 *************************************************************************/
4265 void minbleicoptguardnonc1test1results(const minbleicstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams = alglib::xdefault);
4266 
4267 
4268 /*************************************************************************
4269 BLEIC results
4270 
4271 INPUT PARAMETERS:
4272     State   -   algorithm state
4273 
4274 OUTPUT PARAMETERS:
4275     X       -   array[0..N-1], solution
4276     Rep     -   optimization report. You should check Rep.TerminationType
4277                 in  order  to  distinguish  successful  termination  from
4278                 unsuccessful one:
4279                 * -8    internal integrity control  detected  infinite or
4280                         NAN   values   in   function/gradient.   Abnormal
4281                         termination signalled.
4282                 * -3   inconsistent constraints. Feasible point is
4283                        either nonexistent or too hard to find. Try to
4284                        restart optimizer with better initial approximation
4285                 *  1   relative function improvement is no more than EpsF.
4286                 *  2   scaled step is no more than EpsX.
4287                 *  4   scaled gradient norm is no more than EpsG.
                *  5   MaxIts steps were taken
4289                 *  8   terminated by user who called minbleicrequesttermination().
4290                        X contains point which was "current accepted"  when
4291                        termination request was submitted.
4292                 More information about fields of this  structure  can  be
4293                 found in the comments on MinBLEICReport datatype.
4294 
4295   -- ALGLIB --
4296      Copyright 28.11.2010 by Bochkanov Sergey
4297 *************************************************************************/
4298 void minbleicresults(const minbleicstate &state, real_1d_array &x, minbleicreport &rep, const xparams _xparams = alglib::xdefault);
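
/*
Illustrative sketch of retrieving BLEIC results; `state` is assumed to be an
alglib::minbleicstate which was already configured and optimized, and the
snippet is assumed to live in a unit including optimization.h and <cstdio>.

    alglib::real_1d_array x;
    alglib::minbleicreport rep;
    alglib::minbleicresults(state, x, rep);
    if( rep.terminationtype>0 )
        printf("success (code %d), x = %s\n", int(rep.terminationtype), x.tostring(6).c_str());
    else
        printf("failure, termination code %d\n", int(rep.terminationtype));
*/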
4299 
4300 
4301 /*************************************************************************
4302 BLEIC results
4303 
4304 Buffered implementation of MinBLEICResults() which uses pre-allocated buffer
4305 to store X[]. If buffer size is  too  small,  it  resizes  buffer.  It  is
4306 intended to be used in the inner cycles of performance critical algorithms
4307 where array reallocation penalty is too large to be ignored.
4308 
4309   -- ALGLIB --
4310      Copyright 28.11.2010 by Bochkanov Sergey
4311 *************************************************************************/
4312 void minbleicresultsbuf(const minbleicstate &state, real_1d_array &x, minbleicreport &rep, const xparams _xparams = alglib::xdefault);
4313 
4314 
4315 /*************************************************************************
4316 This subroutine restarts algorithm from new point.
4317 All optimization parameters (including constraints) are left unchanged.
4318 
This function allows you to solve multiple  optimization  problems  (which
4320 must have  same number of dimensions) without object reallocation penalty.
4321 
4322 INPUT PARAMETERS:
4323     State   -   structure previously allocated with MinBLEICCreate call.
4324     X       -   new starting point.
4325 
4326   -- ALGLIB --
4327      Copyright 28.11.2010 by Bochkanov Sergey
4328 *************************************************************************/
4329 void minbleicrestartfrom(const minbleicstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
4330 
4331 
4332 /*************************************************************************
4333 This subroutine submits request for termination of running  optimizer.  It
4334 should be called from user-supplied callback when user decides that it  is
time to "smoothly" terminate optimization process. As a result,  optimizer
4336 stops at point which was "current accepted" when termination  request  was
4337 submitted and returns error code 8 (successful termination).
4338 
4339 INPUT PARAMETERS:
4340     State   -   optimizer structure
4341 
4342 NOTE: after  request  for  termination  optimizer  may   perform   several
4343       additional calls to user-supplied callbacks. It does  NOT  guarantee
4344       to stop immediately - it just guarantees that these additional calls
4345       will be discarded later.
4346 
4347 NOTE: calling this function on optimizer which is NOT running will have no
4348       effect.
4349 
4350 NOTE: multiple calls to this function are possible. First call is counted,
4351       subsequent calls are silently ignored.
4352 
4353   -- ALGLIB --
4354      Copyright 08.10.2014 by Bochkanov Sergey
4355 *************************************************************************/
4356 void minbleicrequesttermination(const minbleicstate &state, const xparams _xparams = alglib::xdefault);
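
/*
Illustrative sketch of requesting termination from a progress-report
callback. It assumes that a pointer to the state object is passed to the
callback through the user pointer argument of the optimizer loop; the
callback name and the stopping rule below are hypothetical.

    void my_progress_report(const alglib::real_1d_array &x, double func, void *ptr)
    {
        alglib::minbleicstate *state = (alglib::minbleicstate*)ptr;
        if( func<1.0E-3 )                                // user-defined stopping rule
            alglib::minbleicrequesttermination(*state);  // solver will return code 8
    }
*/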
4357 #endif
4358 
4359 #if defined(AE_COMPILE_QPBLEICSOLVER) || !defined(AE_PARTIAL_BUILD)
4360 
4361 #endif
4362 
4363 #if defined(AE_COMPILE_MINQP) || !defined(AE_PARTIAL_BUILD)
4364 /*************************************************************************
4365                     CONSTRAINED QUADRATIC PROGRAMMING
4366 
4367 The subroutine creates QP optimizer. After initial creation,  it  contains
4368 default optimization problem with zero quadratic and linear terms  and  no
4369 constraints. You should set quadratic/linear terms with calls to functions
4370 provided by MinQP subpackage.
4371 
You should also choose an appropriate QP solver and set its stopping criteria
by means of one of the MinQPSetAlgo??????() functions. Then, you should start
4374 solution process by means of MinQPOptimize() call. Solution itself can  be
4375 obtained with MinQPResults() function.
4376 
4377 Following solvers are recommended:
4378 * QuickQP for dense problems with box-only constraints (or no constraints
4379   at all)
4380 * QP-BLEIC for dense/sparse problems with moderate (up to 50) number of
4381   general linear constraints
4382 * DENSE-AUL-QP for dense problems with any (small or large) number of
4383   general linear constraints
4384 
4385 INPUT PARAMETERS:
4386     N       -   problem size
4387 
4388 OUTPUT PARAMETERS:
4389     State   -   optimizer with zero quadratic/linear terms
4390                 and no constraints
4391 
4392   -- ALGLIB --
4393      Copyright 11.01.2011 by Bochkanov Sergey
4394 *************************************************************************/
4395 void minqpcreate(const ae_int_t n, minqpstate &state, const xparams _xparams = alglib::xdefault);
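
/*
Illustrative sketch of a typical call sequence: minimize
f(x)=0.5*x'*A*x+b'*x with box constraints using the QuickQP solver. The
problem data below are hypothetical.

    alglib::real_2d_array a = "[[2,0],[0,4]]";          // quadratic term (0.5*x'*A*x convention)
    alglib::real_1d_array b = "[-1,-2]";                // linear term
    alglib::real_1d_array bndl = "[0,0]", bndu = "[1,1]";
    alglib::real_1d_array s = "[1,1]";                  // variable scales
    alglib::real_1d_array x;
    alglib::minqpstate state;
    alglib::minqpreport rep;

    alglib::minqpcreate(2, state);
    alglib::minqpsetquadraticterm(state, a);
    alglib::minqpsetlinearterm(state, b);
    alglib::minqpsetbc(state, bndl, bndu);
    alglib::minqpsetscale(state, s);
    alglib::minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, true);
    alglib::minqpoptimize(state);
    alglib::minqpresults(state, x, rep);
*/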
4396 
4397 
4398 /*************************************************************************
4399 This function sets linear term for QP solver.
4400 
4401 By default, linear term is zero.
4402 
4403 INPUT PARAMETERS:
4404     State   -   structure which stores algorithm state
4405     B       -   linear term, array[N].
4406 
4407   -- ALGLIB --
4408      Copyright 11.01.2011 by Bochkanov Sergey
4409 *************************************************************************/
4410 void minqpsetlinearterm(const minqpstate &state, const real_1d_array &b, const xparams _xparams = alglib::xdefault);
4411 
4412 
4413 /*************************************************************************
4414 This  function  sets  dense  quadratic  term  for  QP solver. By  default,
4415 quadratic term is zero.
4416 
4417 SUPPORT BY QP SOLVERS:
4418 
4419 Dense quadratic term can be handled by following QP solvers:
4420 * QuickQP
4421 * BLEIC-QP
4422 * Dense-AUL-QP
4423 
4424 IMPORTANT:
4425 
4426 This solver minimizes following  function:
4427     f(x) = 0.5*x'*A*x + b'*x.
4428 Note that quadratic term has 0.5 before it. So if  you  want  to  minimize
4429     f(x) = x^2 + x
4430 you should rewrite your problem as follows:
4431     f(x) = 0.5*(2*x^2) + x
4432 and your matrix A will be equal to [[2.0]], not to [[1.0]]
4433 
4434 INPUT PARAMETERS:
4435     State   -   structure which stores algorithm state
4436     A       -   matrix, array[N,N]
4437     IsUpper -   (optional) storage type:
4438                 * if True, symmetric matrix  A  is  given  by  its  upper
4439                   triangle, and the lower triangle isn't used
4440                 * if False, symmetric matrix  A  is  given  by  its lower
4441                   triangle, and the upper triangle isn't used
4442                 * if not given, both lower and upper  triangles  must  be
4443                   filled.
4444 
4445   -- ALGLIB --
4446      Copyright 11.01.2011 by Bochkanov Sergey
4447 *************************************************************************/
4448 void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a, const bool isupper, const xparams _xparams = alglib::xdefault);
4449 void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a, const xparams _xparams = alglib::xdefault);
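
/*
Illustrative sketch of the IMPORTANT note above: to minimize f(x)=x^2+x one
must pass A=[[2.0]], because the solver works with 0.5*x'*A*x+b'*x.

    alglib::minqpstate state;
    alglib::real_2d_array a = "[[2.0]]";    // 0.5*2*x^2 = x^2
    alglib::real_1d_array b = "[1.0]";
    alglib::minqpcreate(1, state);
    alglib::minqpsetquadraticterm(state, a);
    alglib::minqpsetlinearterm(state, b);
*/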
4450 
4451 
4452 /*************************************************************************
4453 This  function  sets  sparse  quadratic  term  for  QP solver. By default,
4454 quadratic  term  is  zero.  This  function  overrides  previous  calls  to
4455 minqpsetquadraticterm() or minqpsetquadratictermsparse().
4456 
4457 SUPPORT BY QP SOLVERS:
4458 
4459 Sparse quadratic term can be handled by following QP solvers:
4460 * QuickQP
4461 * BLEIC-QP
4462 * Dense-AUL-QP (internally converts sparse matrix to dense format)
4463 
4464 IMPORTANT:
4465 
4466 This solver minimizes following  function:
4467     f(x) = 0.5*x'*A*x + b'*x.
4468 Note that quadratic term has 0.5 before it. So if  you  want  to  minimize
4469     f(x) = x^2 + x
4470 you should rewrite your problem as follows:
4471     f(x) = 0.5*(2*x^2) + x
4472 and your matrix A will be equal to [[2.0]], not to [[1.0]]
4473 
4474 INPUT PARAMETERS:
4475     State   -   structure which stores algorithm state
4476     A       -   matrix, array[N,N]
4477     IsUpper -   (optional) storage type:
4478                 * if True, symmetric matrix  A  is  given  by  its  upper
4479                   triangle, and the lower triangle isn't used
4480                 * if False, symmetric matrix  A  is  given  by  its lower
4481                   triangle, and the upper triangle isn't used
4482                 * if not given, both lower and upper  triangles  must  be
4483                   filled.
4484 
4485   -- ALGLIB --
4486      Copyright 11.01.2011 by Bochkanov Sergey
4487 *************************************************************************/
4488 void minqpsetquadratictermsparse(const minqpstate &state, const sparsematrix &a, const bool isupper, const xparams _xparams = alglib::xdefault);
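
/*
Illustrative sketch of setting a 2x2 diagonal quadratic term in sparse
format; `state` is assumed to be an already created alglib::minqpstate.
Only the upper triangle is filled, so IsUpper=true is passed.

    alglib::sparsematrix a;
    alglib::sparsecreate(2, 2, a);
    alglib::sparseset(a, 0, 0, 2.0);    // diagonal entries of A
    alglib::sparseset(a, 1, 1, 4.0);
    alglib::minqpsetquadratictermsparse(state, a, true);
*/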
4489 
4490 
4491 /*************************************************************************
This function sets starting point for QP solver. It is  useful  to  have  a
good initial approximation to the solution, because it speeds up convergence
and identification of active constraints.
4495 
4496 INPUT PARAMETERS:
4497     State   -   structure which stores algorithm state
4498     X       -   starting point, array[N].
4499 
4500   -- ALGLIB --
4501      Copyright 11.01.2011 by Bochkanov Sergey
4502 *************************************************************************/
4503 void minqpsetstartingpoint(const minqpstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
4504 
4505 
4506 /*************************************************************************
4507 This  function sets origin for QP solver. By default, following QP program
4508 is solved:
4509 
4510     min(0.5*x'*A*x+b'*x)
4511 
This function allows you to solve a different problem:
4513 
4514     min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin))
4515 
4516 Specification of non-zero origin affects function being minimized, but not
4517 constraints. Box and  linear  constraints  are  still  calculated  without
4518 origin.
4519 
4520 INPUT PARAMETERS:
4521     State   -   structure which stores algorithm state
4522     XOrigin -   origin, array[N].
4523 
4524   -- ALGLIB --
4525      Copyright 11.01.2011 by Bochkanov Sergey
4526 *************************************************************************/
4527 void minqpsetorigin(const minqpstate &state, const real_1d_array &xorigin, const xparams _xparams = alglib::xdefault);
4528 
4529 
4530 /*************************************************************************
4531 This function sets scaling coefficients.
4532 
4533 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
4534 size and gradient are scaled before comparison  with  tolerances)  and  as
4535 preconditioner.
4536 
4537 Scale of the I-th variable is a translation invariant measure of:
4538 a) "how large" the variable is
4539 b) how large the step should be to make significant changes in the
4540    function
4541 
4542 If you do not know how to choose scales of your variables, you can:
4543 * read www.alglib.net/optimization/scaling.php article
4544 * use minqpsetscaleautodiag(), which calculates scale  using  diagonal  of
4545   the  quadratic  term:  S  is  set to 1/sqrt(diag(A)), which works well
4546   sometimes.
4547 
4548 INPUT PARAMETERS:
4549     State   -   structure stores algorithm state
4550     S       -   array[N], non-zero scaling coefficients
4551                 S[i] may be negative, sign doesn't matter.
4552 
4553   -- ALGLIB --
4554      Copyright 14.01.2011 by Bochkanov Sergey
4555 *************************************************************************/
4556 void minqpsetscale(const minqpstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
4557 
4558 
4559 /*************************************************************************
4560 This function sets automatic evaluation of variable scaling.
4561 
4562 IMPORTANT: this function works only for  matrices  with positive  diagonal
4563            elements! Zero or negative elements will  result  in  -9  error
4564            code  being  returned.  Specify  scale  vector  manually   with
4565            minqpsetscale() in such cases.
4566 
4567 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
4568 size and gradient are scaled before comparison  with  tolerances)  and  as
4569 preconditioner.
4570 
4571 The  best  way  to  set  scaling  is  to manually specify variable scales.
4572 However, sometimes you just need quick-and-dirty solution  -  either  when
4573 you perform fast prototyping, or when you know your problem well  and  you
4574 are 100% sure that this quick solution is robust enough in your case.
4575 
4576 One such solution is to evaluate scale of I-th variable as 1/Sqrt(A[i,i]),
4577 where A[i,i] is an I-th diagonal element of the quadratic term.
4578 
4579 Such approach works well sometimes, but you have to be careful here.
4580 
4581 INPUT PARAMETERS:
4582     State   -   structure stores algorithm state
4583 
4584   -- ALGLIB --
4585      Copyright 26.12.2017 by Bochkanov Sergey
4586 *************************************************************************/
4587 void minqpsetscaleautodiag(const minqpstate &state, const xparams _xparams = alglib::xdefault);
4588 
4589 
4590 /*************************************************************************
4591 This function tells solver to use BLEIC-based algorithm and sets  stopping
4592 criteria for the algorithm.
4593 
4594 This algorithm is fast  enough  for large-scale  problems  with  following
4595 properties:
4596 a) feasible initial point, moderate amount of general linear constraints
4597 b) arbitrary (can be infeasible) initial point, small  amount  of  general
4598    linear constraints (say, hundred or less)
4599 
4600 If you solve large-scale QP problem with many inequality  constraints  and
4601 without initial feasibility guarantees, consider  using  DENSE-AUL  solver
4602 instead. Initial feasibility detection stage by BLEIC may take too long on
4603 such problems.
4604 
4605 ALGORITHM FEATURES:
4606 
4607 * supports dense and sparse QP problems
4608 * supports box and general linear equality/inequality constraints
4609 * can solve all types of problems  (convex,  semidefinite,  nonconvex)  as
4610   long as they are bounded from below under constraints.
4611   Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1".
4612   Of course, global  minimum  is found only  for  positive  definite   and
4613   semidefinite  problems.  As  for indefinite ones - only local minimum is
4614   found.
4615 
4616 ALGORITHM OUTLINE:
4617 
4618 * BLEIC-QP solver is just a driver function for MinBLEIC solver; it solves
4619   quadratic  programming   problem   as   general   linearly   constrained
4620   optimization problem, which is solved by means of BLEIC solver  (part of
4621   ALGLIB, active set method).
4622 
4623 ALGORITHM LIMITATIONS:
4624 * This algorithm is inefficient on  problems with hundreds  and  thousands
4625   of general inequality constraints and infeasible initial point.  Initial
4626   feasibility detection stage may take too long on such constraint sets.
4627   Consider using DENSE-AUL instead.
4628 * unlike QuickQP solver, this algorithm does not perform Newton steps  and
4629   does not use Level 3 BLAS. Being general-purpose active set  method,  it
4630   can activate constraints only one-by-one. Thus, its performance is lower
4631   than that of QuickQP.
4632 * its precision is also a bit  inferior  to  that  of   QuickQP.  BLEIC-QP
4633   performs only LBFGS steps (no Newton steps), which are good at detecting
  neighborhood of the solution, but need many iterations to find a solution
4635   with more than 6 digits of precision.
4636 
4637 INPUT PARAMETERS:
4638     State   -   structure which stores algorithm state
4639     EpsG    -   >=0
4640                 The  subroutine  finishes  its  work   if   the  condition
4641                 |v|<EpsG is satisfied, where:
4642                 * |.| means Euclidian norm
4643                 * v - scaled constrained gradient vector, v[i]=g[i]*s[i]
4644                 * g - gradient
4645                 * s - scaling coefficients set by MinQPSetScale()
4646     EpsF    -   >=0
4647                 The  subroutine  finishes its work if exploratory steepest
4648                 descent  step  on  k+1-th iteration  satisfies   following
4649                 condition:  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
4650     EpsX    -   >=0
4651                 The  subroutine  finishes its work if exploratory steepest
4652                 descent  step  on  k+1-th iteration  satisfies   following
                condition |v|<=EpsX is satisfied, where:
4654                 * |.| means Euclidian norm
4655                 * v - scaled step vector, v[i]=dx[i]/s[i]
4656                 * dx - step vector, dx=X(k+1)-X(k)
4657                 * s - scaling coefficients set by MinQPSetScale()
4658     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
4659                 iterations is unlimited. NOTE: this  algorithm uses  LBFGS
4660                 iterations,  which  are  relatively  cheap,  but   improve
4661                 function value only a bit. So you will need many iterations
4662                 to converge - from 0.1*N to 10*N, depending  on  problem's
4663                 condition number.
4664 
4665 IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS  ALGORITHM
4666 BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT!
4667 
4668 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
4669 to automatic stopping criterion selection (presently it is  small    step
4670 length, but it may change in the future versions of ALGLIB).
4671 
4672   -- ALGLIB --
4673      Copyright 11.01.2011 by Bochkanov Sergey
4674 *************************************************************************/
4675 void minqpsetalgobleic(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
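
/*
Illustrative sketch; `state` and the scale vector `s` are assumed to exist.
BLEIC-QP is selected with a scaled-step criterion of 1.0E-6 and no
iteration limit.

    alglib::minqpsetscale(state, s);                        // scales first - criteria are scale-dependent
    alglib::minqpsetalgobleic(state, 0.0, 0.0, 1.0E-6, 0);  // stop on EpsX only
*/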
4676 
4677 
4678 /*************************************************************************
4679 This function tells QP solver to use Dense-AUL algorithm and sets stopping
4680 criteria for the algorithm.
4681 
4682 ALGORITHM FEATURES:
4683 
4684 * supports  box  and  dense/sparse  general   linear   equality/inequality
4685   constraints
4686 * convergence is theoretically proved for positive-definite  (convex)   QP
4687   problems. Semidefinite and non-convex problems can be solved as long  as
4688   they  are   bounded  from  below  under  constraints,  although  without
4689   theoretical guarantees.
4690 * this solver is better than QP-BLEIC on problems  with  large  number  of
4691   general linear constraints. It better handles infeasible initial points.
4692 
4693 ALGORITHM OUTLINE:
4694 
4695 * this  algorithm   is   an   augmented   Lagrangian   method  with  dense
4696   preconditioner (hence  its  name).  It  is  similar  to  barrier/penalty
4697   methods, but much more precise and faster.
4698 * it performs several outer iterations in order to refine  values  of  the
4699   Lagrange multipliers. Single outer  iteration  is  a  solution  of  some
4700   unconstrained optimization problem: first  it  performs  dense  Cholesky
4701   factorization of the Hessian in order to build preconditioner  (adaptive
4702   regularization is applied to enforce positive  definiteness),  and  then
4703   it uses L-BFGS optimizer to solve optimization problem.
4704 * typically you need about 5-10 outer iterations to converge to solution
4705 
4706 ALGORITHM LIMITATIONS:
4707 
4708 * because dense Cholesky driver is used, this algorithm has O(N^2)  memory
4709   requirements and O(OuterIterations*N^3) minimum running time.  From  the
4710   practical  point  of  view,  it  limits  its  applicability  by  several
4711   thousands of variables.
  On the other hand, the variable count is the most  limiting  factor,  and
  the dependence on the constraint count is much  weaker.  Assuming  that
4714   constraint matrix is sparse, it may handle tens of thousands  of general
4715   linear constraints.
4716 
4717 INPUT PARAMETERS:
4718     State   -   structure which stores algorithm state
4719     EpsX    -   >=0, stopping criteria for inner optimizer.
4720                 Inner  iterations  are  stopped  when  step  length  (with
4721                 variable scaling being applied) is less than EpsX.
4722                 See  minqpsetscale()  for  more  information  on  variable
4723                 scaling.
4724     Rho     -   penalty coefficient, Rho>0:
4725                 * large enough  that  algorithm  converges  with   desired
4726                   precision.
                * not TOO large, in order to prevent ill-conditioning
4728                 * recommended values are 100, 1000 or 10000
4729     ItsCnt  -   number of outer iterations:
4730                 * recommended values: 10-15 (although  in  most  cases  it
4731                   converges within 5 iterations, you may need a  few  more
4732                   to be sure).
4733                 * ItsCnt=0 means that small number of outer iterations  is
4734                   automatically chosen (10 iterations in current version).
4735                 * ItsCnt=1 means that AUL algorithm performs just as usual
4736                   penalty method.
4737                 * ItsCnt>1 means that  AUL  algorithm  performs  specified
4738                   number of outer iterations
4739 
4740 IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS  ALGORITHM
4741 BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT!
4742 
4743 NOTE: Passing  EpsX=0  will  lead  to  automatic  step  length  selection
4744       (specific step length chosen may change in the future  versions  of
4745       ALGLIB, so it is better to specify step length explicitly).
4746 
4747   -- ALGLIB --
4748      Copyright 20.08.2016 by Bochkanov Sergey
4749 *************************************************************************/
4750 void minqpsetalgodenseaul(const minqpstate &state, const double epsx, const double rho, const ae_int_t itscnt, const xparams _xparams = alglib::xdefault);
4751 
4752 
4753 /*************************************************************************
4754 This function tells solver to use QuickQP  algorithm:  special  extra-fast
algorithm for problems with box-only constraints. It may solve  non-convex
4756 problems as long as they are bounded from below under constraints.
4757 
4758 ALGORITHM FEATURES:
4759 * many times (from 5x to 50x!) faster than BLEIC-based QP solver; utilizes
4760   accelerated methods for activation of constraints.
4761 * supports dense and sparse QP problems
4762 * supports ONLY box constraints; general linear constraints are NOT
4763   supported by this solver
4764 * can solve all types of problems  (convex,  semidefinite,  nonconvex)  as
4765   long as they are bounded from below under constraints.
4766   Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1".
4767   In convex/semidefinite case global minimum  is  returned,  in  nonconvex
4768   case - algorithm returns one of the local minimums.
4769 
4770 ALGORITHM OUTLINE:
4771 
4772 * algorithm  performs  two kinds of iterations: constrained CG  iterations
4773   and constrained Newton iterations
4774 * initially it performs small number of constrained CG  iterations,  which
4775   can efficiently activate/deactivate multiple constraints
4776 * after CG phase algorithm tries to calculate Cholesky  decomposition  and
4777   to perform several constrained Newton steps. If  Cholesky  decomposition
4778   failed (matrix is indefinite even under constraints),  we  perform  more
4779   CG iterations until we converge to such set of constraints  that  system
4780   matrix becomes  positive  definite.  Constrained  Newton  steps  greatly
4781   increase convergence speed and precision.
* algorithm interleaves CG and Newton iterations which allows it to handle
4783   indefinite matrices (CG phase) and quickly converge after final  set  of
4784   constraints is found (Newton phase). Combination of CG and Newton phases
4785   is called "outer iteration".
4786 * it is possible to turn off Newton  phase  (beneficial  for  semidefinite
4787   problems - Cholesky decomposition will fail too often)
4788 
4789 ALGORITHM LIMITATIONS:
4790 
4791 * algorithm does not support general  linear  constraints;  only  box ones
4792   are supported
4793 * Cholesky decomposition for sparse problems  is  performed  with  Skyline
4794   Cholesky solver, which is intended for low-profile matrices. No profile-
4795   reducing reordering of variables is performed in this version of ALGLIB.
* problems with near-zero negative eigenvalues (or exactly zero ones)  may
4797   experience about 2-3x performance penalty. The reason is  that  Cholesky
4798   decomposition can not be performed until we identify directions of  zero
4799   and negative curvature and activate corresponding boundary constraints -
  but we need a lot of trial and error because these directions  are  hard
4801   to notice in the matrix spectrum.
4802   In this case you may turn off Newton phase of algorithm.
4803   Large negative eigenvalues  are  not  an  issue,  so  highly  non-convex
4804   problems can be solved very efficiently.
4805 
4806 INPUT PARAMETERS:
4807     State   -   structure which stores algorithm state
4808     EpsG    -   >=0
4809                 The  subroutine  finishes  its  work   if   the  condition
4810                 |v|<EpsG is satisfied, where:
4811                 * |.| means Euclidian norm
4812                 * v - scaled constrained gradient vector, v[i]=g[i]*s[i]
4813                 * g - gradient
4814                 * s - scaling coefficients set by MinQPSetScale()
4815     EpsF    -   >=0
4816                 The  subroutine  finishes its work if exploratory steepest
4817                 descent  step  on  k+1-th iteration  satisfies   following
4818                 condition:  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
4819     EpsX    -   >=0
4820                 The  subroutine  finishes its work if exploratory steepest
4821                 descent  step  on  k+1-th iteration  satisfies   following
                condition |v|<=EpsX is satisfied, where:
4823                 * |.| means Euclidian norm
4824                 * v - scaled step vector, v[i]=dx[i]/s[i]
4825                 * dx - step vector, dx=X(k+1)-X(k)
4826                 * s - scaling coefficients set by MinQPSetScale()
4827     MaxOuterIts-maximum number of OUTER iterations.  One  outer  iteration
4828                 includes some amount of CG iterations (from 5 to  ~N)  and
4829                 one or several (usually small amount) Newton steps.  Thus,
4830                 one outer iteration has high cost, but can greatly  reduce
                function value.
4832                 Use 0 if you do not want to limit number of outer iterations.
4833     UseNewton-  use Newton phase or not:
4834                 * Newton phase improves performance of  positive  definite
4835                   dense problems (about 2 times improvement can be observed)
4836                 * can result in some performance penalty  on  semidefinite
4837                   or slightly negative definite  problems  -  each  Newton
4838                   phase will bring no improvement (Cholesky failure),  but
4839                   still will require computational time.
                * if in doubt, you can turn off this  phase  -  optimizer
                  will retain most of its high speed.
4842 
4843 IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS  ALGORITHM
4844 BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT!
4845 
Passing EpsG=0, EpsF=0, EpsX=0 and MaxOuterIts=0 (simultaneously) will lead
4847 to automatic stopping criterion selection (presently it is  small    step
4848 length, but it may change in the future versions of ALGLIB).
4849 
4850   -- ALGLIB --
4851      Copyright 22.05.2014 by Bochkanov Sergey
4852 *************************************************************************/
4853 void minqpsetalgoquickqp(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxouterits, const bool usenewton, const xparams _xparams = alglib::xdefault);
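
/*
Illustrative sketch; `state` is assumed to exist. QuickQP is selected with
default stopping criteria and the Newton phase turned off, which - as noted
above - may help on semidefinite problems where Cholesky factorization
fails often.

    alglib::minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, false);
*/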
4854 
4855 
4856 /*************************************************************************
4857 This function sets box constraints for QP solver
4858 
4859 Box constraints are inactive by default (after  initial  creation).  After
4860 being  set,  they  are  preserved until explicitly turned off with another
4861 SetBC() call.
4862 
4863 All QP solvers may handle box constraints.
4864 
4865 INPUT PARAMETERS:
4866     State   -   structure stores algorithm state
4867     BndL    -   lower bounds, array[N].
4868                 If some (all) variables are unbounded, you may specify
4869                 very small number or -INF (latter is recommended because
4870                 it will allow solver to use better algorithm).
4871     BndU    -   upper bounds, array[N].
4872                 If some (all) variables are unbounded, you may specify
4873                 very large number or +INF (latter is recommended because
4874                 it will allow solver to use better algorithm).
4875 
4876 NOTE: it is possible to specify BndL[i]=BndU[i]. In this case I-th
4877 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
4878 
4879   -- ALGLIB --
4880      Copyright 11.01.2011 by Bochkanov Sergey
4881 *************************************************************************/
4882 void minqpsetbc(const minqpstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
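
/*
Illustrative sketch for a 3-variable problem (`state` assumed to exist):
0<=x0<=1, x1 bounded from below only, x2 frozen at 0.5. Infinities are
passed as alglib::fp_posinf / alglib::fp_neginf, as recommended above.

    alglib::real_1d_array bndl = "[0.0, 0.0, 0.5]";
    alglib::real_1d_array bndu = "[1.0, 0.0, 0.5]";
    bndu[1] = alglib::fp_posinf;            // x1 has no upper bound
    alglib::minqpsetbc(state, bndl, bndu);
*/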
4883 
4884 
4885 /*************************************************************************
4886 This function sets dense linear constraints for QP optimizer.
4887 
4888 This  function  overrides  results  of  previous  calls  to  minqpsetlc(),
4889 minqpsetlcsparse() and minqpsetlcmixed().  After  call  to  this  function
4890 sparse constraints are dropped, and you have only those constraints  which
4891 were specified in the present call.
4892 
4893 If you want  to  specify  mixed  (with  dense  and  sparse  terms)  linear
4894 constraints, you should call minqpsetlcmixed().
4895 
4896 SUPPORT BY QP SOLVERS:
4897 
4898 Following QP solvers can handle dense linear constraints:
4899 * BLEIC-QP          -   handles them  with  high  precision,  but  may  be
4900                         inefficient for problems with hundreds of constraints
4901 * Dense-AUL-QP      -   handles them with moderate precision (approx. 10^-6),
4902                         may efficiently handle thousands of constraints.
4903 
4904 Following QP solvers can NOT handle dense linear constraints:
4905 * QuickQP           -   can not handle general linear constraints
4906 
4907 INPUT PARAMETERS:
4908     State   -   structure previously allocated with MinQPCreate call.
4909     C       -   linear constraints, array[K,N+1].
4910                 Each row of C represents one constraint, either equality
4911                 or inequality (see below):
4912                 * first N elements correspond to coefficients,
4913                 * last element corresponds to the right part.
4914                 All elements of C (including right part) must be finite.
4915     CT      -   type of constraints, array[K]:
4916                 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
4917                 * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n+1]
4918                 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
4919     K       -   number of equality/inequality constraints, K>=0:
4920                 * if given, only leading K elements of C/CT are used
4921                 * if not given, automatically determined from sizes of C/CT
4922 
4923 NOTE 1: linear (non-bound) constraints are satisfied only approximately  -
4924         there always exists some violation due  to  numerical  errors  and
4925         algorithmic limitations (BLEIC-QP solver is most  precise,  AUL-QP
4926         solver is less precise).
4927 
4928   -- ALGLIB --
4929      Copyright 19.06.2012 by Bochkanov Sergey
4930 *************************************************************************/
4931 void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
4932 void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
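
/*
Illustrative sketch for a 2-variable problem (`state` assumed to exist):
constraints x0+x1 <= 2 and x0-x1 = 0 in the array[K,N+1]/CT format
described above.

    alglib::real_2d_array c = "[[1,1,2],[1,-1,0]]";   // N coefficients + right part per row
    alglib::integer_1d_array ct = "[-1,0]";           // "<=" and "="
    alglib::minqpsetlc(state, c, ct);
*/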
4933 
4934 
4935 /*************************************************************************
4936 This function sets sparse linear constraints for QP optimizer.
4937 
4938 This  function  overrides  results  of  previous  calls  to  minqpsetlc(),
4939 minqpsetlcsparse() and minqpsetlcmixed().  After  call  to  this  function
4940 dense constraints are dropped, and you have only those  constraints  which
4941 were specified in the present call.
4942 
4943 If you want  to  specify  mixed  (with  dense  and  sparse  terms)  linear
4944 constraints, you should call minqpsetlcmixed().
4945 
4946 SUPPORT BY QP SOLVERS:
4947 
4948 Following QP solvers can handle sparse linear constraints:
4949 * BLEIC-QP          -   handles them  with  high  precision,  but can  not
4950                         utilize their sparsity - sparse constraint  matrix
4951                         is silently converted to dense  format.  Thus,  it
4952                         may be inefficient for problems with  hundreds  of
4953                         constraints.
4954 * Dense-AUL-QP      -   although this solver uses dense linear algebra  to
4955                         calculate   Cholesky   preconditioner,   it    may
4956                         efficiently  handle  sparse  constraints.  It  may
4957                         solve problems  with  hundreds  and  thousands  of
4958                         constraints. The only drawback is  that  precision
4959                         of constraint handling is typically within 1E-4...
4960                         ..1E-6 range.
4961 
4962 Following QP solvers can NOT handle sparse linear constraints:
4963 * QuickQP           -   can not handle general linear constraints
4964 
4965 INPUT PARAMETERS:
4966     State   -   structure previously allocated with MinQPCreate call.
4967     C       -   linear  constraints,  sparse  matrix  with  dimensions  at
4968                 least [K,N+1]. If matrix has  larger  size,  only  leading
4969                 Kx(N+1) rectangle is used.
4970                 Each row of C represents one constraint, either equality
4971                 or inequality (see below):
4972                 * first N elements correspond to coefficients,
4973                 * last element corresponds to the right part.
4974                 All elements of C (including right part) must be finite.
4975     CT      -   type of constraints, array[K]:
4976                 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
4977                 * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n+1]
4978                 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
4979     K       -   number of equality/inequality constraints, K>=0
4980 
4981 NOTE 1: linear (non-bound) constraints are satisfied only approximately  -
4982         there always exists some violation due  to  numerical  errors  and
4983         algorithmic limitations (BLEIC-QP solver is most  precise,  AUL-QP
4984         solver is less precise).
4985 
4986   -- ALGLIB --
4987      Copyright 22.08.2016 by Bochkanov Sergey
4988 *************************************************************************/
4989 void minqpsetlcsparse(const minqpstate &state, const sparsematrix &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
4990 
4991 
4992 /*************************************************************************
4993 This function sets mixed linear constraints, which include a set of  dense
4994 rows, and a set of sparse rows.
4995 
4996 This  function  overrides  results  of  previous  calls  to  minqpsetlc(),
4997 minqpsetlcsparse() and minqpsetlcmixed().
4998 
4999 This function may be useful if constraint matrix includes large number  of
5000 both types of rows - dense and sparse. If you have just a few sparse rows,
you  may  represent  them  in  dense  format  without losing  performance.
5002 Similarly, if you have just a few dense rows, you may store them in sparse
5003 format with almost same performance.
5004 
5005 SUPPORT BY QP SOLVERS:
5006 
5007 Following QP solvers can handle mixed dense/sparse linear constraints:
5008 * BLEIC-QP          -   handles them  with  high  precision,  but can  not
5009                         utilize their sparsity - sparse constraint  matrix
5010                         is silently converted to dense  format.  Thus,  it
5011                         may be inefficient for problems with  hundreds  of
5012                         constraints.
5013 * Dense-AUL-QP      -   although this solver uses dense linear algebra  to
5014                         calculate   Cholesky   preconditioner,   it    may
5015                         efficiently  handle  sparse  constraints.  It  may
5016                         solve problems  with  hundreds  and  thousands  of
5017                         constraints. The only drawback is  that  precision
5018                         of constraint handling is typically within 1E-4...
5019                         ..1E-6 range.
5020 
5021 Following QP solvers can NOT handle mixed linear constraints:
5022 * QuickQP           -   can not handle general linear constraints at all
5023 
5024 INPUT PARAMETERS:
5025     State   -   structure previously allocated with MinQPCreate call.
5026     DenseC  -   dense linear constraints, array[K,N+1].
5027                 Each row of DenseC represents one constraint, either equality
5028                 or inequality (see below):
5029                 * first N elements correspond to coefficients,
5030                 * last element corresponds to the right part.
5031                 All elements of DenseC (including right part) must be finite.
5032     DenseCT -   type of constraints, array[K]:
5033                 * if DenseCT[i]>0, then I-th constraint is DenseC[i,*]*x >= DenseC[i,n+1]
5034                 * if DenseCT[i]=0, then I-th constraint is DenseC[i,*]*x  = DenseC[i,n+1]
5035                 * if DenseCT[i]<0, then I-th constraint is DenseC[i,*]*x <= DenseC[i,n+1]
5036     DenseK  -   number of equality/inequality constraints, DenseK>=0
5037     SparseC -   linear  constraints,  sparse  matrix  with  dimensions  at
5038                 least [SparseK,N+1]. If matrix has  larger  size,  only  leading
5039                 SPARSEKx(N+1) rectangle is used.
5040                 Each row of C represents one constraint, either equality
5041                 or inequality (see below):
5042                 * first N elements correspond to coefficients,
5043                 * last element corresponds to the right part.
5044                 All elements of C (including right part) must be finite.
    SparseCT-   type of sparse constraints, array[SparseK]:
5046                 * if SparseCT[i]>0, then I-th constraint is SparseC[i,*]*x >= SparseC[i,n+1]
5047                 * if SparseCT[i]=0, then I-th constraint is SparseC[i,*]*x  = SparseC[i,n+1]
5048                 * if SparseCT[i]<0, then I-th constraint is SparseC[i,*]*x <= SparseC[i,n+1]
    SparseK -   number of sparse equality/inequality constraints, SparseK>=0
5050 
5051 NOTE 1: linear (non-bound) constraints are satisfied only approximately  -
5052         there always exists some violation due  to  numerical  errors  and
5053         algorithmic limitations (BLEIC-QP solver is most  precise,  AUL-QP
5054         solver is less precise).
5055 
5056   -- ALGLIB --
5057      Copyright 22.08.2016 by Bochkanov Sergey
5058 *************************************************************************/
5059 void minqpsetlcmixed(const minqpstate &state, const real_2d_array &densec, const integer_1d_array &densect, const ae_int_t densek, const sparsematrix &sparsec, const integer_1d_array &sparsect, const ae_int_t sparsek, const xparams _xparams = alglib::xdefault);
5060 
5061 
5062 /*************************************************************************
5063 This function solves quadratic programming problem.
5064 
5065 Prior to calling this function you should choose solver by means of one of
5066 the following functions:
5067 
5068 * minqpsetalgoquickqp()     - for QuickQP solver
5069 * minqpsetalgobleic()       - for BLEIC-QP solver
5070 * minqpsetalgodenseaul()    - for Dense-AUL-QP solver
5071 
5072 These functions also allow you to control stopping criteria of the solver.
5073 If you did not set solver,  MinQP  subpackage  will  automatically  select
5074 solver for your problem and will run it with default stopping criteria.
5075 
However, it is better to explicitly choose the  solver  and  set  its
stopping criteria.
5077 
5078 INPUT PARAMETERS:
5079     State   -   algorithm state
5080 
5081 You should use MinQPResults() function to access results after calls
5082 to this function.
5083 
5084   -- ALGLIB --
5085      Copyright 11.01.2011 by Bochkanov Sergey.
5086      Special thanks to Elvira Illarionova  for  important  suggestions  on
5087      the linearly constrained QP algorithm.
5088 *************************************************************************/
5089 void minqpoptimize(const minqpstate &state, const xparams _xparams = alglib::xdefault);
5090 
5091 
5092 /*************************************************************************
5093 QP solver results
5094 
5095 INPUT PARAMETERS:
5096     State   -   algorithm state
5097 
5098 OUTPUT PARAMETERS:
5099     X       -   array[0..N-1], solution.
5100                 This array is allocated and initialized only when
5101                 Rep.TerminationType parameter is positive (success).
5102     Rep     -   optimization report. You should check Rep.TerminationType,
                which contains completion code, and you may  check  other
                fields which contain additional information about algorithm
                functioning.
5106 
5107                 Failure codes returned by algorithm are:
5108                 * -9    failure of the automatic scale evaluation:  one of
5109                         the diagonal elements of  the  quadratic  term  is
5110                         non-positive.  Specify variable scales manually!
5111                 * -5    inappropriate solver was used:
5112                         * QuickQP solver for problem with  general  linear
5113                           constraints
5114                 * -4    BLEIC-QP/QuickQP   solver    found   unconstrained
5115                         direction  of   negative  curvature  (function  is
5116                         unbounded from below even under constraints),   no
5117                         meaningful minimum can be found.
5118                 * -3    inconsistent constraints (or maybe  feasible point
5119                         is too  hard  to  find).  If  you  are  sure  that
5120                         constraints are feasible, try to restart optimizer
5121                         with better initial approximation.
5122 
5123                 Completion codes specific for Cholesky algorithm:
5124                 *  4   successful completion
5125 
5126                 Completion codes specific for BLEIC/QuickQP algorithms:
5127                 *  1   relative function improvement is no more than EpsF.
5128                 *  2   scaled step is no more than EpsX.
5129                 *  4   scaled gradient norm is no more than EpsG.
                *  5   MaxIts steps were taken
5131 
5132   -- ALGLIB --
5133      Copyright 11.01.2011 by Bochkanov Sergey
5134 *************************************************************************/
5135 void minqpresults(const minqpstate &state, real_1d_array &x, minqpreport &rep, const xparams _xparams = alglib::xdefault);
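
/*
Illustrative sketch of retrieving the QP solution; `state` is assumed to be
an alglib::minqpstate which was already optimized, and the snippet assumes
optimization.h and <cstdio> are included.

    alglib::real_1d_array x;
    alglib::minqpreport rep;
    alglib::minqpresults(state, x, rep);
    if( rep.terminationtype>0 )
        printf("x = %s\n", x.tostring(6).c_str());
    else
        printf("solver failed, termination code %d\n", int(rep.terminationtype));
*/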
5136 
5137 
5138 /*************************************************************************
5139 QP results
5140 
5141 Buffered implementation of MinQPResults() which uses pre-allocated  buffer
5142 to store X[]. If buffer size is  too  small,  it  resizes  buffer.  It  is
5143 intended to be used in the inner cycles of performance critical algorithms
5144 where array reallocation penalty is too large to be ignored.
5145 
5146   -- ALGLIB --
5147      Copyright 11.01.2011 by Bochkanov Sergey
5148 *************************************************************************/
5149 void minqpresultsbuf(const minqpstate &state, real_1d_array &x, minqpreport &rep, const xparams _xparams = alglib::xdefault);
5150 #endif
5151 
5152 #if defined(AE_COMPILE_REVISEDDUALSIMPLEX) || !defined(AE_PARTIAL_BUILD)
5153 
5154 #endif
5155 
5156 #if defined(AE_COMPILE_MINLP) || !defined(AE_PARTIAL_BUILD)
5157 /*************************************************************************
5158                             LINEAR PROGRAMMING
5159 
5160 The subroutine creates LP  solver.  After  initial  creation  it  contains
default optimization problem with zero cost vector, all variables fixed at
zero, and no constraints.
5163 
5164 In order to actually solve something you should:
5165 * set cost vector with minlpsetcost()
5166 * set variable bounds with minlpsetbc() or minlpsetbcall()
5167 * specify constraint matrix with one of the following functions:
5168   [*] minlpsetlc()        for dense one-sided constraints
5169   [*] minlpsetlc2dense()  for dense two-sided constraints
5170   [*] minlpsetlc2()       for sparse two-sided constraints
5171   [*] minlpaddlc2dense()  to add one dense row to constraint matrix
5172   [*] minlpaddlc2()       to add one row to constraint matrix (compressed format)
5173 * call minlpoptimize() to run the solver and  minlpresults()  to  get  the
5174   solution vector and additional information.
5175 
5176 Presently  this  optimizer  supports  only  revised  simplex   method   as
5177 underlying solver. DSE pricing and bounds flipping ratio  test  (aka  long
5178 dual step) are supported. Large-scale sparse LU solver with  Forest-Tomlin
5179 is used internally as linear algebra driver.
5180 
5181 Future releases of ALGLIB may introduce other solvers.
5182 
5183 INPUT PARAMETERS:
5184     N       -   problem size
5185 
5186 OUTPUT PARAMETERS:
5187     State   -   optimizer in the default state
5188 
5189   -- ALGLIB --
5190      Copyright 19.07.2018 by Bochkanov Sergey
5191 *************************************************************************/
5192 void minlpcreate(const ae_int_t n, minlpstate &state, const xparams _xparams = alglib::xdefault);
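
/*
Illustrative sketch of a typical LP call sequence: minimize c'*x = -x0-x1
subject to 0<=x<=1 and x0+x1<=1.5. The problem data are hypothetical.

    alglib::real_1d_array c = "[-1,-1]";
    alglib::real_2d_array a = "[[1,1]]";
    alglib::real_1d_array al = "[0]", au = "[1.5]";
    alglib::real_1d_array x;
    alglib::minlpstate state;
    alglib::minlpreport rep;

    alglib::minlpcreate(2, state);
    alglib::minlpsetcost(state, c);
    alglib::minlpsetbcall(state, 0.0, 1.0);
    al[0] = alglib::fp_neginf;                       // one-sided row: x0+x1<=1.5
    alglib::minlpsetlc2dense(state, a, al, au);
    alglib::minlpoptimize(state);
    alglib::minlpresults(state, x, rep);
*/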
5193 
5194 
5195 /*************************************************************************
5196 This function sets cost term for LP solver.
5197 
5198 By default, cost term is zero.
5199 
5200 INPUT PARAMETERS:
5201     State   -   structure which stores algorithm state
5202     C       -   cost term, array[N].
5203 
5204   -- ALGLIB --
5205      Copyright 19.07.2018 by Bochkanov Sergey
5206 *************************************************************************/
5207 void minlpsetcost(const minlpstate &state, const real_1d_array &c, const xparams _xparams = alglib::xdefault);
5208 
5209 
5210 /*************************************************************************
5211 This function sets scaling coefficients.
5212 
5213 ALGLIB optimizers use scaling matrices to test stopping  conditions and as
5214 preconditioner.
5215 
5216 Scale of the I-th variable is a translation invariant measure of:
5217 a) "how large" the variable is
5218 b) how large the step should be to make significant changes in the
5219    function
5220 
5221 INPUT PARAMETERS:
5222     State   -   structure stores algorithm state
5223     S       -   array[N], non-zero scaling coefficients
5224                 S[i] may be negative, sign doesn't matter.
5225 
5226   -- ALGLIB --
5227      Copyright 19.07.2018 by Bochkanov Sergey
5228 *************************************************************************/
5229 void minlpsetscale(const minlpstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
5230 
5231 
5232 /*************************************************************************
5233 This function sets box constraints for LP solver (all variables  at  once,
5234 different constraints for different variables).
5235 
5236 The default state of constraints is to have all variables fixed  at  zero.
5237 You have to overwrite it by your own constraint vector. Constraint  status
5238 is preserved until constraints are  explicitly  overwritten  with  another
5239 minlpsetbc()  call,   overwritten   with  minlpsetbcall(),  or   partially
overwritten with minlpsetbci() call.
5241 
5242 Following types of constraints are supported:
5243 
5244     DESCRIPTION         CONSTRAINT              HOW TO SPECIFY
5245     fixed variable      x[i]=Bnd[i]             BndL[i]=BndU[i]
5246     lower bound         BndL[i]<=x[i]           BndU[i]=+INF
5247     upper bound         x[i]<=BndU[i]           BndL[i]=-INF
5248     range               BndL[i]<=x[i]<=BndU[i]  ...
    free variable       -                       BndL[I]=-INF, BndU[I]=+INF
5250 
5251 INPUT PARAMETERS:
5252     State   -   structure stores algorithm state
5253     BndL    -   lower bounds, array[N].
5254     BndU    -   upper bounds, array[N].
5255 
5256 NOTE: infinite values can be specified by means of Double.PositiveInfinity
5257       and  Double.NegativeInfinity  (in  C#)  and  alglib::fp_posinf   and
5258       alglib::fp_neginf (in C++).
5259 
5260 NOTE: you may replace infinities by very small/very large values,  but  it
5261       is not recommended because large numbers may introduce large numerical
5262       errors in the algorithm.
5263 
5264 NOTE: if constraints for all variables are same you may use minlpsetbcall()
      which allows you to specify constraints without using arrays.
5266 
5267 NOTE: BndL>BndU will result in LP problem being recognized as infeasible.
5268 
5269   -- ALGLIB --
5270      Copyright 19.07.2018 by Bochkanov Sergey
5271 *************************************************************************/
5272 void minlpsetbc(const minlpstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
5273 
5274 
5275 /*************************************************************************
5276 This function sets box constraints for LP solver (all variables  at  once,
5277 same constraints for all variables)
5278 
5279 The default state of constraints is to have all variables fixed  at  zero.
5280 You have to overwrite it by your own constraint vector. Constraint  status
5281 is preserved until constraints are  explicitly  overwritten  with  another
minlpsetbcall() or minlpsetbc() call, or partially overwritten with a
minlpsetbci() call.
5283 
5284 Following types of constraints are supported:
5285 
5286     DESCRIPTION         CONSTRAINT              HOW TO SPECIFY
5287     fixed variable      x[i]=Bnd[i]             BndL[i]=BndU[i]
5288     lower bound         BndL[i]<=x[i]           BndU[i]=+INF
5289     upper bound         x[i]<=BndU[i]           BndL[i]=-INF
5290     range               BndL[i]<=x[i]<=BndU[i]  ...
    free variable       -                       BndL[I]=-INF, BndU[I]=+INF
5292 
5293 INPUT PARAMETERS:
5294     State   -   structure stores algorithm state
5295     BndL    -   lower bound, same for all variables
5296     BndU    -   upper bound, same for all variables
5297 
5298 NOTE: infinite values can be specified by means of Double.PositiveInfinity
5299       and  Double.NegativeInfinity  (in  C#)  and  alglib::fp_posinf   and
5300       alglib::fp_neginf (in C++).
5301 
5302 NOTE: you may replace infinities by very small/very large values,  but  it
5303       is not recommended because large numbers may introduce large numerical
5304       errors in the algorithm.
5305 
5306 NOTE: minlpsetbc() can  be  used  to  specify  different  constraints  for
5307       different variables.
5308 
5309 NOTE: BndL>BndU will result in LP problem being recognized as infeasible.
5310 
5311   -- ALGLIB --
5312      Copyright 19.07.2018 by Bochkanov Sergey
5313 *************************************************************************/
5314 void minlpsetbcall(const minlpstate &state, const double bndl, const double bndu, const xparams _xparams = alglib::xdefault);
5315 
5316 
5317 /*************************************************************************
5318 This function sets box constraints for I-th variable (other variables are
5319 not modified).
5320 
5321 The default state of constraints is to have all variables fixed  at  zero.
5322 You have to overwrite it by your own constraint vector.
5323 
5324 Following types of constraints are supported:
5325 
5326     DESCRIPTION         CONSTRAINT              HOW TO SPECIFY
5327     fixed variable      x[i]=Bnd[i]             BndL[i]=BndU[i]
5328     lower bound         BndL[i]<=x[i]           BndU[i]=+INF
5329     upper bound         x[i]<=BndU[i]           BndL[i]=-INF
5330     range               BndL[i]<=x[i]<=BndU[i]  ...
    free variable       -                       BndL[I]=-INF, BndU[I]=+INF
5332 
5333 INPUT PARAMETERS:
5334     State   -   structure stores algorithm state
5335     I       -   variable index, in [0,N)
5336     BndL    -   lower bound for I-th variable
5337     BndU    -   upper bound for I-th variable
5338 
5339 NOTE: infinite values can be specified by means of Double.PositiveInfinity
5340       and  Double.NegativeInfinity  (in  C#)  and  alglib::fp_posinf   and
5341       alglib::fp_neginf (in C++).
5342 
5343 NOTE: you may replace infinities by very small/very large values,  but  it
5344       is not recommended because large numbers may introduce large numerical
5345       errors in the algorithm.
5346 
5347 NOTE: minlpsetbc() can  be  used  to  specify  different  constraints  for
5348       different variables.
5349 
5350 NOTE: BndL>BndU will result in LP problem being recognized as infeasible.
5351 
5352   -- ALGLIB --
5353      Copyright 19.07.2018 by Bochkanov Sergey
5354 *************************************************************************/
5355 void minlpsetbci(const minlpstate &state, const ae_int_t i, const double bndl, const double bndu, const xparams _xparams = alglib::xdefault);
5356 
5357 
5358 /*************************************************************************
5359 This function sets one-sided linear constraints A*x ~ AU, where "~" can be
5360 a mix of "<=", "=" and ">=".
5361 
5362 IMPORTANT: this function is provided here for compatibility with the  rest
5363            of ALGLIB optimizers which accept constraints  in  format  like
5364            this one. Many real-life problems feature two-sided constraints
5365            like a0 <= a*x <= a1. It is really inefficient to add them as a
5366            pair of one-sided constraints.
5367 
5368            Use minlpsetlc2dense(), minlpsetlc2(), minlpaddlc2()  (or   its
5369            sparse version) wherever possible.
5370 
5371 INPUT PARAMETERS:
5372     State   -   structure previously allocated with minlpcreate() call.
5373     A       -   linear constraints, array[K,N+1]. Each row of A represents
5374                 one constraint, with first N elements being linear coefficients,
5375                 and last element being right side.
5376     CT      -   constraint types, array[K]:
5377                 * if CT[i]>0, then I-th constraint is A[i,*]*x >= A[i,n]
5378                 * if CT[i]=0, then I-th constraint is A[i,*]*x  = A[i,n]
5379                 * if CT[i]<0, then I-th constraint is A[i,*]*x <= A[i,n]
5380     K       -   number of equality/inequality constraints,  K>=0;  if  not
5381                 given, inferred from sizes of A and CT.
5382 
5383   -- ALGLIB --
5384      Copyright 19.07.2018 by Bochkanov Sergey
5385 *************************************************************************/
5386 void minlpsetlc(const minlpstate &state, const real_2d_array &a, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
5387 void minlpsetlc(const minlpstate &state, const real_2d_array &a, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
5388 
5389 
5390 /*************************************************************************
5391 This function sets two-sided linear constraints AL <= A*x <= AU.
5392 
5393 This version accepts dense matrix as  input;  internally  LP  solver  uses
5394 sparse storage  anyway  (most  LP  problems  are  sparse),  but  for  your
5395 convenience it may accept dense inputs. This  function  overwrites  linear
5396 constraints set by previous calls (if such calls were made).
5397 
We recommend using the sparse version of this function unless  you  solve
a small-scale LP problem (fewer than a few hundred variables).
5400 
NOTE: there exist several other versions of this function:
5402       * one-sided dense version which  accepts  constraints  in  the  same
5403         format as one used by QP and  NLP solvers
5404       * two-sided sparse version which accepts sparse matrix
5405       * two-sided dense  version which allows you to add constraints row by row
5406       * two-sided sparse version which allows you to add constraints row by row
5407 
5408 INPUT PARAMETERS:
5409     State   -   structure previously allocated with minlpcreate() call.
    A       -   linear constraints, array[K,N]. Each row of  A  represents
                one constraint. One-sided inequality constraints, two-sided
                inequality constraints and  equality  constraints  are all
                supported (see below).
5414     AL, AU  -   lower and upper bounds, array[K];
                * AL[i]=AU[i] => equality constraint Ai*x=AL[i]
5416                 * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i]
5417                 * AL[i]=-INF  => one-sided constraint Ai*x<=AU[i]
5418                 * AU[i]=+INF  => one-sided constraint AL[i]<=Ai*x
5419                 * AL[i]=-INF, AU[i]=+INF => constraint is ignored
5420     K       -   number of equality/inequality constraints,  K>=0;  if  not
5421                 given, inferred from sizes of A, AL, AU.
5422 
5423   -- ALGLIB --
5424      Copyright 19.07.2018 by Bochkanov Sergey
5425 *************************************************************************/
5426 void minlpsetlc2dense(const minlpstate &state, const real_2d_array &a, const real_1d_array &al, const real_1d_array &au, const ae_int_t k, const xparams _xparams = alglib::xdefault);
5427 void minlpsetlc2dense(const minlpstate &state, const real_2d_array &a, const real_1d_array &al, const real_1d_array &au, const xparams _xparams = alglib::xdefault);
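
/*************************************************************************
USAGE SKETCH (minlpsetlc2dense)

A minimal sketch of the two-sided dense format AL <= A*x <= AU for a
2-variable problem (state creation is not shown):

    #include "optimization.h"

    void set_twosided_lc_example(alglib::minlpstate &state)
    {
        using namespace alglib;
        real_2d_array a = "[[1,1],[1,-1],[1,0]]";
        real_1d_array al, au;
        al.setlength(3);
        au.setlength(3);
        al[0] = 1.0;       au[0] = 4.0;   // 1 <= x0+x1 <= 4
        al[1] = 0.0;       au[1] = 0.0;   // x0-x1 = 0
        al[2] = fp_neginf; au[2] = 3.0;   // x0 <= 3
        minlpsetlc2dense(state, a, al, au);
    }
*************************************************************************/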
5428 
5429 
5430 /*************************************************************************
5431 This  function  sets  two-sided linear  constraints  AL <= A*x <= AU  with
5432 sparse constraining matrix A. Recommended for large-scale problems.
5433 
5434 This  function  overwrites  linear  (non-box)  constraints set by previous
5435 calls (if such calls were made).
5436 
5437 INPUT PARAMETERS:
5438     State   -   structure previously allocated with minlpcreate() call.
5439     A       -   sparse matrix with size [K,N] (exactly!).
5440                 Each row of A represents one general linear constraint.
5441                 A can be stored in any sparse storage format.
5442     AL, AU  -   lower and upper bounds, array[K];
                * AL[i]=AU[i] => equality constraint Ai*x=AL[i]
5444                 * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i]
5445                 * AL[i]=-INF  => one-sided constraint Ai*x<=AU[i]
5446                 * AU[i]=+INF  => one-sided constraint AL[i]<=Ai*x
5447                 * AL[i]=-INF, AU[i]=+INF => constraint is ignored
5448     K       -   number  of equality/inequality constraints, K>=0.  If  K=0
5449                 is specified, A, AL, AU are ignored.
5450 
5451   -- ALGLIB --
5452      Copyright 19.07.2018 by Bochkanov Sergey
5453 *************************************************************************/
5454 void minlpsetlc2(const minlpstate &state, const sparsematrix &a, const real_1d_array &al, const real_1d_array &au, const ae_int_t k, const xparams _xparams = alglib::xdefault);
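
/*************************************************************************
USAGE SKETCH (minlpsetlc2)

A minimal sketch of the sparse two-sided format. sparsecreate() and
sparseset() from the ALGLIB sparse-matrix API (declared in linalg.h,  not
in this excerpt) are assumed to be available:

    #include "optimization.h"

    void set_sparse_lc_example(alglib::minlpstate &state)
    {
        using namespace alglib;
        // one constraint on 1000 variables:  2 <= x[0] + x[999] <= 5
        sparsematrix a;
        sparsecreate(1, 1000, 2, a);
        sparseset(a, 0, 0,   1.0);
        sparseset(a, 0, 999, 1.0);
        real_1d_array al = "[2]";
        real_1d_array au = "[5]";
        minlpsetlc2(state, a, al, au, 1);   // K=1 must match the matrix height
    }
*************************************************************************/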
5455 
5456 
5457 /*************************************************************************
5458 This function appends two-sided linear constraint  AL <= A*x <= AU  to the
5459 list of currently present constraints.
5460 
This version accepts a dense constraint vector as input, but sparsifies it
for internal storage and processing. Thus, the time to add one  constraint
is O(N) - we have to scan the entire array of length N. The sparse version
of this function is an order of magnitude faster for  constraints  with  a
few nonzeros per row.
5466 
5467 INPUT PARAMETERS:
5468     State   -   structure previously allocated with minlpcreate() call.
    A       -   linear constraint coefficients, array[N]. The right side is
                NOT included.
    AL, AU  -   lower and upper bounds;
                * AL=AU    => equality constraint A*x=AL
                * AL<AU    => two-sided constraint AL<=A*x<=AU
                * AL=-INF  => one-sided constraint A*x<=AU
                * AU=+INF  => one-sided constraint AL<=A*x
                * AL=-INF, AU=+INF => constraint is ignored
5477 
5478   -- ALGLIB --
5479      Copyright 19.07.2018 by Bochkanov Sergey
5480 *************************************************************************/
5481 void minlpaddlc2dense(const minlpstate &state, const real_1d_array &a, const double al, const double au, const xparams _xparams = alglib::xdefault);
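
/*************************************************************************
USAGE SKETCH (minlpaddlc2dense)

A minimal sketch: appending one dense two-sided row  0 <= x0+2*x1 <= 10
to a 2-variable problem (state creation is not shown):

    #include "optimization.h"

    void append_dense_row_example(alglib::minlpstate &state)
    {
        alglib::real_1d_array a = "[1,2]";   // coefficients only, no right side
        alglib::minlpaddlc2dense(state, a, 0.0, 10.0);
    }
*************************************************************************/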
5482 
5483 
5484 /*************************************************************************
5485 This function appends two-sided linear constraint  AL <= A*x <= AU  to the
5486 list of currently present constraints.
5487 
The constraint is passed in compressed format: as a list of the  non-zero
entries of the coefficient vector A. This approach is more efficient than
dense storage for highly sparse constraint vectors.
5491 
5492 INPUT PARAMETERS:
5493     State   -   structure previously allocated with minlpcreate() call.
5494     IdxA    -   array[NNZ], indexes of non-zero elements of A:
5495                 * can be unsorted
5496                 * can include duplicate indexes (corresponding entries  of
5497                   ValA[] will be summed)
5498     ValA    -   array[NNZ], values of non-zero elements of A
5499     NNZ     -   number of non-zero coefficients in A
5500     AL, AU  -   lower and upper bounds;
                * AL=AU    => equality constraint A*x=AL
5502                 * AL<AU    => two-sided constraint AL<=A*x<=AU
5503                 * AL=-INF  => one-sided constraint A*x<=AU
5504                 * AU=+INF  => one-sided constraint AL<=A*x
5505                 * AL=-INF, AU=+INF => constraint is ignored
5506 
5507   -- ALGLIB --
5508      Copyright 19.07.2018 by Bochkanov Sergey
5509 *************************************************************************/
5510 void minlpaddlc2(const minlpstate &state, const integer_1d_array &idxa, const real_1d_array &vala, const ae_int_t nnz, const double al, const double au, const xparams _xparams = alglib::xdefault);
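
/*************************************************************************
USAGE SKETCH (minlpaddlc2)

A minimal sketch of the compressed format: the appended row has only two
nonzeros, at columns 0 and 999, so only those entries are passed:

    #include "optimization.h"

    void append_sparse_row_example(alglib::minlpstate &state)
    {
        alglib::integer_1d_array idx = "[0,999]";
        alglib::real_1d_array    val = "[1,1]";
        // appends  2 <= x[0] + x[999] <= 5
        alglib::minlpaddlc2(state, idx, val, 2, 2.0, 5.0);
    }
*************************************************************************/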
5511 
5512 
5513 /*************************************************************************
5514 This function solves LP problem.
5515 
5516 INPUT PARAMETERS:
5517     State   -   algorithm state
5518 
5519 You should use minlpresults() function to access results  after  calls  to
5520 this function.
5521 
5522   -- ALGLIB --
5523      Copyright 19.07.2018 by Bochkanov Sergey.
5524 *************************************************************************/
5525 void minlpoptimize(const minlpstate &state, const xparams _xparams = alglib::xdefault);
5526 
5527 
5528 /*************************************************************************
5529 LP solver results
5530 
5531 INPUT PARAMETERS:
5532     State   -   algorithm state
5533 
5534 OUTPUT PARAMETERS:
    X       -   array[N], solution. Filled with zeros on failure.
    Rep     -   optimization report. You should check Rep.TerminationType,
                which contains the completion code; you may also check the
                other fields, which contain additional information about the
                algorithm's run.
5540 
                Failure codes returned by the algorithm are:
5542                 * -4    LP problem is primal unbounded (dual infeasible)
5543                 * -3    LP problem is primal infeasible (dual unbounded)
5544 
5545                 Success codes:
5546                 *  1..4 successful completion
                *  5    MaxIts steps were taken
5548 
5549   -- ALGLIB --
5550      Copyright 11.01.2011 by Bochkanov Sergey
5551 *************************************************************************/
5552 void minlpresults(const minlpstate &state, real_1d_array &x, minlpreport &rep, const xparams _xparams = alglib::xdefault);
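
/*************************************************************************
USAGE SKETCH (minlpoptimize/minlpresults)

An end-to-end sketch: minimize -x0-x1 subject to x0,x1>=0 and x0+x1<=4.
minlpcreate() and minlpsetcost() are assumed from the rest of the  MinLP
API (they are declared earlier in this header, outside of this excerpt):

    #include <cstdio>
    #include "optimization.h"

    int lp_solve_example()
    {
        using namespace alglib;
        minlpstate    state;
        minlpreport   rep;
        real_1d_array x;

        minlpcreate(2, state);                      // two variables
        real_1d_array c = "[-1,-1]";
        minlpsetcost(state, c);                     // objective c'*x
        minlpsetbci(state, 0, 0.0, fp_posinf);      // x0 >= 0
        minlpsetbci(state, 1, 0.0, fp_posinf);      // x1 >= 0
        real_1d_array a = "[1,1]";
        minlpaddlc2dense(state, a, fp_neginf, 4.0); // x0+x1 <= 4

        minlpoptimize(state);
        minlpresults(state, x, rep);
        printf("termination type: %d\n", (int)rep.terminationtype);
        return (int)rep.terminationtype;
    }
*************************************************************************/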
5553 
5554 
5555 /*************************************************************************
5556 LP results
5557 
Buffered implementation of MinLPResults() which uses a pre-allocated buffer
to store X[]. If the buffer is too small, it is resized. This  version   is
intended for use in the inner cycles of performance-critical algorithms where
the array reallocation penalty is too large to be ignored.
5562 
5563   -- ALGLIB --
5564      Copyright 11.01.2011 by Bochkanov Sergey
5565 *************************************************************************/
5566 void minlpresultsbuf(const minlpstate &state, real_1d_array &x, minlpreport &rep, const xparams _xparams = alglib::xdefault);
5567 #endif
5568 
5569 #if defined(AE_COMPILE_NLCSLP) || !defined(AE_PARTIAL_BUILD)
5570 
5571 #endif
5572 
5573 #if defined(AE_COMPILE_MINNLC) || !defined(AE_PARTIAL_BUILD)
5574 /*************************************************************************
5575                   NONLINEARLY  CONSTRAINED  OPTIMIZATION
5576             WITH PRECONDITIONED AUGMENTED LAGRANGIAN ALGORITHM
5577 
5578 DESCRIPTION:
5579 The  subroutine  minimizes  function   F(x)  of N arguments subject to any
5580 combination of:
5581 * bound constraints
5582 * linear inequality constraints
5583 * linear equality constraints
5584 * nonlinear equality constraints Gi(x)=0
5585 * nonlinear inequality constraints Hi(x)<=0
5586 
5587 REQUIREMENTS:
5588 * user must provide function value and gradient for F(), H(), G()
5589 * starting point X0 must be feasible or not too far away from the feasible
5590   set
5591 * F(), G(), H() are twice continuously differentiable on the feasible  set
5592   and its neighborhood
5593 * nonlinear constraints G() and H() must have non-zero gradient at  G(x)=0
5594   and at H(x)=0. Say, constraint like x^2>=1 is supported, but x^2>=0   is
5595   NOT supported.
5596 
5597 USAGE:
5598 
Constrained optimization is far more complex than the  unconstrained  one.
5600 Nonlinearly constrained optimization is one of the most esoteric numerical
5601 procedures.
5602 
Here we give a very brief outline of the MinNLC optimizer.  We  strongly
recommend that you study the examples in the ALGLIB Reference Manual and
read the ALGLIB User Guide on optimization, which is available at
5606 http://www.alglib.net/optimization/
5607 
5608 1. User initializes algorithm state with MinNLCCreate() call  and  chooses
   what NLC solver to use. There is a solver which is  used  by  default,
   with default settings, but you should NOT rely on the default  choice.
   It may change in future releases of ALGLIB without notice, and no  one
   can guarantee that the new solver will be able to solve  your  problem
   with default settings.

   On the other hand, if you choose the solver explicitly, you can be pretty
   sure that it will keep working with new ALGLIB releases.
5617 
   In the current release the following solvers can be used:
   * SLP solver (activated with minnlcsetalgoslp() function) -  successive
     linear programming, recommended as the first step.
   * AUL solver (activated with minnlcsetalgoaul() function)  -  augmented
     Lagrangian method with dense preconditioner.

   The SLP solver is the most robust one in ALGLIB and  converges in fewer
   iterations than AUL; however, each iteration has higher overhead  -  we
   have to solve an LP problem at each step. On the other  hand,  AUL  has
   cheaper iterations - although it typically needs more of them -  and it
   is less robust in nonconvex settings.
5629 
5630 2. [optional] user activates OptGuard  integrity checker  which  tries  to
5631    detect possible errors in the user-supplied callbacks:
5632    * discontinuity/nonsmoothness of the target/nonlinear constraints
5633    * errors in the analytic gradient provided by user
5634    This feature is essential for early prototyping stages because it helps
5635    to catch common coding and problem statement errors.
   OptGuard can be activated with the following functions (one  per  check
   performed):
5638    * minnlcoptguardsmoothness()
5639    * minnlcoptguardgradient()
5640 
5641 3. User adds boundary and/or linear and/or nonlinear constraints by  means
5642    of calling one of the following functions:
5643    a) minnlcsetbc() for boundary constraints
5644    b) minnlcsetlc() for linear constraints
5645    c) minnlcsetnlc() for nonlinear constraints
5646    You may combine (a), (b) and (c) in one optimization problem.
5647 
5648 4. User sets scale of the variables with minnlcsetscale() function. It  is
5649    VERY important to set  scale  of  the  variables,  because  nonlinearly
5650    constrained problems are hard to solve when variables are badly scaled.
5651 
5652 5. User sets  stopping  conditions  with  minnlcsetcond(). If  NLC  solver
5653    uses  inner/outer  iteration  layout,  this  function   sets   stopping
5654    conditions for INNER iterations.
5655 
5656 6. Finally, user calls minnlcoptimize()  function  which  takes  algorithm
5657    state and pointer (delegate, etc.) to callback function which calculates
5658    F/G/H.
5659 
5660 7. User calls  minnlcresults()  to  get  solution;  additionally  you  can
5661    retrieve OptGuard report with minnlcoptguardresults(), and get detailed
5662    report about purported errors in the target function with:
5663    * minnlcoptguardnonc1test0results()
5664    * minnlcoptguardnonc1test1results()
5665 
8. Optionally, the user may call minnlcrestartfrom() to solve another problem
   with the same N but another starting point.  minnlcrestartfrom()  allows
   you to reuse an already initialized structure.
5669 
5670 
5671 INPUT PARAMETERS:
5672     N       -   problem dimension, N>0:
5673                 * if given, only leading N elements of X are used
                * if not given, automatically determined from the size of X
5675     X       -   starting point, array[N]:
5676                 * it is better to set X to a feasible point
5677                 * but X can be infeasible, in which case algorithm will try
5678                   to find feasible point first, using X as initial
5679                   approximation.
5680 
5681 OUTPUT PARAMETERS:
5682     State   -   structure stores algorithm state
5683 
5684   -- ALGLIB --
5685      Copyright 06.06.2014 by Bochkanov Sergey
5686 *************************************************************************/
5687 void minnlccreate(const ae_int_t n, const real_1d_array &x, minnlcstate &state, const xparams _xparams = alglib::xdefault);
5688 void minnlccreate(const real_1d_array &x, minnlcstate &state, const xparams _xparams = alglib::xdefault);
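
/*************************************************************************
USAGE SKETCH (minnlccreate and the workflow above)

A compact sketch of steps 1-7: minimize x0+x1 subject to the nonlinear
inequality x0^2+x1^2-1 <= 0, using the SLP solver and an analytic Jacobian.
minnlcresults() and the minnlcreport type are referenced in the outline
above; their declarations appear further below in this header:

    #include <cstdio>
    #include "optimization.h"

    static void target_jac(const alglib::real_1d_array &x,
                           alglib::real_1d_array &fi,
                           alglib::real_2d_array &jac, void *ptr)
    {
        // fi[0]/row 0 of jac: target;  fi[1]/row 1: inequality H(x)<=0
        fi[0] = x[0] + x[1];
        jac[0][0] = 1.0;        jac[0][1] = 1.0;
        fi[1] = x[0]*x[0] + x[1]*x[1] - 1.0;
        jac[1][0] = 2.0*x[0];   jac[1][1] = 2.0*x[1];
    }

    void nlc_solve_example()
    {
        using namespace alglib;
        real_1d_array x0 = "[0.5,0.5]";
        real_1d_array s  = "[1,1]";
        minnlcstate   state;
        minnlcreport  rep;
        real_1d_array x;

        minnlccreate(2, x0, state);        // step 1: create optimizer...
        minnlcsetalgoslp(state);           // ...and choose the solver explicitly
        minnlcsetnlc(state, 0, 1);         // step 3: 0 equality, 1 inequality constraint
        minnlcsetscale(state, s);          // step 4: variable scales
        minnlcsetcond(state, 1.0e-7, 0);   // step 5: stopping conditions
        minnlcoptimize(state, target_jac); // step 6: run the optimizer
        minnlcresults(state, x, rep);      // step 7: retrieve the solution
        printf("x = [%.3f, %.3f]\n", x[0], x[1]);
    }
*************************************************************************/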
5689 
5690 
5691 /*************************************************************************
This subroutine is a finite  difference variant of MinNLCCreate(). It uses
finite differences in order to differentiate the target function.

The description below contains information which is specific to this
function only. We recommend reading the comments on MinNLCCreate() in order
to get more information about the creation of the NLC optimizer.
5698 
5699 INPUT PARAMETERS:
5700     N       -   problem dimension, N>0:
5701                 * if given, only leading N elements of X are used
                * if not given, automatically determined from the size of X
5703     X       -   starting point, array[N]:
5704                 * it is better to set X to a feasible point
5705                 * but X can be infeasible, in which case algorithm will try
5706                   to find feasible point first, using X as initial
5707                   approximation.
5708     DiffStep-   differentiation step, >0
5709 
5710 OUTPUT PARAMETERS:
5711     State   -   structure stores algorithm state
5712 
5713 NOTES:
5714 1. algorithm uses 4-point central formula for differentiation.
5715 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
5716    S[] is scaling vector which can be set by MinNLCSetScale() call.
3. we recommend you to use moderate values of the differentiation step. Too
   large a step will result in too large TRUNCATION errors, while too small
   a step will result in too large NUMERICAL errors. 1.0E-4  can  be a good
   value to start from.
4. Numerical  differentiation  is   very   inefficient  -   one   gradient
   calculation needs 4*N function evaluations. This function will work for
   any N - either small (1...10), moderate (10...100) or  large  (100...).
   However, the performance penalty will be too severe for all  but  small
   N.
   We should also say that code which relies on numerical  differentiation
   is less robust and precise. An imprecise gradient  may  slow  down  the
   convergence, especially on highly nonlinear problems.
   Thus we recommend using this function for fast prototyping  on   small-
   dimensional problems only, and implementing an  analytic  gradient   as
   soon as possible.
5732 
5733   -- ALGLIB --
5734      Copyright 06.06.2014 by Bochkanov Sergey
5735 *************************************************************************/
5736 void minnlccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minnlcstate &state, const xparams _xparams = alglib::xdefault);
5737 void minnlccreatef(const real_1d_array &x, const double diffstep, minnlcstate &state, const xparams _xparams = alglib::xdefault);
5738 
5739 
5740 /*************************************************************************
5741 This function sets boundary constraints for NLC optimizer.
5742 
5743 Boundary constraints are inactive by  default  (after  initial  creation).
5744 They are preserved after algorithm restart with  MinNLCRestartFrom().
5745 
5746 You may combine boundary constraints with  general  linear ones - and with
5747 nonlinear ones! Boundary constraints are  handled  more  efficiently  than
5748 other types.  Thus,  if  your  problem  has  mixed  constraints,  you  may
5749 explicitly specify some of them as boundary and save some time/space.
5750 
5751 INPUT PARAMETERS:
5752     State   -   structure stores algorithm state
5753     BndL    -   lower bounds, array[N].
5754                 If some (all) variables are unbounded, you may specify
5755                 very small number or -INF.
5756     BndU    -   upper bounds, array[N].
5757                 If some (all) variables are unbounded, you may specify
5758                 very large number or +INF.
5759 
5760 NOTE 1:  it is possible to specify  BndL[i]=BndU[i].  In  this  case  I-th
5761 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
5762 
5763 NOTE 2:  when you solve your problem  with  augmented  Lagrangian  solver,
5764          boundary constraints are  satisfied  only  approximately!  It  is
5765          possible   that  algorithm  will  evaluate  function  outside  of
5766          feasible area!
5767 
5768   -- ALGLIB --
5769      Copyright 06.06.2014 by Bochkanov Sergey
5770 *************************************************************************/
5771 void minnlcsetbc(const minnlcstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
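
/*************************************************************************
USAGE SKETCH (minnlcsetbc)

A minimal sketch: box constraints for a 3-variable MinNLC problem, using
infinities for one-sided bounds and free variables:

    #include "optimization.h"

    void nlc_set_box_example(alglib::minnlcstate &state)
    {
        using namespace alglib;
        real_1d_array bndl, bndu;
        bndl.setlength(3);
        bndu.setlength(3);
        bndl[0] = 0.0;       bndu[0] = 1.0;        // 0 <= x0 <= 1
        bndl[1] = fp_neginf; bndu[1] = 2.0;        // x1 <= 2
        bndl[2] = fp_neginf; bndu[2] = fp_posinf;  // x2 free
        minnlcsetbc(state, bndl, bndu);
    }
*************************************************************************/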
5772 
5773 
5774 /*************************************************************************
5775 This function sets linear constraints for MinNLC optimizer.
5776 
5777 Linear constraints are inactive by default (after initial creation).  They
5778 are preserved after algorithm restart with MinNLCRestartFrom().
5779 
5780 You may combine linear constraints with boundary ones - and with nonlinear
5781 ones! If your problem has mixed constraints, you  may  explicitly  specify
5782 some of them as linear. It  may  help  optimizer   to   handle  them  more
5783 efficiently.
5784 
5785 INPUT PARAMETERS:
5786     State   -   structure previously allocated with MinNLCCreate call.
5787     C       -   linear constraints, array[K,N+1].
5788                 Each row of C represents one constraint, either equality
5789                 or inequality (see below):
5790                 * first N elements correspond to coefficients,
5791                 * last element corresponds to the right part.
5792                 All elements of C (including right part) must be finite.
5793     CT      -   type of constraints, array[K]:
                * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n]
                * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n]
                * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n]
5797     K       -   number of equality/inequality constraints, K>=0:
5798                 * if given, only leading K elements of C/CT are used
5799                 * if not given, automatically determined from sizes of C/CT
5800 
5801 NOTE 1: when you solve your problem  with  augmented  Lagrangian   solver,
5802         linear constraints are  satisfied  only   approximately!   It   is
5803         possible   that  algorithm  will  evaluate  function  outside   of
5804         feasible area!
5805 
5806   -- ALGLIB --
5807      Copyright 06.06.2014 by Bochkanov Sergey
5808 *************************************************************************/
5809 void minnlcsetlc(const minnlcstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
5810 void minnlcsetlc(const minnlcstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
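
/*************************************************************************
USAGE SKETCH (minnlcsetlc)

A minimal sketch of the [K,N+1] format for a 2-variable MinNLC problem:
x0+x1 = 1 (equality) and x0-x1 >= -0.5 (inequality):

    #include "optimization.h"

    void nlc_set_lc_example(alglib::minnlcstate &state)
    {
        alglib::real_2d_array    c  = "[[1,1,1.0],[1,-1,-0.5]]";
        alglib::integer_1d_array ct = "[0,1]";   // 0 => "=", >0 => ">="
        alglib::minnlcsetlc(state, c, ct);       // K inferred from sizes of C/CT
    }
*************************************************************************/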
5811 
5812 
5813 /*************************************************************************
5814 This function sets nonlinear constraints for MinNLC optimizer.
5815 
In fact, this function sets the NUMBER of nonlinear   constraints.   The
constraints themselves (the constraint functions) are  passed   to   the
MinNLCOptimize() method. This method requires a user-defined vector
function F[] and its Jacobian J[], where:
* the first component of F[] and the first row of the Jacobian J[] correspond
  to the function being minimized
* the next NLEC components of F[] (and rows of J) correspond to   nonlinear
  equality constraints G_i(x)=0
* the next NLIC components of F[] (and rows of J) correspond to   nonlinear
  inequality constraints H_i(x)<=0
5826 
5827 NOTE: you may combine nonlinear constraints with linear/boundary ones.  If
5828       your problem has mixed constraints, you  may explicitly specify some
5829       of them as linear ones. It may help optimizer to  handle  them  more
5830       efficiently.
5831 
5832 INPUT PARAMETERS:
5833     State   -   structure previously allocated with MinNLCCreate call.
5834     NLEC    -   number of Non-Linear Equality Constraints (NLEC), >=0
    NLIC    -   number of Non-Linear Inequality Constraints (NLIC), >=0
5836 
5837 NOTE 1: when you solve your problem  with  augmented  Lagrangian   solver,
5838         nonlinear constraints are satisfied only  approximately!   It   is
5839         possible   that  algorithm  will  evaluate  function  outside   of
5840         feasible area!
5841 
5842 NOTE 2: algorithm scales variables  according  to   scale   specified   by
5843         MinNLCSetScale()  function,  so  it can handle problems with badly
5844         scaled variables (as long as we KNOW their scales).
5845 
5846         However,  there  is  no  way  to  automatically  scale   nonlinear
5847         constraints Gi(x) and Hi(x). Inappropriate scaling  of  Gi/Hi  may
        ruin convergence. Solving a problem with the constraint "1000*G0(x)=0"
        is NOT the same as solving it with the constraint "0.001*G0(x)=0".
5850 
5851         It  means  that  YOU  are  the  one who is responsible for correct
5852         scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you
5853         to scale nonlinear constraints in such way that I-th component  of
5854         dG/dX (or dH/dx) has approximately unit  magnitude  (for  problems
5855         with unit scale)  or  has  magnitude approximately equal to 1/S[i]
5856         (where S is a scale set by MinNLCSetScale() function).
5857 
5858 
5859   -- ALGLIB --
5860      Copyright 06.06.2014 by Bochkanov Sergey
5861 *************************************************************************/
5862 void minnlcsetnlc(const minnlcstate &state, const ae_int_t nlec, const ae_int_t nlic, const xparams _xparams = alglib::xdefault);
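
/*************************************************************************
USAGE SKETCH (minnlcsetnlc)

A sketch of the callback layout described above for NLEC=1, NLIC=1: fi[0]
is the target, fi[1] the equality G0(x)=0, fi[2] the inequality H0(x)<=0.
The callback follows the function-only signature accepted by the  variant
of minnlcoptimize() used together with minnlccreatef() (see below):

    #include "optimization.h"

    static void fvec_layout_example(const alglib::real_1d_array &x,
                                    alglib::real_1d_array &fi, void *ptr)
    {
        fi[0] = x[0]*x[0] + x[1]*x[1];   // target F(x)
        fi[1] = x[0] + x[1] - 1.0;       // G0(x) = 0
        fi[2] = x[0] - 0.5;              // H0(x) <= 0
    }

    void nlc_set_nlc_example(alglib::minnlcstate &state)
    {
        alglib::minnlcsetnlc(state, 1, 1);   // 1 equality + 1 inequality constraint
    }
*************************************************************************/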
5863 
5864 
5865 /*************************************************************************
5866 This function sets stopping conditions for inner iterations of  optimizer.
5867 
5868 INPUT PARAMETERS:
5869     State   -   structure which stores algorithm state
5870     EpsX    -   >=0
5871                 The subroutine finishes its work if  on  k+1-th  iteration
5872                 the condition |v|<=EpsX is fulfilled, where:
                * |.| means Euclidean norm
5874                 * v - scaled step vector, v[i]=dx[i]/s[i]
5875                 * dx - step vector, dx=X(k+1)-X(k)
5876                 * s - scaling coefficients set by MinNLCSetScale()
5877     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
5878                 iterations is unlimited.
5879 
5880 Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic
5881 selection of the stopping condition.
5882 
5883   -- ALGLIB --
5884      Copyright 06.06.2014 by Bochkanov Sergey
5885 *************************************************************************/
5886 void minnlcsetcond(const minnlcstate &state, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
5887 
5888 
5889 /*************************************************************************
5890 This function sets scaling coefficients for NLC optimizer.
5891 
5892 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
5893 size and gradient are scaled before comparison with tolerances).  Scale of
5894 the I-th variable is a translation invariant measure of:
5895 a) "how large" the variable is
5896 b) how large the step should be to make significant changes in the function
5897 
5898 Scaling is also used by finite difference variant of the optimizer  - step
5899 along I-th axis is equal to DiffStep*S[I].
5900 
5901 INPUT PARAMETERS:
5902     State   -   structure stores algorithm state
5903     S       -   array[N], non-zero scaling coefficients
5904                 S[i] may be negative, sign doesn't matter.
5905 
5906   -- ALGLIB --
5907      Copyright 06.06.2014 by Bochkanov Sergey
5908 *************************************************************************/
5909 void minnlcsetscale(const minnlcstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
5910 
5911 
5912 /*************************************************************************
5913 This function sets preconditioner to "inexact LBFGS-based" mode.
5914 
5915 Preconditioning is very important for convergence of  Augmented Lagrangian
5916 algorithm because presence of penalty term makes problem  ill-conditioned.
5917 Difference between  performance  of  preconditioned  and  unpreconditioned
5918 methods can be as large as 100x!
5919 
5920 MinNLC optimizer may use following preconditioners,  each  with   its  own
5921 benefits and drawbacks:
5922     a) inexact LBFGS-based, with O(N*K) evaluation time
5923     b) exact low rank one,  with O(N*K^2) evaluation time
5924     c) exact robust one,    with O(N^3+K*N^2) evaluation time
5925 where K is a total number of general linear and nonlinear constraints (box
5926 ones are not counted).
5927 
The inexact LBFGS-based preconditioner uses the L-BFGS  formula   combined
with an orthogonality assumption  to  perform  very  fast updates. For  an
N-dimensional problem with K general linear or nonlinear constraints (boundary
ones are not counted) it has O(N*K) cost per iteration. This preconditioner
has the best quality (fewest iterations) when the general  linear  and  non-
linear constraints are orthogonal to each other (orthogonality with respect
to boundary constraints is not required). The number of iterations increases
when constraints are non-orthogonal, because the algorithm assumes
orthogonality, but it is still better than no preconditioner at all.
5937 
5938 INPUT PARAMETERS:
5939     State   -   structure stores algorithm state
5940 
5941   -- ALGLIB --
5942      Copyright 26.09.2014 by Bochkanov Sergey
5943 *************************************************************************/
5944 void minnlcsetprecinexact(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
5945 
5946 
5947 /*************************************************************************
5948 This function sets preconditioner to "exact low rank" mode.
5949 
5950 Preconditioning is very important for convergence of  Augmented Lagrangian
5951 algorithm because presence of penalty term makes problem  ill-conditioned.
5952 Difference between  performance  of  preconditioned  and  unpreconditioned
5953 methods can be as large as 100x!
5954 
5955 MinNLC optimizer may use following preconditioners,  each  with   its  own
5956 benefits and drawbacks:
5957     a) inexact LBFGS-based, with O(N*K) evaluation time
5958     b) exact low rank one,  with O(N*K^2) evaluation time
5959     c) exact robust one,    with O(N^3+K*N^2) evaluation time
5960 where K is a total number of general linear and nonlinear constraints (box
5961 ones are not counted).
5962 
5963 It also provides special unpreconditioned mode of operation which  can  be
5964 used for test purposes. Comments below discuss low rank preconditioner.
5965 
5966 Exact low-rank preconditioner  uses  Woodbury  matrix  identity  to  build
5967 quadratic model of the penalized function. It has following features:
5968 * no special assumptions about orthogonality of constraints
5969 * preconditioner evaluation is optimized for K<<N. Its cost  is  O(N*K^2),
5970   so it may become prohibitively slow for K>=N.
* finally, stability of the process is guaranteed only for K<<N.  Woodbury
  updates often fail for K>=N due to degeneracy of intermediate  matrices.
  That's why we recommend using the "exact robust" preconditioner for such
  cases.
5975 
5976 RECOMMENDATIONS
5977 
We recommend choosing between the "exact low  rank"  and  "exact  robust"
preconditioners, with the "low rank" version being chosen when  you  know
in advance that the total count of non-box constraints won't exceed N, and
the "robust" version being chosen when you need a bulletproof solution.
5982 
5983 INPUT PARAMETERS:
5984     State   -   structure stores algorithm state
5985     UpdateFreq- update frequency. Preconditioner is  rebuilt  after  every
5986                 UpdateFreq iterations. Recommended value: 10 or higher.
5987                 Zero value means that good default value will be used.
5988 
5989   -- ALGLIB --
5990      Copyright 26.09.2014 by Bochkanov Sergey
5991 *************************************************************************/
5992 void minnlcsetprecexactlowrank(const minnlcstate &state, const ae_int_t updatefreq, const xparams _xparams = alglib::xdefault);
5993 
5994 
5995 /*************************************************************************
5996 This function sets preconditioner to "exact robust" mode.
5997 
5998 Preconditioning is very important for convergence of  Augmented Lagrangian
5999 algorithm because presence of penalty term makes problem  ill-conditioned.
6000 Difference between  performance  of  preconditioned  and  unpreconditioned
6001 methods can be as large as 100x!
6002 
6003 MinNLC optimizer may use following preconditioners,  each  with   its  own
6004 benefits and drawbacks:
6005     a) inexact LBFGS-based, with O(N*K) evaluation time
6006     b) exact low rank one,  with O(N*K^2) evaluation time
6007     c) exact robust one,    with O(N^3+K*N^2) evaluation time
6008 where K is a total number of general linear and nonlinear constraints (box
6009 ones are not counted).
6010 
6011 It also provides special unpreconditioned mode of operation which  can  be
6012 used for test purposes. Comments below discuss robust preconditioner.
6013 
6014 Exact  robust  preconditioner   uses   Cholesky  decomposition  to  invert
6015 approximate Hessian matrix H=D+W'*C*W (where D stands for  diagonal  terms
6016 of Hessian, combined result of initial scaling matrix and penalty from box
6017 constraints; W stands for general linear constraints and linearization  of
6018 nonlinear ones; C stands for diagonal matrix of penalty coefficients).
6019 
6020 This preconditioner has following features:
6021 * no special assumptions about constraint structure
6022 * preconditioner is optimized  for  stability;  unlike  "exact  low  rank"
6023   version which fails for K>=N, this one works well for any value of K.
* the only drawback is that it takes O(N^3+K*N^2) time to  build  it.   No
  economical  Woodbury update is applied even when it  makes  sense,  thus
  there exist situations (K<<N) when the "exact  low  rank" preconditioner
  outperforms this one.
6028 
6029 RECOMMENDATIONS
6030 
We recommend choosing between the "exact low  rank"  and  "exact  robust"
preconditioners, with the "low rank" version being chosen when  you  know
in advance that the total count of non-box constraints won't exceed N, and
the "robust" version being chosen when you need a bulletproof solution.
6035 
6036 INPUT PARAMETERS:
6037     State   -   structure stores algorithm state
6038     UpdateFreq- update frequency. Preconditioner is  rebuilt  after  every
6039                 UpdateFreq iterations. Recommended value: 10 or higher.
6040                 Zero value means that good default value will be used.
6041 
6042   -- ALGLIB --
6043      Copyright 26.09.2014 by Bochkanov Sergey
6044 *************************************************************************/
6045 void minnlcsetprecexactrobust(const minnlcstate &state, const ae_int_t updatefreq, const xparams _xparams = alglib::xdefault);
6046 
6047 
6048 /*************************************************************************
6049 This function sets preconditioner to "turned off" mode.
6050 
6051 Preconditioning is very important for convergence of  Augmented Lagrangian
6052 algorithm because presence of penalty term makes problem  ill-conditioned.
6053 Difference between  performance  of  preconditioned  and  unpreconditioned
6054 methods can be as large as 100x!
6055 
The MinNLC optimizer may utilize several preconditioners, each  with  its
own benefits and drawbacks: the inexact LBFGS-based one, the  exact   low
rank one and the exact robust one (see the comments above). It also provides
a special unpreconditioned mode of operation which can be used  for  test
purposes.
6060 
6061 This function activates this test mode. Do not use it in  production  code
6062 to solve real-life problems.
6063 
6064 INPUT PARAMETERS:
6065     State   -   structure stores algorithm state
6066 
6067   -- ALGLIB --
6068      Copyright 26.09.2014 by Bochkanov Sergey
6069 *************************************************************************/
6070 void minnlcsetprecnone(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
6071 
6072 
6073 /*************************************************************************
6074 This function sets maximum step length (after scaling of step vector  with
6075 respect to variable scales specified by minnlcsetscale() call).
6076 
6077 INPUT PARAMETERS:
6078     State   -   structure which stores algorithm state
6079     StpMax  -   maximum step length, >=0. Set StpMax to 0.0 (default),  if
6080                 you don't want to limit step length.
6081 
Use this subroutine when you optimize a target function which contains exp()
or other fast-growing functions, and the optimization algorithm makes steps
so large that they lead to overflow. This function allows us to reject steps
that are too large (and therefore expose us  to  the  possible   overflow)
without actually calculating the function value at x+stp*d.
6087 
6088 NOTE: different solvers employed by MinNLC optimizer use  different  norms
6089       for step; AUL solver uses 2-norm, whilst SLP solver uses INF-norm.
6090 
6091   -- ALGLIB --
6092      Copyright 02.04.2010 by Bochkanov Sergey
6093 *************************************************************************/
6094 void minnlcsetstpmax(const minnlcstate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
6095 
6096 
6097 /*************************************************************************
6098 This  function  tells MinNLC unit to use  Augmented  Lagrangian  algorithm
6099 for nonlinearly constrained  optimization.  This  algorithm  is  a  slight
6100 modification of one described in "A Modified Barrier-Augmented  Lagrangian
6101 Method for  Constrained  Minimization  (1999)"  by  D.GOLDFARB,  R.POLYAK,
6102 K. SCHEINBERG, I.YUZEFOVICH.
6103 
6104 AUL solver can be significantly faster than SLP on easy problems, although
6105 it is less robust than SLP (the "gold standard" of robust optimization).
6106 
6107 Augmented Lagrangian algorithm works by converting problem  of  minimizing
6108 F(x) subject to equality/inequality constraints   to unconstrained problem
6109 of the form
6110 
6111     min[ f(x) +
6112         + Rho*PENALTY_EQ(x)   + SHIFT_EQ(x,Nu1) +
6113         + Rho*PENALTY_INEQ(x) + SHIFT_INEQ(x,Nu2) ]
6114 
6115 where:
6116 * Rho is a fixed penalization coefficient
6117 * PENALTY_EQ(x) is a penalty term, which is used to APPROXIMATELY  enforce
6118   equality constraints
6119 * SHIFT_EQ(x) is a special "shift"  term  which  is  used  to  "fine-tune"
6120   equality constraints, greatly increasing precision
6121 * PENALTY_INEQ(x) is a penalty term which is used to approximately enforce
6122   inequality constraints
6123 * SHIFT_INEQ(x) is a special "shift"  term  which  is  used to "fine-tune"
6124   inequality constraints, greatly increasing precision
6125 * Nu1/Nu2 are vectors of Lagrange coefficients which are fine-tuned during
6126   outer iterations of algorithm
6127 
6128 This  version  of  AUL  algorithm  uses   preconditioner,  which   greatly
6129 accelerates convergence. Because this  algorithm  is  similar  to  penalty
6130 methods,  it  may  perform  steps  into  infeasible  area.  All  kinds  of
6131 constraints (boundary, linear and nonlinear ones) may   be   violated   in
6132 intermediate points - and in the solution.  However,  properly  configured
6133 AUL method is significantly better at handling  constraints  than  barrier
6134 and/or penalty methods.
6135 
6136 The very basic outline of algorithm is given below:
6137 1) first outer iteration is performed with "default"  values  of  Lagrange
6138    multipliers Nu1/Nu2. Solution quality is low (candidate  point  can  be
6139    too  far  away  from  true  solution; large violation of constraints is
6140    possible) and is comparable with that of penalty methods.
6141 2) subsequent outer iterations  refine  Lagrange  multipliers  and improve
6142    quality of the solution.
6143 
6144 INPUT PARAMETERS:
6145     State   -   structure which stores algorithm state
6146     Rho     -   penalty coefficient, Rho>0:
6147                 * large enough  that  algorithm  converges  with   desired
6148                   precision. Minimum value is 10*max(S'*diag(H)*S),  where
6149                   S is a scale matrix (set by MinNLCSetScale) and H  is  a
6150                   Hessian of the function being minimized. If you can  not
6151                   easily estimate Hessian norm,  see  our  recommendations
6152                   below.
6153                 * not TOO large to prevent ill-conditioning
6154                 * for unit-scale problems (variables and Hessian have unit
6155                   magnitude), Rho=100 or Rho=1000 can be used.
6156                 * it is important to note that Rho is internally multiplied
6157                   by scaling matrix, i.e. optimum value of Rho depends  on
6158                   scale of variables specified  by  MinNLCSetScale().
6159     ItsCnt  -   number of outer iterations:
6160                 * ItsCnt=0 means that small number of outer iterations  is
6161                   automatically chosen (10 iterations in current version).
6162                 * ItsCnt=1 means that AUL algorithm performs just as usual
6163                   barrier method.
6164                 * ItsCnt>1 means that  AUL  algorithm  performs  specified
6165                   number of outer iterations
6166 
6167 HOW TO CHOOSE PARAMETERS
6168 
6169 Nonlinear optimization is a tricky area and Augmented Lagrangian algorithm
6170 is sometimes hard to tune. Good values of  Rho  and  ItsCnt  are  problem-
6171 specific.  In  order  to  help  you   we   prepared   following   set   of
6172 recommendations:
6173 
6174 * for  unit-scale  problems  (variables  and Hessian have unit magnitude),
6175   Rho=100 or Rho=1000 can be used.
6176 
* start from some small value of Rho and solve the problem with  just  one
  outer iteration (ItsCnt=1). In this case the algorithm  behaves  like  a
  penalty method. Increase Rho in 2x or 10x steps until you see  that  one
  outer iteration returns a point which is a "rough  approximation  to the
  solution".
6181 
6182   It is very important to have Rho so  large  that  penalty  term  becomes
6183   constraining i.e. modified function becomes highly convex in constrained
6184   directions.
6185 
  On the other hand, too large a Rho may prevent you  from  converging  to
  the solution. You can diagnose it by studying the number  of  inner
  iterations performed by the algorithm: too few (5-10 on a 1000-dimensional
  problem) or too many (orders of magnitude more than  the  dimensionality)
  usually means that Rho is too large.
6191 
6192 * with just one outer iteration you  usually  have  low-quality  solution.
6193   Some constraints can be violated with very  large  margin,  while  other
6194   ones (which are NOT violated in the true solution) can push final  point
6195   too far in the inner area of the feasible set.
6196 
6197   For example, if you have constraint x0>=0 and true solution  x0=1,  then
6198   merely a presence of "x0>=0" will introduce a bias towards larger values
6199   of x0. Say, algorithm may stop at x0=1.5 instead of 1.0.
6200 
6201 * after you found good Rho, you may increase number of  outer  iterations.
6202   ItsCnt=10 is a good value. Subsequent outer iteration will refine values
6203   of  Lagrange  multipliers.  Constraints  which  were  violated  will  be
6204   enforced, inactive constraints will be dropped (corresponding multipliers
6205   will be decreased). Ideally, you  should  see  10-1000x  improvement  in
6206   constraint handling (constraint violation is reduced).
6207 
6208 * if  you  see  that  algorithm  converges  to  vicinity  of solution, but
6209   additional outer iterations do not refine solution,  it  may  mean  that
6210   algorithm is unstable - it wanders around true  solution,  but  can  not
6211   approach it. Sometimes algorithm may be stabilized by increasing Rho one
6212   more time, making it 5x or 10x larger.
6213 
6214 SCALING OF CONSTRAINTS [IMPORTANT]
6215 
6216 AUL optimizer scales   variables   according   to   scale   specified   by
6217 MinNLCSetScale() function, so it can handle  problems  with  badly  scaled
6218 variables (as long as we KNOW their scales).   However,  because  function
6219 being optimized is a mix  of  original  function and  constraint-dependent
6220 penalty  functions, it  is   important  to   rescale  both  variables  AND
6221 constraints.
6222 
6223 Say,  if  you  minimize f(x)=x^2 subject to 1000000*x>=0,  then  you  have
6224 constraint whose scale is different from that of target  function (another
6225 example is 0.000001*x>=0). It is also possible to have constraints   whose
6226 scales  are   misaligned:   1000000*x0>=0, 0.000001*x1<=0.   Inappropriate
6227 scaling may ruin convergence because minimizing x^2 subject to x>=0 is NOT
6228 same as minimizing it subject to 1000000*x>=0.
6229 
6230 Because we  know  coefficients  of  boundary/linear  constraints,  we  can
6231 automatically rescale and normalize them. However,  there  is  no  way  to
6232 automatically rescale nonlinear constraints Gi(x) and  Hi(x)  -  they  are
6233 black boxes.
6234 
6235 It means that YOU are the one who is  responsible  for  correct scaling of
6236 nonlinear constraints  Gi(x)  and  Hi(x).  We  recommend  you  to  rescale
6237 nonlinear constraints in such way that I-th component of dG/dX (or  dH/dx)
6238 has magnitude approximately equal to 1/S[i] (where S  is  a  scale  set by
6239 MinNLCSetScale() function).
6240 
6241 WHAT IF IT DOES NOT CONVERGE?
6242 
It is possible that the AUL algorithm fails to converge to precise  values
of the Lagrange multipliers. It stops somewhere around the true  solution,
but the candidate point is still too far from the solution, and some
constraints are violated. This kind of failure is specific  to  Lagrangian
algorithms - technically, they stop at some point, but this  point  is not
the constrained solution.
6248 
There exist several reasons why the algorithm may fail to converge:
6250 a) too loose stopping criteria for inner iteration
6251 b) degenerate, redundant constraints
6252 c) target function has unconstrained extremum exactly at the  boundary  of
6253    some constraint
6254 d) numerical noise in the target function
6255 
6256 In all these cases algorithm is unstable - each outer iteration results in
6257 large and almost random step which improves handling of some  constraints,
6258 but violates other ones (ideally  outer iterations should form a  sequence
6259 of progressively decreasing steps towards solution).
6260 
6261 First reason possible is  that  too  loose  stopping  criteria  for  inner
6262 iteration were specified. Augmented Lagrangian algorithm solves a sequence
of intermediate problems, and requires each of them to be solved with high
6264 precision. Insufficient precision results in incorrect update of  Lagrange
6265 multipliers.
6266 
6267 Another reason is that you may have specified degenerate constraints: say,
6268 some constraint was repeated twice. In most cases AUL algorithm gracefully
6269 handles such situations, but sometimes it may spend too much time figuring
6270 out subtle degeneracies in constraint matrix.
6271 
6272 Third reason is tricky and hard to diagnose. Consider situation  when  you
6273 minimize  f=x^2  subject to constraint x>=0.  Unconstrained   extremum  is
6274 located  exactly  at  the  boundary  of  constrained  area.  In  this case
6275 algorithm will tend to oscillate between negative  and  positive  x.  Each
6276 time it stops at x<0 it "reinforces" constraint x>=0, and each time it  is
6277 bounced to x>0 it "relaxes" constraint (and is  attracted  to  x<0).
6278 
Such a situation sometimes happens in  problems  with  hidden  symmetries.
The algorithm gets caught in a loop,  with  the Lagrange multipliers being
continuously increased/decreased. Luckily, such a loop forms after at least
6282 three iterations, so this problem can be solved by  DECREASING  number  of
6283 outer iterations down to 1-2 and increasing  penalty  coefficient  Rho  as
6284 much as possible.
6285 
6286 Final reason is numerical noise. AUL algorithm is robust against  moderate
6287 noise (more robust than, say, active set methods),  but  large  noise  may
6288 destabilize algorithm.
6289 
6290   -- ALGLIB --
6291      Copyright 06.06.2014 by Bochkanov Sergey
6292 *************************************************************************/
6293 void minnlcsetalgoaul(const minnlcstate &state, const double rho, const ae_int_t itscnt, const xparams _xparams = alglib::xdefault);
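
/*************************************************************************
USAGE SKETCH (minnlcsetalgoaul)

A minimal sketch, following the recommendations above for a unit-scale
problem: Rho=1000 and 10 outer iterations, with the exact low rank
preconditioner rebuilt at the default frequency:

    #include "optimization.h"

    void use_aul_example(alglib::minnlcstate &state)
    {
        alglib::minnlcsetalgoaul(state, 1000.0, 10);
        alglib::minnlcsetprecexactlowrank(state, 0);   // 0 => default update frequency
    }
*************************************************************************/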
6294 
6295 
6296 /*************************************************************************
6297 This   function  tells  MinNLC  optimizer  to  use  SLP (Successive Linear
6298 Programming) algorithm for  nonlinearly  constrained   optimization.  This
6299 algorithm  is  a  slight  modification  of  one  described  in  "A  Linear
6300 programming-based optimization algorithm for solving nonlinear programming
6301 problems" (2010) by Claus Still and Tapio Westerlund.
6302 
Despite its name ("linear" = "first order method"), this algorithm performs
steps similar to those of the conjugate gradient  method;  internally   it
uses an orthogonality/conjugacy requirement for  subsequent  steps,  which
makes it closer to second order methods in terms of convergence speed.
6307 
6308 Convergence is proved for the following case:
6309 * function and constraints are continuously differentiable (C1 class)
6310 * extended Mangasarian–Fromovitz constraint qualification  (EMFCQ)  holds;
6311   in the context of this algorithm EMFCQ  means  that  one  can,  for  any
6312   infeasible  point,  find  a  search  direction  such that the constraint
6313   infeasibilities are reduced.
6314 
6315 This algorithm has following nice properties:
6316 * no parameters to tune
6317 * no convexity requirements for target function or constraints
6318 * initial point can be infeasible
6319 * algorithm respects box constraints in all intermediate points  (it  does
6320   not even evaluate function outside of box constrained area)
6321 * once linear constraints are enforced, algorithm will not violate them
6322 * no such guarantees can be provided for nonlinear constraints,  but  once
6323   nonlinear constraints are enforced, algorithm will try  to  respect them
6324   as much as possible
6325 * numerical differentiation does not  violate  box  constraints  (although
6326   general linear and nonlinear ones can be violated during differentiation)
6327 
However, the following drawbacks can be noted:
* algorithm performance decreases on problems with dense constraints
* it has a higher iteration cost than AUL - we have to solve an LP problem
  at each step.
6332 
6333 We recommend this algorithm as a first step; as soon as you make sure that
6334 it converges, you can try switching to AUL which is sometimes much faster.
6335 
6336 INPUT PARAMETERS:
6337     State   -   structure which stores algorithm state
6338 
6339   -- ALGLIB --
6340      Copyright 02.04.2018 by Bochkanov Sergey
6341 *************************************************************************/
6342 void minnlcsetalgoslp(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
6343 
6344 
6345 /*************************************************************************
6346 This function turns on/off reporting.
6347 
6348 INPUT PARAMETERS:
6349     State   -   structure which stores algorithm state
6350     NeedXRep-   whether iteration reports are needed or not
6351 
If NeedXRep is True, the algorithm will call the rep() callback function if
it is provided to MinNLCOptimize().

NOTE: the algorithm passes two parameters to the rep() callback -  the
      current point and the penalized function value at that point.
      Important - the reported value is NOT the function being minimized.
      It is the sum of the function being minimized and the penalty term.
6359 
6360   -- ALGLIB --
6361      Copyright 28.11.2010 by Bochkanov Sergey
6362 *************************************************************************/
6363 void minnlcsetxrep(const minnlcstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
6364 
6365 
6366 /*************************************************************************
6367 This function provides reverse communication interface
6368 Reverse communication interface is not documented or recommended to use.
6369 See below for functions which provide better documented API
6370 *************************************************************************/
6371 bool minnlciteration(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
6372 
6373 
6374 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear optimizer
6376 
6377 These functions accept following parameters:
6378     state   -   algorithm state
6379     fvec    -   callback which calculates function vector fi[]
6380                 at given point x
6381     jac     -   callback which calculates function vector fi[]
6382                 and Jacobian jac at given point x
    rep     -   optional callback which is called after each iteration;
                can be NULL
    ptr     -   optional pointer which is passed to func/grad/hess/jac/rep;
                can be NULL
6387 
6388 
6389 NOTES:
6390 
6391 1. This function has two different implementations: one which  uses  exact
6392    (analytical) user-supplied Jacobian, and one which uses  only  function
6393    vector and numerically  differentiates  function  in  order  to  obtain
6394    gradient.
6395 
6396    Depending  on  the  specific  function  used to create optimizer object
6397    you should choose appropriate variant of MinNLCOptimize() -  one  which
6398    accepts function AND Jacobian or one which accepts ONLY function.
6399 
6400    Be careful to choose variant of MinNLCOptimize()  which  corresponds to
6401    your optimization scheme! Table below lists different  combinations  of
6402    callback (function/gradient) passed to MinNLCOptimize()   and  specific
6403    function used to create optimizer.
6404 
6405 
6406                      |         USER PASSED TO MinNLCOptimize()
6407    CREATED WITH      |  function only   |  function and gradient
6408    ------------------------------------------------------------
6409    MinNLCCreateF()   |     works               FAILS
6410    MinNLCCreate()    |     FAILS               works
6411 
6412    Here "FAILS" denotes inappropriate combinations  of  optimizer creation
6413    function  and  MinNLCOptimize()  version.   Attemps   to    use    such
6414    combination will lead to exception. Either  you  did  not pass gradient
6415    when it WAS needed or you passed gradient when it was NOT needed.
6416 
6417   -- ALGLIB --
6418      Copyright 06.06.2014 by Bochkanov Sergey
6419 
6420 *************************************************************************/
6421 void minnlcoptimize(minnlcstate &state,
6422     void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
6423     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6424     void *ptr = NULL,
6425     const xparams _xparams = alglib::xdefault);
6426 void minnlcoptimize(minnlcstate &state,
6427     void  (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
6428     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6429     void *ptr = NULL,
6430     const xparams _xparams = alglib::xdefault);
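
/*************************************************************************
USAGE SKETCH (minnlcoptimize)

A sketch of the pairing rule in the table above: an optimizer created with
minnlccreatef() must be driven with the function-only callback, which  is
then differentiated numerically:

    #include "optimization.h"

    static void fvec_only(const alglib::real_1d_array &x,
                          alglib::real_1d_array &fi, void *ptr)
    {
        fi[0] = x[0]*x[0] + x[1]*x[1];   // target
        fi[1] = x[0] + x[1] - 1.0;       // one nonlinear equality constraint
    }

    void optimize_numdiff_example()
    {
        using namespace alglib;
        real_1d_array x0 = "[0.1,0.7]";
        real_1d_array s  = "[1,1]";
        minnlcstate state;
        minnlccreatef(2, x0, 1.0e-4, state);  // numerical differentiation
        minnlcsetalgoslp(state);
        minnlcsetscale(state, s);
        minnlcsetcond(state, 1.0e-6, 0);
        minnlcsetnlc(state, 1, 0);
        minnlcoptimize(state, fvec_only);     // function-only variant matches minnlccreatef()
        // ...followed by minnlcresults(), as usual (not shown here).
    }
*************************************************************************/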
6431 
6432 
6433 /*************************************************************************
6434 This  function  activates/deactivates verification  of  the  user-supplied
6435 analytic gradient/Jacobian.
6436 
6437 Upon  activation  of  this  option  OptGuard  integrity  checker  performs
6438 numerical differentiation of your target  function  (constraints)  at  the
6439 initial point (note: future versions may also perform check  at  the final
6440 point) and compares numerical gradient/Jacobian with analytic one provided
6441 by you.
6442 
If the difference is too large, an error flag is set and  the  optimization
session continues. After the optimization session is over, you can retrieve
the report which stores both gradients/Jacobians as well  as  the  specific
components highlighted as suspicious by OptGuard.
6447 
6448 The primary OptGuard report can be retrieved with minnlcoptguardresults().
6449 
6450 IMPORTANT: gradient check is a high-overhead option which  will  cost  you
6451            about 3*N additional function evaluations. In many cases it may
6452            cost as much as the rest of the optimization session.
6453 
6454            YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO
6455            CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY.
6456 
6457 NOTE: unlike previous incarnation of the gradient checking code,  OptGuard
6458       does NOT interrupt optimization even if it discovers bad gradient.
6459 
6460 INPUT PARAMETERS:
6461     State       -   structure used to store algorithm state
6462     TestStep    -   verification step used for numerical differentiation:
6463                     * TestStep=0 turns verification off
6464                     * TestStep>0 activates verification
6465                     You should carefully choose TestStep. Value  which  is
6466                     too large (so large that  function  behavior  is  non-
6467                     cubic at this scale) will lead  to  false  alarms. Too
6468                     short step will result in rounding  errors  dominating
6469                     numerical derivative.
6470 
6471                     You may use different step for different parameters by
6472                     means of setting scale with minnlcsetscale().
6473 
6474 === EXPLANATION ==========================================================
6475 
In order to verify the gradient, the algorithm performs the following steps:
6477   * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
6478     where X[i] is i-th component of the initial point and S[i] is a  scale
6479     of i-th parameter
6480   * F(X) is evaluated at these trial points
6481   * we perform one more evaluation in the middle point of the interval
6482   * we  build  cubic  model using function values and derivatives at trial
6483     points and we compare its prediction with actual value in  the  middle
6484     point
6485 
6486   -- ALGLIB --
6487      Copyright 15.06.2014 by Bochkanov Sergey
6488 *************************************************************************/
6489 void minnlcoptguardgradient(const minnlcstate &state, const double teststep, const xparams _xparams = alglib::xdefault);
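/*************************************************************************
USAGE SKETCH: enabling OptGuard gradient verification before  the  solver
is started. The TestStep value below (1.0E-4, measured in  the  variable
scale) is only an illustrative choice, not a recommended default.

    minnlcoptguardgradient(state, 1.0E-4);  // TestStep>0 => check is on
    // ... run minnlcoptimize(...), then retrieve the report with
    //     minnlcoptguardresults(state, ogrep);
*************************************************************************/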
6490 
6491 
6492 /*************************************************************************
6493 This  function  activates/deactivates nonsmoothness monitoring  option  of
6494 the  OptGuard  integrity  checker. Smoothness  monitor  silently  observes
6495 solution process and tries to detect ill-posed problems, i.e. ones with:
6496 a) discontinuous target function (non-C0) and/or constraints
6497 b) nonsmooth     target function (non-C1) and/or constraints
6498 
6499 Smoothness monitoring does NOT interrupt optimization  even if it suspects
6500 that your problem is nonsmooth. It just sets corresponding  flags  in  the
6501 OptGuard report which can be retrieved after optimization is over.
6502 
6503 Smoothness monitoring is a moderate overhead option which often adds  less
6504 than 1% to the optimizer running time. Thus, you can use it even for large
6505 scale problems.
6506 
6507 NOTE: OptGuard does  NOT  guarantee  that  it  will  always  detect  C0/C1
6508       continuity violations.
6509 
6510       First, minor errors are hard to  catch - say, a 0.0001 difference in
6511       the model values at two sides of the gap may be due to discontinuity
6512       of the model - or simply because the model has changed.
6513 
6514       Second, C1-violations  are  especially  difficult  to  detect  in  a
6515       noninvasive way. The optimizer usually  performs  very  short  steps
6516       near the nonsmoothness, and differentiation  usually   introduces  a
6517       lot of numerical noise.  It  is  hard  to  tell  whether  some  tiny
6518       discontinuity in the slope is due to real nonsmoothness or just  due
6519       to numerical noise alone.
6520 
      Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases  they  can  be
      spotted with a restart from a different initial point).
6524 
6525 INPUT PARAMETERS:
6526     state   -   algorithm state
6527     level   -   monitoring level:
6528                 * 0 - monitoring is disabled
6529                 * 1 - noninvasive low-overhead monitoring; function values
6530                       and/or gradients are recorded, but OptGuard does not
6531                       try to perform additional evaluations  in  order  to
6532                       get more information about suspicious locations.
6533 
6534 === EXPLANATION ==========================================================
6535 
6536 One major source of headache during optimization  is  the  possibility  of
6537 the coding errors in the target function/constraints (or their gradients).
6538 Such  errors   most   often   manifest   themselves  as  discontinuity  or
6539 nonsmoothness of the target/constraints.
6540 
6541 Another frequent situation is when you try to optimize something involving
6542 lots of min() and max() operations, i.e. nonsmooth target. Although not  a
6543 coding error, it is nonsmoothness anyway - and smooth  optimizers  usually
6544 stop right after encountering nonsmoothness, well before reaching solution.
6545 
OptGuard integrity checker helps you to catch such situations: it monitors
function values/gradients being passed to the optimizer and tries to detect
errors. Upon discovering a suspicious pair of points it raises  appropriate
6549 flag (and allows you to continue optimization). When optimization is done,
6550 you can study OptGuard result.
6551 
6552   -- ALGLIB --
6553      Copyright 21.11.2018 by Bochkanov Sergey
6554 *************************************************************************/
6555 void minnlcoptguardsmoothness(const minnlcstate &state, const ae_int_t level, const xparams _xparams = alglib::xdefault);
6556 void minnlcoptguardsmoothness(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
6557 
6558 
6559 /*************************************************************************
6560 Results of OptGuard integrity check, should be called  after  optimization
6561 session is over.
6562 
6563 === PRIMARY REPORT =======================================================
6564 
6565 OptGuard performs several checks which are intended to catch common errors
6566 in the implementation of nonlinear function/gradient:
6567 * incorrect analytic gradient
6568 * discontinuous (non-C0) target functions (constraints)
6569 * nonsmooth     (non-C1) target functions (constraints)
6570 
6571 Each of these checks is activated with appropriate function:
6572 * minnlcoptguardgradient() for gradient verification
6573 * minnlcoptguardsmoothness() for C0/C1 checks
6574 
6575 Following flags are set when these errors are suspected:
6576 * rep.badgradsuspected, and additionally:
6577   * rep.badgradfidx for specific function (Jacobian row) suspected
6578   * rep.badgradvidx for specific variable (Jacobian column) suspected
6579   * rep.badgradxbase, a point where gradient/Jacobian is tested
6580   * rep.badgraduser, user-provided gradient/Jacobian
6581   * rep.badgradnum, reference gradient/Jacobian obtained via numerical
6582     differentiation
6583 * rep.nonc0suspected, and additionally:
6584   * rep.nonc0fidx - an index of specific function violating C0 continuity
6585 * rep.nonc1suspected, and additionally
6586   * rep.nonc1fidx - an index of specific function violating C1 continuity
6587 Here function index 0 means  target function, index 1  or  higher  denotes
6588 nonlinear constraints.
6589 
6590 === ADDITIONAL REPORTS/LOGS ==============================================
6591 
Several different tests are performed to catch C0/C1 errors; you can  find
out which specific test signaled the error by looking at:
6594 * rep.nonc0test0positive, for non-C0 test #0
6595 * rep.nonc1test0positive, for non-C1 test #0
6596 * rep.nonc1test1positive, for non-C1 test #1
6597 
6598 Additional information (including line search logs)  can  be  obtained  by
6599 means of:
6600 * minnlcoptguardnonc1test0results()
6601 * minnlcoptguardnonc1test1results()
6602 which return detailed error reports, specific points where discontinuities
6603 were found, and so on.
6604 
6605 ==========================================================================
6606 
6607 INPUT PARAMETERS:
6608     state   -   algorithm state
6609 
6610 OUTPUT PARAMETERS:
6611     rep     -   generic OptGuard report;  more  detailed  reports  can  be
6612                 retrieved with other functions.
6613 
6614 NOTE: false negatives (nonsmooth problems are not identified as  nonsmooth
6615       ones) are possible although unlikely.
6616 
6617       The reason  is  that  you  need  to  make several evaluations around
6618       nonsmoothness  in  order  to  accumulate  enough  information  about
6619       function curvature. Say, if you start right from the nonsmooth point,
6620       optimizer simply won't get enough data to understand what  is  going
6621       wrong before it terminates due to abrupt changes in the  derivative.
6622       It is also  possible  that  "unlucky"  step  will  move  us  to  the
6623       termination too quickly.
6624 
6625       Our current approach is to have less than 0.1%  false  negatives  in
6626       our test examples  (measured  with  multiple  restarts  from  random
6627       points), and to have exactly 0% false positives.
6628 
6629   -- ALGLIB --
6630      Copyright 21.11.2018 by Bochkanov Sergey
6631 *************************************************************************/
6632 void minnlcoptguardresults(const minnlcstate &state, optguardreport &rep, const xparams _xparams = alglib::xdefault);
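/*************************************************************************
USAGE SKETCH: retrieving the primary OptGuard report after  optimization
is over and inspecting its flags (field names as listed above;  assumes
<cstdio> for printf).

    optguardreport ogrep;
    minnlcoptguardresults(state, ogrep);
    if( ogrep.badgradsuspected )
        printf("bad gradient: function %d, variable %d\n",
               (int)ogrep.badgradfidx, (int)ogrep.badgradvidx);
    if( ogrep.nonc0suspected || ogrep.nonc1suspected )
        printf("target/constraints may be discontinuous or nonsmooth\n");
*************************************************************************/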
6633 
6634 
6635 /*************************************************************************
6636 Detailed results of the OptGuard integrity check for nonsmoothness test #0
6637 
6638 Nonsmoothness (non-C1) test #0 studies  function  values  (not  gradient!)
6639 obtained during line searches and monitors  behavior  of  the  directional
6640 derivative estimate.
6641 
6642 This test is less powerful than test #1, but it does  not  depend  on  the
6643 gradient values and thus it is more robust against artifacts introduced by
6644 numerical differentiation.
6645 
6646 Two reports are returned:
6647 * a "strongest" one, corresponding  to  line   search  which  had  highest
6648   value of the nonsmoothness indicator
6649 * a "longest" one, corresponding to line search which  had  more  function
6650   evaluations, and thus is more detailed
6651 
6652 In both cases following fields are returned:
6653 
* positive - is TRUE when the test flagged a suspicious point;  FALSE  if
  the test did not notice anything (in the latter case the  fields  below
  are empty).
6656 * fidx - is an index of the function (0 for  target  function, 1 or higher
6657   for nonlinear constraints) which is suspected of being "non-C1"
6658 * x0[], d[] - arrays of length N which store initial point  and  direction
6659   for line search (d[] can be normalized, but does not have to)
6660 * stp[], f[] - arrays of length CNT which store step lengths and  function
6661   values at these points; f[i] is evaluated in x0+stp[i]*d.
6662 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
6663   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
6664   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
6666 
6667 ==========================================================================
6668 = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it -  you  will
6669 =                   see where C1 continuity is violated.
6670 ==========================================================================
6671 
6672 INPUT PARAMETERS:
6673     state   -   algorithm state
6674 
6675 OUTPUT PARAMETERS:
6676     strrep  -   C1 test #0 "strong" report
6677     lngrep  -   C1 test #0 "long" report
6678 
6679   -- ALGLIB --
6680      Copyright 21.11.2018 by Bochkanov Sergey
6681 *************************************************************************/
6682 void minnlcoptguardnonc1test0results(const minnlcstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams = alglib::xdefault);
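/*************************************************************************
USAGE SKETCH: dumping the "long" C1 test #0 report so that (stp,f)  pairs
can be plotted externally, as suggested above. Assumes the  report  class
exposes the fields listed above (positive, cnt, stp[], f[]) and  <cstdio>
for printf.

    optguardnonc1test0report strrep, lngrep;
    minnlcoptguardnonc1test0results(state, strrep, lngrep);
    if( lngrep.positive )
        for(int i=0; i<lngrep.cnt; i++)
            printf("%15.6e %15.6e\n", lngrep.stp[i], lngrep.f[i]);
*************************************************************************/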
6683 
6684 
6685 /*************************************************************************
6686 Detailed results of the OptGuard integrity check for nonsmoothness test #1
6687 
6688 Nonsmoothness (non-C1)  test  #1  studies  individual  components  of  the
6689 gradient computed during line search.
6690 
6691 When precise analytic gradient is provided this test is more powerful than
6692 test #0  which  works  with  function  values  and  ignores  user-provided
6693 gradient.  However,  test  #0  becomes  more   powerful   when   numerical
6694 differentiation is employed (in such cases test #1 detects  higher  levels
6695 of numerical noise and becomes too conservative).
6696 
6697 This test also tells specific components of the gradient which violate  C1
6698 continuity, which makes it more informative than #0, which just tells that
6699 continuity is violated.
6700 
6701 Two reports are returned:
6702 * a "strongest" one, corresponding  to  line   search  which  had  highest
6703   value of the nonsmoothness indicator
6704 * a "longest" one, corresponding to line search which  had  more  function
6705   evaluations, and thus is more detailed
6706 
6707 In both cases following fields are returned:
6708 
* positive - is TRUE when the test flagged a suspicious point;  FALSE  if
  the test did not notice anything (in the latter case the  fields  below
  are empty).
6711 * fidx - is an index of the function (0 for  target  function, 1 or higher
6712   for nonlinear constraints) which is suspected of being "non-C1"
6713 * vidx - is an index of the variable in [0,N) with nonsmooth derivative
6714 * x0[], d[] - arrays of length N which store initial point  and  direction
6715   for line search (d[] can be normalized, but does not have to)
6716 * stp[], g[] - arrays of length CNT which store step lengths and  gradient
6717   values at these points; g[i] is evaluated in  x0+stp[i]*d  and  contains
6718   vidx-th component of the gradient.
6719 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
6720   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
6721   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
6723 
6724 ==========================================================================
= SHORTLY SPEAKING: build a 2D plot of (stp,g) and look at it -  you  will
6726 =                   see where C1 continuity is violated.
6727 ==========================================================================
6728 
6729 INPUT PARAMETERS:
6730     state   -   algorithm state
6731 
6732 OUTPUT PARAMETERS:
6733     strrep  -   C1 test #1 "strong" report
6734     lngrep  -   C1 test #1 "long" report
6735 
6736   -- ALGLIB --
6737      Copyright 21.11.2018 by Bochkanov Sergey
6738 *************************************************************************/
6739 void minnlcoptguardnonc1test1results(const minnlcstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams = alglib::xdefault);
6740 
6741 
6742 /*************************************************************************
6743 MinNLC results:  the  solution  found,  completion  codes  and  additional
6744 information.
6745 
6746 If you activated OptGuard integrity checking functionality and want to get
6747 OptGuard report, it can be retrieved with:
6748 * minnlcoptguardresults() - for a primary report about (a) suspected C0/C1
6749   continuity violations and (b) errors in the analytic gradient.
6750 * minnlcoptguardnonc1test0results() - for C1 continuity violation test #0,
6751   detailed line search log
6752 * minnlcoptguardnonc1test1results() - for C1 continuity violation test #1,
6753   detailed line search log
6754 
6755 INPUT PARAMETERS:
6756     State   -   algorithm state
6757 
6758 OUTPUT PARAMETERS:
6759     X       -   array[0..N-1], solution
6760     Rep     -   optimization report, contains information about completion
6761                 code, constraint violation at the solution and so on.
6762 
6763                 You   should   check   rep.terminationtype  in  order   to
6764                 distinguish successful termination from unsuccessful one:
6765 
6766                 === FAILURE CODES ===
6767                 * -8    internal  integrity control  detected  infinite or
6768                         NAN   values    in   function/gradient.   Abnormal
6769                         termination signalled.
6770                 * -3    box  constraints are infeasible.
6771                         Note: infeasibility of  non-box  constraints  does
6772                               NOT trigger emergency completion;  you  have
6773                               to examine rep.bcerr/rep.lcerr/rep.nlcerr to
6774                               detect possibly inconsistent constraints.
6775 
6776                 === SUCCESS CODES ===
6777                 *  2   scaled step is no more than EpsX.
6778                 *  5   MaxIts steps were taken.
6779                 *  8   user   requested    algorithm    termination    via
6780                        minnlcrequesttermination(), last accepted point  is
6781                        returned.
6782 
6783                 More information about fields of this  structure  can  be
6784                 found in the comments on minnlcreport datatype.
6785 
6786   -- ALGLIB --
6787      Copyright 06.06.2014 by Bochkanov Sergey
6788 *************************************************************************/
6789 void minnlcresults(const minnlcstate &state, real_1d_array &x, minnlcreport &rep, const xparams _xparams = alglib::xdefault);
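/*************************************************************************
USAGE SKETCH: retrieving the solution and distinguishing  successful  and
unsuccessful termination via rep.terminationtype (codes as listed  above;
assumes <cstdio> for printf).

    real_1d_array x;
    minnlcreport  rep;
    minnlcresults(state, x, rep);
    if( rep.terminationtype>0 )
        printf("solution: %s\n", x.tostring(6).c_str());   // success
    else
        printf("failure, code %d\n", (int)rep.terminationtype);
*************************************************************************/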
6790 
6791 
6792 /*************************************************************************
6793 NLC results
6794 
6795 Buffered implementation of MinNLCResults() which uses pre-allocated buffer
6796 to store X[]. If buffer size is  too  small,  it  resizes  buffer.  It  is
6797 intended to be used in the inner cycles of performance critical algorithms
6798 where array reallocation penalty is too large to be ignored.
6799 
6800   -- ALGLIB --
6801      Copyright 28.11.2010 by Bochkanov Sergey
6802 *************************************************************************/
6803 void minnlcresultsbuf(const minnlcstate &state, real_1d_array &x, minnlcreport &rep, const xparams _xparams = alglib::xdefault);
6804 
6805 
6806 /*************************************************************************
6807 This subroutine submits request for termination of running  optimizer.  It
6808 should be called from user-supplied callback when user decides that it  is
time to "smoothly" terminate the optimization process. As a  result,  the
optimizer stops at the point which was "current accepted" when termination
request was submitted and returns error code 8 (successful termination).
6812 
6813 INPUT PARAMETERS:
6814     State   -   optimizer structure
6815 
6816 NOTE: after  request  for  termination  optimizer  may   perform   several
6817       additional calls to user-supplied callbacks. It does  NOT  guarantee
6818       to stop immediately - it just guarantees that these additional calls
6819       will be discarded later.
6820 
6821 NOTE: calling this function on optimizer which is NOT running will have no
6822       effect.
6823 
6824 NOTE: multiple calls to this function are possible. First call is counted,
6825       subsequent calls are silently ignored.
6826 
6827   -- ALGLIB --
6828      Copyright 08.10.2014 by Bochkanov Sergey
6829 *************************************************************************/
6830 void minnlcrequesttermination(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
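/*************************************************************************
USAGE SKETCH: requesting "smooth" termination from inside a rep() callback.
The stop_requested flag is a hypothetical application-level flag, and the
optimizer state is passed through the ptr argument; it is  assumed  that
iteration reports were enabled beforehand (minnlcsetxrep()).

    static bool stop_requested = false;   // hypothetical external flag

    static void myrep(const real_1d_array &x, double func, void *ptr)
    {
        minnlcstate *s = (minnlcstate*)ptr;   // state passed via ptr
        if( stop_requested )
            minnlcrequesttermination(*s);     // solver returns code 8
    }

    // ... minnlcoptimize(state, myjac, myrep, &state); ...
*************************************************************************/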
6831 
6832 
6833 /*************************************************************************
6834 This subroutine restarts algorithm from new point.
6835 All optimization parameters (including constraints) are left unchanged.
6836 
This function allows one to solve multiple  optimization  problems  (which
must have the same number of dimensions) without object reallocation penalty.
6839 
6840 INPUT PARAMETERS:
6841     State   -   structure previously allocated with MinNLCCreate call.
6842     X       -   new starting point.
6843 
6844   -- ALGLIB --
6845      Copyright 28.11.2010 by Bochkanov Sergey
6846 *************************************************************************/
6847 void minnlcrestartfrom(const minnlcstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
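/*************************************************************************
USAGE SKETCH: reusing one optimizer object for several starting points of
the same dimensionality (myjac is the callback from the earlier sketch).

    real_1d_array x1 = "[0,0]", x2 = "[5,-3]";
    real_1d_array xa, xb;
    minnlcreport  repa, repb;

    minnlcrestartfrom(state, x1);
    minnlcoptimize(state, myjac);
    minnlcresults(state, xa, repa);

    minnlcrestartfrom(state, x2);   // same N, same settings/constraints
    minnlcoptimize(state, myjac);
    minnlcresults(state, xb, repb);
*************************************************************************/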
6848 #endif
6849 
6850 #if defined(AE_COMPILE_MINBC) || !defined(AE_PARTIAL_BUILD)
6851 /*************************************************************************
6852                      BOX CONSTRAINED OPTIMIZATION
6853           WITH FAST ACTIVATION OF MULTIPLE BOX CONSTRAINTS
6854 
6855 DESCRIPTION:
6856 The  subroutine  minimizes  function   F(x) of N arguments subject  to box
6857 constraints (with some of box constraints actually being equality ones).
6858 
6859 This optimizer uses algorithm similar to that of MinBLEIC (optimizer  with
6860 general linear constraints), but presence of box-only  constraints  allows
6861 us to use faster constraint activation strategies. On large-scale problems,
6862 with multiple constraints active at the solution, this  optimizer  can  be
6863 several times faster than BLEIC.
6864 
6865 REQUIREMENTS:
6866 * user must provide function value and gradient
6867 * starting point X0 must be feasible or
6868   not too far away from the feasible set
6869 * grad(f) must be Lipschitz continuous on a level set:
6870   L = { x : f(x)<=f(x0) }
6871 * function must be defined everywhere on the feasible set F
6872 
6873 USAGE:
6874 
Constrained optimization is far more complex than the  unconstrained  one.
Here we give a very brief outline of the BC optimizer. We strongly recommend
that you read the examples in the ALGLIB Reference Manual and  the  ALGLIB
User Guide on optimization, available at http://www.alglib.net/optimization/
6879 
6880 1. User initializes algorithm state with MinBCCreate() call
6881 
2. User adds box constraints by calling the MinBCSetBC() function.
6883 
6884 3. User sets stopping conditions with MinBCSetCond().
6885 
6886 4. User calls MinBCOptimize() function which takes algorithm  state and
6887    pointer (delegate, etc.) to callback function which calculates F/G.
6888 
6889 5. User calls MinBCResults() to get solution
6890 
6891 6. Optionally user may call MinBCRestartFrom() to solve another problem
6892    with same N but another starting point.
   MinBCRestartFrom() allows one to reuse an already initialized structure.
6894 
6895 
6896 INPUT PARAMETERS:
6897     N       -   problem dimension, N>0:
6898                 * if given, only leading N elements of X are used
                * if not given, automatically determined from size of X
6900     X       -   starting point, array[N]:
6901                 * it is better to set X to a feasible point
6902                 * but X can be infeasible, in which case algorithm will try
6903                   to find feasible point first, using X as initial
6904                   approximation.
6905 
6906 OUTPUT PARAMETERS:
6907     State   -   structure stores algorithm state
6908 
6909   -- ALGLIB --
6910      Copyright 28.11.2010 by Bochkanov Sergey
6911 *************************************************************************/
6912 void minbccreate(const ae_int_t n, const real_1d_array &x, minbcstate &state, const xparams _xparams = alglib::xdefault);
6913 void minbccreate(const real_1d_array &x, minbcstate &state, const xparams _xparams = alglib::xdefault);
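/*************************************************************************
USAGE SKETCH: creating a BC optimizer for a 2-dimensional problem with an
analytic gradient (the MinBCCreate() row of the table in the
MinBCOptimize() comments); the state object is reused in later sketches.

    real_1d_array x0 = "[0,0]";
    minbcstate    state;
    minbccreate(x0, state);            // N inferred from the size of x0
*************************************************************************/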
6914 
6915 
6916 /*************************************************************************
The subroutine is a finite-difference variant of MinBCCreate().  It  uses
finite differences in order to differentiate the target function.

The description below contains information which is specific to this function
only. We recommend reading the comments on MinBCCreate() in order  to  get
more information about creation of the BC optimizer.
6923 
6924 INPUT PARAMETERS:
6925     N       -   problem dimension, N>0:
6926                 * if given, only leading N elements of X are used
6927                 * if not given, automatically determined from size of X
6928     X       -   starting point, array[0..N-1].
6929     DiffStep-   differentiation step, >0
6930 
6931 OUTPUT PARAMETERS:
6932     State   -   structure which stores algorithm state
6933 
6934 NOTES:
6935 1. algorithm uses 4-point central formula for differentiation.
6936 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
6937    S[] is scaling vector which can be set by MinBCSetScale() call.
6938 3. we recommend you to use moderate values of  differentiation  step.  Too
6939    large step will result in too large truncation  errors, while too small
6940    step will result in too large numerical  errors.  1.0E-6  can  be  good
6941    value to start with.
6942 4. Numerical  differentiation  is   very   inefficient  -   one   gradient
6943    calculation needs 4*N function evaluations. This function will work for
6944    any N - either small (1...10), moderate (10...100) or  large  (100...).
6945    However, performance penalty will be too severe for any N's except  for
6946    small ones.
   We should also say that code which relies on numerical  differentiation
   is less robust and precise. The algorithm needs exact  gradient values.
   An imprecise gradient may slow down convergence, especially  on  highly
   nonlinear problems.
6951    Thus  we  recommend to use this function for fast prototyping on small-
6952    dimensional problems only, and to implement analytical gradient as soon
6953    as possible.
6954 
6955   -- ALGLIB --
6956      Copyright 16.05.2011 by Bochkanov Sergey
6957 *************************************************************************/
6958 void minbccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbcstate &state, const xparams _xparams = alglib::xdefault);
6959 void minbccreatef(const real_1d_array &x, const double diffstep, minbcstate &state, const xparams _xparams = alglib::xdefault);
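/*************************************************************************
USAGE SKETCH: the numerical-differentiation variant; DiffStep=1.0E-6  is
the moderate starting value suggested in note 3 above.

    real_1d_array x0 = "[0,0]";
    minbcstate    state;
    minbccreatef(x0, 1.0E-6, state);   // gradient-free callbacks only
*************************************************************************/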
6960 
6961 
6962 /*************************************************************************
6963 This function sets boundary constraints for BC optimizer.
6964 
6965 Boundary constraints are inactive by default (after initial creation).
6966 They are preserved after algorithm restart with MinBCRestartFrom().
6967 
6968 INPUT PARAMETERS:
6969     State   -   structure stores algorithm state
6970     BndL    -   lower bounds, array[N].
6971                 If some (all) variables are unbounded, you may specify
6972                 very small number or -INF.
6973     BndU    -   upper bounds, array[N].
6974                 If some (all) variables are unbounded, you may specify
6975                 very large number or +INF.
6976 
6977 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
6978 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
6979 
6980 NOTE 2: this solver has following useful properties:
6981 * bound constraints are always satisfied exactly
6982 * function is evaluated only INSIDE area specified by  bound  constraints,
6983   even  when  numerical  differentiation is used (algorithm adjusts  nodes
6984   according to boundary constraints)
6985 
6986   -- ALGLIB --
6987      Copyright 28.11.2010 by Bochkanov Sergey
6988 *************************************************************************/
6989 void minbcsetbc(const minbcstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
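/*************************************************************************
USAGE SKETCH: box constraints with bounded and frozen variables  (values
are illustrative; unbounded variables may use very large magnitudes  or
+-INF, as described above).

    real_1d_array bndl = "[-1.0,-1.0,2.0]";
    real_1d_array bndu = "[+1.0,+1.0,2.0]";  // third variable frozen at 2
    minbcsetbc(state, bndl, bndu);
*************************************************************************/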
6990 
6991 
6992 /*************************************************************************
6993 This function sets stopping conditions for the optimizer.
6994 
6995 INPUT PARAMETERS:
6996     State   -   structure which stores algorithm state
6997     EpsG    -   >=0
6998                 The  subroutine  finishes  its  work   if   the  condition
6999                 |v|<EpsG is satisfied, where:
                * |.| means Euclidean norm
7001                 * v - scaled gradient vector, v[i]=g[i]*s[i]
7002                 * g - gradient
7003                 * s - scaling coefficients set by MinBCSetScale()
7004     EpsF    -   >=0
7005                 The  subroutine  finishes  its work if on k+1-th iteration
7006                 the  condition  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
7007                 is satisfied.
7008     EpsX    -   >=0
7009                 The subroutine finishes its work if  on  k+1-th  iteration
7010                 the condition |v|<=EpsX is fulfilled, where:
                * |.| means Euclidean norm
7012                 * v - scaled step vector, v[i]=dx[i]/s[i]
7013                 * dx - step vector, dx=X(k+1)-X(k)
7014                 * s - scaling coefficients set by MinBCSetScale()
7015     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
7016                 iterations is unlimited.
7017 
7018 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
7019 to automatic stopping criterion selection.
7020 
NOTE: when SetCond() is called with a non-zero MaxIts, the BC  solver  may
      perform slightly more than MaxIts iterations. I.e., MaxIts  sets  a
      non-strict limit on the iteration count.
7024 
7025   -- ALGLIB --
7026      Copyright 28.11.2010 by Bochkanov Sergey
7027 *************************************************************************/
7028 void minbcsetcond(const minbcstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
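/*************************************************************************
USAGE SKETCH: a typical stopping-condition setup - stop either  when  the
scaled step drops below 1.0E-6 or after at most 100 iterations (both
values are illustrative only).

    minbcsetcond(state, 0.0, 0.0, 1.0E-6, 100);
*************************************************************************/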
7029 
7030 
7031 /*************************************************************************
7032 This function sets scaling coefficients for BC optimizer.
7033 
7034 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
7035 size and gradient are scaled before comparison with tolerances).  Scale of
7036 the I-th variable is a translation invariant measure of:
7037 a) "how large" the variable is
7038 b) how large the step should be to make significant changes in the function
7039 
7040 Scaling is also used by finite difference variant of the optimizer  - step
7041 along I-th axis is equal to DiffStep*S[I].
7042 
7043 In  most  optimizers  (and  in  the  BC  too)  scaling is NOT a form of
7044 preconditioning. It just  affects  stopping  conditions.  You  should  set
7045 preconditioner  by  separate  call  to  one  of  the  MinBCSetPrec...()
7046 functions.
7047 
7048 There is a special  preconditioning  mode, however,  which  uses   scaling
7049 coefficients to form diagonal preconditioning matrix. You  can  turn  this
7050 mode on, if you want.   But  you should understand that scaling is not the
7051 same thing as preconditioning - these are two different, although  related
7052 forms of tuning solver.
7053 
7054 INPUT PARAMETERS:
7055     State   -   structure stores algorithm state
7056     S       -   array[N], non-zero scaling coefficients
7057                 S[i] may be negative, sign doesn't matter.
7058 
7059   -- ALGLIB --
7060      Copyright 14.01.2011 by Bochkanov Sergey
7061 *************************************************************************/
7062 void minbcsetscale(const minbcstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
7063 
7064 
7065 /*************************************************************************
7066 Modification of the preconditioner: preconditioning is turned off.
7067 
7068 INPUT PARAMETERS:
7069     State   -   structure which stores algorithm state
7070 
7071   -- ALGLIB --
7072      Copyright 13.10.2010 by Bochkanov Sergey
7073 *************************************************************************/
7074 void minbcsetprecdefault(const minbcstate &state, const xparams _xparams = alglib::xdefault);
7075 
7076 
7077 /*************************************************************************
7078 Modification  of  the  preconditioner:  diagonal of approximate Hessian is
7079 used.
7080 
7081 INPUT PARAMETERS:
7082     State   -   structure which stores algorithm state
7083     D       -   diagonal of the approximate Hessian, array[0..N-1],
7084                 (if larger, only leading N elements are used).
7085 
7086 NOTE 1: D[i] should be positive. Exception will be thrown otherwise.
7087 
7088 NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
7089 
7090   -- ALGLIB --
7091      Copyright 13.10.2010 by Bochkanov Sergey
7092 *************************************************************************/
7093 void minbcsetprecdiag(const minbcstate &state, const real_1d_array &d, const xparams _xparams = alglib::xdefault);
7094 
7095 
7096 /*************************************************************************
7097 Modification of the preconditioner: scale-based diagonal preconditioning.
7098 
7099 This preconditioning mode can be useful when you  don't  have  approximate
7100 diagonal of Hessian, but you know that your  variables  are  badly  scaled
7101 (for  example,  one  variable is in [1,10], and another in [1000,100000]),
7102 and most part of the ill-conditioning comes from different scales of vars.
7103 
7104 In this case simple  scale-based  preconditioner,  with H[i] = 1/(s[i]^2),
7105 can greatly improve convergence.
7106 
IMPORTANT: you should set the scale of your variables with a MinBCSetScale()
call (before or after the MinBCSetPrecScale() call). Without knowledge  of
the scale of your variables the scale-based preconditioner  will  be  just
a unit matrix.
7111 
7112 INPUT PARAMETERS:
7113     State   -   structure which stores algorithm state
7114 
7115   -- ALGLIB --
7116      Copyright 13.10.2010 by Bochkanov Sergey
7117 *************************************************************************/
7118 void minbcsetprecscale(const minbcstate &state, const xparams _xparams = alglib::xdefault);
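/*************************************************************************
USAGE SKETCH: telling the solver that the second variable lives on a much
larger scale than the first one, and reusing that  information  for  the
scale-based diagonal preconditioner described above.

    real_1d_array s = "[1,10000]";
    minbcsetscale(state, s);        // affects stopping tests and FD steps
    minbcsetprecscale(state);       // H[i] = 1/(s[i]^2) preconditioner
*************************************************************************/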
7119 
7120 
7121 /*************************************************************************
7122 This function turns on/off reporting.
7123 
7124 INPUT PARAMETERS:
7125     State   -   structure which stores algorithm state
7126     NeedXRep-   whether iteration reports are needed or not
7127 
7128 If NeedXRep is True, algorithm will call rep() callback function if  it is
7129 provided to MinBCOptimize().
7130 
7131   -- ALGLIB --
7132      Copyright 28.11.2010 by Bochkanov Sergey
7133 *************************************************************************/
7134 void minbcsetxrep(const minbcstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
7135 
7136 
7137 /*************************************************************************
7138 This function sets maximum step length
7139 
7140 INPUT PARAMETERS:
7141     State   -   structure which stores algorithm state
7142     StpMax  -   maximum step length, >=0. Set StpMax to 0.0,  if you don't
7143                 want to limit step length.
7144 
7145 Use this subroutine when you optimize target function which contains exp()
7146 or  other  fast  growing  functions,  and optimization algorithm makes too
7147 large  steps  which  lead   to overflow. This function allows us to reject
7148 steps  that  are  too  large  (and  therefore  expose  us  to the possible
overflow) without actually calculating the function value at x+stp*d.
7150 
7151   -- ALGLIB --
7152      Copyright 02.04.2010 by Bochkanov Sergey
7153 *************************************************************************/
7154 void minbcsetstpmax(const minbcstate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
7155 
7156 
7157 /*************************************************************************
7158 This function provides reverse communication interface
7159 Reverse communication interface is not documented or recommended to use.
7160 See below for functions which provide better documented API
7161 *************************************************************************/
7162 bool minbciteration(const minbcstate &state, const xparams _xparams = alglib::xdefault);
7163 
7164 
7165 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear optimizer

These functions accept the following parameters:
7169     state   -   algorithm state
7170     func    -   callback which calculates function (or merit function)
7171                 value func at given point x
7172     grad    -   callback which calculates function (or merit function)
7173                 value func and gradient grad at given point x
7174     rep     -   optional callback which is called after each iteration
7175                 can be NULL
    ptr     -   optional pointer which is passed to func/grad/rep
7177                 can be NULL
7178 
7179 NOTES:
7180 
7181 1. This function has two different implementations: one which  uses  exact
7182    (analytical) user-supplied gradient,  and one which uses function value
7183    only  and  numerically  differentiates  function  in  order  to  obtain
7184    gradient.
7185 
7186    Depending  on  the  specific  function  used to create optimizer object
7187    (either  MinBCCreate() for analytical gradient or  MinBCCreateF()
7188    for numerical differentiation) you should choose appropriate variant of
7189    MinBCOptimize() - one  which  accepts  function  AND gradient or one
7190    which accepts function ONLY.
7191 
7192    Be careful to choose variant of MinBCOptimize() which corresponds to
7193    your optimization scheme! Table below lists different  combinations  of
7194    callback (function/gradient) passed to MinBCOptimize()  and specific
7195    function used to create optimizer.
7196 
7197 
7198                      |         USER PASSED TO MinBCOptimize()
7199    CREATED WITH      |  function only   |  function and gradient
7200    ------------------------------------------------------------
7201    MinBCCreateF()    |     works               FAILS
7202    MinBCCreate()     |     FAILS               works
7203 
   Here "FAILS" denotes inappropriate combinations of  optimizer  creation
   function  and  MinBCOptimize()  version.  Attempts  to  use   such   a
   combination (for example, to create the optimizer with  MinBCCreateF()
   and to pass gradient information to MinBCOptimize()) will  lead  to  an
   exception being thrown. Either you did not pass the gradient when it WAS
   needed, or you passed the gradient when it was NOT needed.
7210 
7211   -- ALGLIB --
7212      Copyright 28.11.2010 by Bochkanov Sergey
7213 
7214 *************************************************************************/
7215 void minbcoptimize(minbcstate &state,
7216     void (*func)(const real_1d_array &x, double &func, void *ptr),
7217     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
7218     void *ptr = NULL,
7219     const xparams _xparams = alglib::xdefault);
7220 void minbcoptimize(minbcstate &state,
7221     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
7222     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
7223     void *ptr = NULL,
7224     const xparams _xparams = alglib::xdefault);
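/*************************************************************************
USAGE SKETCH: a complete analytic-gradient BC session tying together  the
calls described above (problem, bounds and tolerances are  illustrative).

    #include "optimization.h"
    using namespace alglib;

    // f(x) = 100*(x0+3)^4 + (x1-3)^4 and its gradient
    static void mygrad(const real_1d_array &x, double &func,
                       real_1d_array &grad, void *ptr)
    {
        double a = x[0]+3, b = x[1]-3;
        func    = 100*a*a*a*a + b*b*b*b;
        grad[0] = 400*a*a*a;
        grad[1] = 4*b*b*b;
    }

    int main()
    {
        real_1d_array x0   = "[0,0]";
        real_1d_array bndl = "[-1,-1]";
        real_1d_array bndu = "[+1,+1]";
        real_1d_array x;
        minbcstate    state;
        minbcreport   rep;

        minbccreate(x0, state);
        minbcsetbc(state, bndl, bndu);
        minbcsetcond(state, 0.0, 0.0, 1.0E-6, 0);
        minbcoptimize(state, mygrad);          // gradient-based overload
        minbcresults(state, x, rep);
        return 0;
    }
*************************************************************************/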
7225 
7226 
7227 /*************************************************************************
7228 This  function  activates/deactivates verification  of  the  user-supplied
7229 analytic gradient.
7230 
7231 Upon  activation  of  this  option  OptGuard  integrity  checker  performs
7232 numerical differentiation of your target function  at  the  initial  point
7233 (note: future versions may also perform check  at  the  final  point)  and
7234 compares numerical gradient with analytic one provided by you.
7235 
7236 If difference is too large, an error flag is set and optimization  session
7237 continues. After optimization session is over, you can retrieve the report
7238 which  stores  both  gradients  and  specific  components  highlighted  as
7239 suspicious by the OptGuard.
7240 
7241 The primary OptGuard report can be retrieved with minbcoptguardresults().
7242 
7243 IMPORTANT: gradient check is a high-overhead option which  will  cost  you
7244            about 3*N additional function evaluations. In many cases it may
7245            cost as much as the rest of the optimization session.
7246 
7247            YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO
7248            CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY.
7249 
7250 NOTE: unlike previous incarnation of the gradient checking code,  OptGuard
7251       does NOT interrupt optimization even if it discovers bad gradient.
7252 
7253 INPUT PARAMETERS:
7254     State       -   structure used to store algorithm state
7255     TestStep    -   verification step used for numerical differentiation:
7256                     * TestStep=0 turns verification off
7257                     * TestStep>0 activates verification
7258                     You should carefully choose TestStep. Value  which  is
7259                     too large (so large that  function  behavior  is  non-
7260                     cubic at this scale) will lead  to  false  alarms. Too
7261                     short step will result in rounding  errors  dominating
7262                     numerical derivative.
7263 
7264                     You may use different step for different parameters by
7265                     means of setting scale with minbcsetscale().
7266 
7267 === EXPLANATION ==========================================================
7268 
In order to verify the gradient, the algorithm performs the following steps:
7270   * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
7271     where X[i] is i-th component of the initial point and S[i] is a  scale
7272     of i-th parameter
7273   * F(X) is evaluated at these trial points
7274   * we perform one more evaluation in the middle point of the interval
7275   * we  build  cubic  model using function values and derivatives at trial
7276     points and we compare its prediction with actual value in  the  middle
7277     point
7278 
7279   -- ALGLIB --
7280      Copyright 15.06.2014 by Bochkanov Sergey
7281 *************************************************************************/
7282 void minbcoptguardgradient(const minbcstate &state, const double teststep, const xparams _xparams = alglib::xdefault);
7283 
7284 
7285 /*************************************************************************
7286 This  function  activates/deactivates nonsmoothness monitoring  option  of
7287 the  OptGuard  integrity  checker. Smoothness  monitor  silently  observes
7288 solution process and tries to detect ill-posed problems, i.e. ones with:
7289 a) discontinuous target function (non-C0)
7290 b) nonsmooth     target function (non-C1)
7291 
7292 Smoothness monitoring does NOT interrupt optimization  even if it suspects
7293 that your problem is nonsmooth. It just sets corresponding  flags  in  the
7294 OptGuard report which can be retrieved after optimization is over.
7295 
7296 Smoothness monitoring is a moderate overhead option which often adds  less
7297 than 1% to the optimizer running time. Thus, you can use it even for large
7298 scale problems.
7299 
7300 NOTE: OptGuard does  NOT  guarantee  that  it  will  always  detect  C0/C1
7301       continuity violations.
7302 
7303       First, minor errors are hard to  catch - say, a 0.0001 difference in
7304       the model values at two sides of the gap may be due to discontinuity
7305       of the model - or simply because the model has changed.
7306 
7307       Second, C1-violations  are  especially  difficult  to  detect  in  a
7308       noninvasive way. The optimizer usually  performs  very  short  steps
7309       near the nonsmoothness, and differentiation  usually   introduces  a
7310       lot of numerical noise.  It  is  hard  to  tell  whether  some  tiny
7311       discontinuity in the slope is due to real nonsmoothness or just  due
7312       to numerical noise alone.
7313 
      Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases  they  can  be
      spotted with a restart from a different initial point).
7317 
7318 INPUT PARAMETERS:
7319     state   -   algorithm state
7320     level   -   monitoring level:
7321                 * 0 - monitoring is disabled
7322                 * 1 - noninvasive low-overhead monitoring; function values
7323                       and/or gradients are recorded, but OptGuard does not
7324                       try to perform additional evaluations  in  order  to
7325                       get more information about suspicious locations.
7326 
7327 === EXPLANATION ==========================================================
7328 
7329 One major source of headache during optimization  is  the  possibility  of
7330 the coding errors in the target function/constraints (or their gradients).
7331 Such  errors   most   often   manifest   themselves  as  discontinuity  or
7332 nonsmoothness of the target/constraints.
7333 
7334 Another frequent situation is when you try to optimize something involving
7335 lots of min() and max() operations, i.e. nonsmooth target. Although not  a
7336 coding error, it is nonsmoothness anyway - and smooth  optimizers  usually
7337 stop right after encountering nonsmoothness, well before reaching solution.
7338 
OptGuard integrity checker helps you to catch such situations: it monitors
function values/gradients being passed to the optimizer and tries to detect
errors. Upon discovering a suspicious pair of points it raises  appropriate
7342 flag (and allows you to continue optimization). When optimization is done,
7343 you can study OptGuard result.
7344 
7345   -- ALGLIB --
7346      Copyright 21.11.2018 by Bochkanov Sergey
7347 *************************************************************************/
7348 void minbcoptguardsmoothness(const minbcstate &state, const ae_int_t level, const xparams _xparams = alglib::xdefault);
7349 void minbcoptguardsmoothness(const minbcstate &state, const xparams _xparams = alglib::xdefault);
7350 
7351 
7352 /*************************************************************************
7353 Results of OptGuard integrity check, should be called  after  optimization
7354 session is over.
7355 
7356 === PRIMARY REPORT =======================================================
7357 
7358 OptGuard performs several checks which are intended to catch common errors
7359 in the implementation of nonlinear function/gradient:
7360 * incorrect analytic gradient
7361 * discontinuous (non-C0) target functions (constraints)
7362 * nonsmooth     (non-C1) target functions (constraints)
7363 
7364 Each of these checks is activated with appropriate function:
7365 * minbcoptguardgradient() for gradient verification
7366 * minbcoptguardsmoothness() for C0/C1 checks
7367 
7368 Following flags are set when these errors are suspected:
7369 * rep.badgradsuspected, and additionally:
7370   * rep.badgradvidx for specific variable (gradient element) suspected
7371   * rep.badgradxbase, a point where gradient is tested
7372   * rep.badgraduser, user-provided gradient  (stored  as  2D  matrix  with
7373     single row in order to make  report  structure  compatible  with  more
7374     complex optimizers like MinNLC or MinLM)
7375   * rep.badgradnum,   reference    gradient    obtained    via   numerical
7376     differentiation (stored as  2D matrix with single row in order to make
7377     report structure compatible with more complex optimizers  like  MinNLC
7378     or MinLM)
7379 * rep.nonc0suspected
7380 * rep.nonc1suspected
7381 
7382 === ADDITIONAL REPORTS/LOGS ==============================================
7383 
Several different tests are performed to catch C0/C1 errors; you can  find
out which specific test signaled the error by looking at:
7386 * rep.nonc0test0positive, for non-C0 test #0
7387 * rep.nonc1test0positive, for non-C1 test #0
7388 * rep.nonc1test1positive, for non-C1 test #1
7389 
7390 Additional information (including line search logs)  can  be  obtained  by
7391 means of:
7392 * minbcoptguardnonc1test0results()
7393 * minbcoptguardnonc1test1results()
7394 which return detailed error reports, specific points where discontinuities
7395 were found, and so on.
7396 
7397 ==========================================================================
7398 
7399 INPUT PARAMETERS:
7400     state   -   algorithm state
7401 
7402 OUTPUT PARAMETERS:
7403     rep     -   generic OptGuard report;  more  detailed  reports  can  be
7404                 retrieved with other functions.
7405 
7406 NOTE: false negatives (nonsmooth problems are not identified as  nonsmooth
7407       ones) are possible although unlikely.
7408 
7409       The reason  is  that  you  need  to  make several evaluations around
7410       nonsmoothness  in  order  to  accumulate  enough  information  about
7411       function curvature. Say, if you start right from the nonsmooth point,
7412       optimizer simply won't get enough data to understand what  is  going
7413       wrong before it terminates due to abrupt changes in the  derivative.
7414       It is also  possible  that  "unlucky"  step  will  move  us  to  the
7415       termination too quickly.
7416 
7417       Our current approach is to have less than 0.1%  false  negatives  in
7418       our test examples  (measured  with  multiple  restarts  from  random
7419       points), and to have exactly 0% false positives.
7420 
7421   -- ALGLIB --
7422      Copyright 21.11.2018 by Bochkanov Sergey
7423 *************************************************************************/
7424 void minbcoptguardresults(const minbcstate &state, optguardreport &rep, const xparams _xparams = alglib::xdefault);
7425 
7426 
7427 /*************************************************************************
7428 Detailed results of the OptGuard integrity check for nonsmoothness test #0
7429 
7430 Nonsmoothness (non-C1) test #0 studies  function  values  (not  gradient!)
7431 obtained during line searches and monitors  behavior  of  the  directional
7432 derivative estimate.
7433 
7434 This test is less powerful than test #1, but it does  not  depend  on  the
7435 gradient values and thus it is more robust against artifacts introduced by
7436 numerical differentiation.
7437 
7438 Two reports are returned:
7439 * a "strongest" one, corresponding  to  line   search  which  had  highest
7440   value of the nonsmoothness indicator
7441 * a "longest" one, corresponding to line search which  had  more  function
7442   evaluations, and thus is more detailed
7443 
7444 In both cases following fields are returned:
7445 
* positive - is TRUE when the test flagged a suspicious point;  FALSE  if
  the test did not notice anything (in the latter case the  fields  below
  are empty).
7448 * x0[], d[] - arrays of length N which store initial point  and  direction
7449   for line search (d[] can be normalized, but does not have to)
7450 * stp[], f[] - arrays of length CNT which store step lengths and  function
7451   values at these points; f[i] is evaluated in x0+stp[i]*d.
7452 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
7453   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
7454   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
7456 
7457 ==========================================================================
7458 = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it -  you  will
7459 =                   see where C1 continuity is violated.
7460 ==========================================================================
7461 
7462 INPUT PARAMETERS:
7463     state   -   algorithm state
7464 
7465 OUTPUT PARAMETERS:
7466     strrep  -   C1 test #0 "strong" report
7467     lngrep  -   C1 test #0 "long" report
7468 
7469   -- ALGLIB --
7470      Copyright 21.11.2018 by Bochkanov Sergey
7471 *************************************************************************/
7472 void minbcoptguardnonc1test0results(const minbcstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams = alglib::xdefault);
7473 
7474 
7475 /*************************************************************************
7476 Detailed results of the OptGuard integrity check for nonsmoothness test #1
7477 
7478 Nonsmoothness (non-C1)  test  #1  studies  individual  components  of  the
7479 gradient computed during line search.
7480 
7481 When precise analytic gradient is provided this test is more powerful than
7482 test #0  which  works  with  function  values  and  ignores  user-provided
7483 gradient.  However,  test  #0  becomes  more   powerful   when   numerical
7484 differentiation is employed (in such cases test #1 detects  higher  levels
7485 of numerical noise and becomes too conservative).
7486 
7487 This test also tells specific components of the gradient which violate  C1
7488 continuity, which makes it more informative than #0, which just tells that
7489 continuity is violated.
7490 
7491 Two reports are returned:
7492 * a "strongest" one, corresponding  to  line   search  which  had  highest
7493   value of the nonsmoothness indicator
7494 * a "longest" one, corresponding to line search which  had  more  function
7495   evaluations, and thus is more detailed
7496 
7497 In both cases following fields are returned:
7498 
* positive - is TRUE when the test flagged a suspicious point;  FALSE  if
  the test did not notice anything (in the latter case the  fields  below
  are empty).
7501 * vidx - is an index of the variable in [0,N) with nonsmooth derivative
7502 * x0[], d[] - arrays of length N which store initial point  and  direction
7503   for line search (d[] can be normalized, but does not have to)
7504 * stp[], g[] - arrays of length CNT which store step lengths and  gradient
7505   values at these points; g[i] is evaluated in  x0+stp[i]*d  and  contains
7506   vidx-th component of the gradient.
7507 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
7508   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
7509   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
7511 
7512 ==========================================================================
= SHORTLY SPEAKING: build a 2D plot of (stp,g) and look at it -  you  will
7514 =                   see where C1 continuity is violated.
7515 ==========================================================================
7516 
7517 INPUT PARAMETERS:
7518     state   -   algorithm state
7519 
7520 OUTPUT PARAMETERS:
7521     strrep  -   C1 test #1 "strong" report
7522     lngrep  -   C1 test #1 "long" report
7523 
7524   -- ALGLIB --
7525      Copyright 21.11.2018 by Bochkanov Sergey
7526 *************************************************************************/
7527 void minbcoptguardnonc1test1results(const minbcstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams = alglib::xdefault);
7528 
7529 
7530 /*************************************************************************
7531 BC results
7532 
7533 INPUT PARAMETERS:
7534     State   -   algorithm state
7535 
7536 OUTPUT PARAMETERS:
7537     X       -   array[0..N-1], solution
7538     Rep     -   optimization report. You should check Rep.TerminationType
7539                 in  order  to  distinguish  successful  termination  from
7540                 unsuccessful one:
7541                 * -8    internal integrity control  detected  infinite or
7542                         NAN   values   in   function/gradient.   Abnormal
7543                         termination signalled.
7544                 * -3   inconsistent constraints.
7545                 *  1   relative function improvement is no more than EpsF.
7546                 *  2   scaled step is no more than EpsX.
7547                 *  4   scaled gradient norm is no more than EpsG.
7548                 *  5   MaxIts steps was taken
7549                 *  8   terminated by user who called minbcrequesttermination().
7550                        X contains point which was "current accepted"  when
7551                        termination request was submitted.
7552                 More information about fields of this  structure  can  be
7553                 found in the comments on MinBCReport datatype.
7554 
7555   -- ALGLIB --
7556      Copyright 28.11.2010 by Bochkanov Sergey
7557 *************************************************************************/
7558 void minbcresults(const minbcstate &state, real_1d_array &x, minbcreport &rep, const xparams _xparams = alglib::xdefault);
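

/*************************************************************************
USAGE SKETCH:

A short, informal sketch of retrieving the solution and checking the
termination code; rep.terminationtype mirrors the codes listed above.
Assumes the standard ALGLIB C++ interface (optimization.h, namespace
alglib); names are illustrative and error handling is omitted.

    #include <cstdio>
    #include "optimization.h"
    using namespace alglib;

    void report_bc_solution(const minbcstate &state)
    {
        real_1d_array x;
        minbcreport   rep;
        minbcresults(state, x, rep);
        if( rep.terminationtype>0 )        // codes 1,2,4,5,8 - success
            printf("solution: %s\n", x.tostring(6).c_str());
        else                               // codes -8,-3 - failure
            printf("failed, termination code %d\n", (int)rep.terminationtype);
    }
*************************************************************************/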
7559 
7560 
7561 /*************************************************************************
7562 BC results
7563 
Buffered implementation of MinBCResults() which uses a pre-allocated buffer
to store X[]. If the buffer is too small, it is resized. This function is
intended to be used in the inner cycles of performance-critical algorithms
where the array reallocation penalty is too large to be ignored.
7568 
7569   -- ALGLIB --
7570      Copyright 28.11.2010 by Bochkanov Sergey
7571 *************************************************************************/
7572 void minbcresultsbuf(const minbcstate &state, real_1d_array &x, minbcreport &rep, const xparams _xparams = alglib::xdefault);
7573 
7574 
7575 /*************************************************************************
7576 This subroutine restarts algorithm from new point.
7577 All optimization parameters (including constraints) are left unchanged.
7578 
This function allows one to solve multiple optimization problems (which
must have the same number of dimensions) without object reallocation
penalty.
7581 
7582 INPUT PARAMETERS:
7583     State   -   structure previously allocated with MinBCCreate call.
7584     X       -   new starting point.
7585 
7586   -- ALGLIB --
7587      Copyright 28.11.2010 by Bochkanov Sergey
7588 *************************************************************************/
7589 void minbcrestartfrom(const minbcstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
7590 
7591 
7592 /*************************************************************************
This subroutine submits a request for termination of the running optimizer.
It should be called from a user-supplied callback when the user decides
that it is time to "smoothly" terminate the optimization process. As a
result, the optimizer stops at the point which was "current accepted" when
the termination request was submitted and returns completion code 8
(successful termination).
7598 
7599 INPUT PARAMETERS:
7600     State   -   optimizer structure
7601 
NOTE: after a request for termination the optimizer may perform several
      additional calls to user-supplied callbacks. It is NOT guaranteed to
      stop immediately - it is only guaranteed that the results of these
      additional calls will be discarded later.

NOTE: calling this function on an optimizer which is NOT running will have
      no effect.
7609 
7610 NOTE: multiple calls to this function are possible. First call is counted,
7611       subsequent calls are silently ignored.
7612 
7613   -- ALGLIB --
7614      Copyright 08.10.2014 by Bochkanov Sergey
7615 *************************************************************************/
7616 void minbcrequesttermination(const minbcstate &state, const xparams _xparams = alglib::xdefault);
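

/*************************************************************************
USAGE SKETCH:

An informal sketch of one possible way to issue the request from inside a
report callback: the optimizer state is passed through the user pointer of
minbcoptimize() (declared earlier in this header). The callback name and
the stopping rule below are purely illustrative.

    #include "optimization.h"
    using namespace alglib;

    // Report callback which asks the optimizer to stop once the target
    // drops below a (purely illustrative) threshold.
    void my_rep(const real_1d_array &x, double func, void *ptr)
    {
        minbcstate *state = reinterpret_cast<minbcstate*>(ptr);
        if( func<1.0E-3 )
            minbcrequesttermination(*state);
    }

    // ... later, when starting the optimizer:
    //     minbcoptimize(state, my_grad, my_rep, &state);
*************************************************************************/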
7617 #endif
7618 
7619 #if defined(AE_COMPILE_MINNS) || !defined(AE_PARTIAL_BUILD)
7620 /*************************************************************************
7621                   NONSMOOTH NONCONVEX OPTIMIZATION
7622             SUBJECT TO BOX/LINEAR/NONLINEAR-NONSMOOTH CONSTRAINTS
7623 
7624 DESCRIPTION:
7625 
7626 The  subroutine  minimizes  function   F(x)  of N arguments subject to any
7627 combination of:
7628 * bound constraints
7629 * linear inequality constraints
7630 * linear equality constraints
7631 * nonlinear equality constraints Gi(x)=0
7632 * nonlinear inequality constraints Hi(x)<=0
7633 
7634 IMPORTANT: see MinNSSetAlgoAGS for important  information  on  performance
7635            restrictions of AGS solver.
7636 
7637 REQUIREMENTS:
7638 * starting point X0 must be feasible or not too far away from the feasible
7639   set
7640 * F(), G(), H() are continuous, locally Lipschitz  and  continuously  (but
7641   not necessarily twice) differentiable in an open dense  subset  of  R^N.
7642   Functions F(), G() and H() may be nonsmooth and non-convex.
  Informally speaking, this means that the functions are composed of large
  differentiable "patches", with nonsmoothness occurring only at the
  boundaries between these "patches".
  Most real-life nonsmooth functions satisfy these requirements: anything
  which involves a finite number of abs(), min() and max() operations is
  very likely to pass the test.
  For example, it is possible to optimize any of the following:
7650   * f=abs(x0)+2*abs(x1)
7651   * f=max(x0,x1)
7652   * f=sin(max(x0,x1)+abs(x2))
7653 * for nonlinearly constrained problems: F()  must  be  bounded from  below
7654   without nonlinear constraints (this requirement is due to the fact that,
7655   contrary to box and linear constraints, nonlinear ones  require  special
7656   handling).
* user must provide function values and gradients for F(), H(), G() at all
  points where the function/gradient can be calculated. If the optimizer
  requires a value exactly at the boundary between "patches" (say, at x=0
  for f=abs(x)), where the gradient is not defined, the user may resolve
  the tie arbitrarily (in this case - return either +1 or -1).
* the NS solver supports numerical differentiation, i.e. it may
  differentiate your function for you, but this results in a 2N-fold
  increase in function evaluations. It is not recommended unless you solve
  really small problems. See minnscreatef() for more information on this
  functionality.
7666 
7667 USAGE:
7668 
1. User initializes algorithm state with a MinNSCreate() call and chooses
   which solver to use. Some solver is used by default, with default
   settings, but you should NOT rely on the default choice. It may change
   in future releases of ALGLIB without notice, and no one can guarantee
   that the new solver will be able to solve your problem with default
   settings.

   On the other hand, if you choose the solver explicitly, you can be
   pretty sure that it will work with new ALGLIB releases.

   In the current release the following solvers can be used:
   * AGS solver (activated with the MinNSSetAlgoAGS() function)
7681 
7682 2. User adds boundary and/or linear and/or nonlinear constraints by  means
7683    of calling one of the following functions:
7684    a) MinNSSetBC() for boundary constraints
7685    b) MinNSSetLC() for linear constraints
7686    c) MinNSSetNLC() for nonlinear constraints
7687    You may combine (a), (b) and (c) in one optimization problem.
7688 
7689 3. User sets scale of the variables with MinNSSetScale() function. It   is
7690    VERY important to set  scale  of  the  variables,  because  nonlinearly
7691    constrained problems are hard to solve when variables are badly scaled.
7692 
7693 4. User sets stopping conditions with MinNSSetCond().
7694 
5. User calls the MinNSOptimize() function which takes the algorithm state
   and a pointer (delegate, etc.) to a callback function which calculates
   F/G/H.

6. User calls MinNSResults() to get the solution.

7. Optionally, user may call MinNSRestartFrom() to solve another problem
   with the same N but another starting point. MinNSRestartFrom() allows
   reusing an already initialized structure.

A compact usage sketch is given after the declarations below.
7704 
7705 
7706 INPUT PARAMETERS:
7707     N       -   problem dimension, N>0:
7708                 * if given, only leading N elements of X are used
7709                 * if not given, automatically determined from size of X
7710     X       -   starting point, array[N]:
7711                 * it is better to set X to a feasible point
7712                 * but X can be infeasible, in which case algorithm will try
7713                   to find feasible point first, using X as initial
7714                   approximation.
7715 
7716 OUTPUT PARAMETERS:
7717     State   -   structure stores algorithm state
7718 
7719 NOTE: minnscreatef() function may be used if  you  do  not  have  analytic
7720       gradient.   This   function  creates  solver  which  uses  numerical
7721       differentiation with user-specified step.
7722 
7723   -- ALGLIB --
7724      Copyright 18.05.2015 by Bochkanov Sergey
7725 *************************************************************************/
7726 void minnscreate(const ae_int_t n, const real_1d_array &x, minnsstate &state, const xparams _xparams = alglib::xdefault);
7727 void minnscreate(const real_1d_array &x, minnsstate &state, const xparams _xparams = alglib::xdefault);
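

/*************************************************************************
USAGE SKETCH:

A minimal, informal sketch of the workflow described above, applied to the
unconstrained target f(x) = |x0| + 2|x1|. It assumes the standard ALGLIB
C++ interface (optimization.h, namespace alglib); callback and variable
names are illustrative, and error handling (alglib exceptions) is omitted.

    #include <cmath>
    #include <cstdio>
    #include "optimization.h"
    using namespace alglib;

    // Single-row fi[]/jac[]: fi[0] is the target, row 0 of jac[] is its
    // (sub)gradient. At x[i]=0 the tie is resolved by returning +1/+2,
    // which is allowed (see the requirements above).
    void nsfunc_jac(const real_1d_array &x, real_1d_array &fi,
                    real_2d_array &jac, void *ptr)
    {
        fi[0]     = fabs(x[0])+2*fabs(x[1]);
        jac[0][0] = x[0]>=0 ? 1.0 : -1.0;
        jac[0][1] = x[1]>=0 ? 2.0 : -2.0;
    }

    int main()
    {
        real_1d_array x0 = "[1,1]";
        real_1d_array s  = "[1,1]";
        real_1d_array x;
        minnsstate  state;
        minnsreport rep;

        minnscreate(x0, state);             // 1. create solver
        minnssetalgoags(state, 0.1, 0.0);   //    choose AGS explicitly
        minnssetscale(state, s);            // 3. variable scales
        minnssetcond(state, 1.0E-5, 0);     // 4. stopping conditions
        minnsoptimize(state, nsfunc_jac);   // 5. run optimization
        minnsresults(state, x, rep);        // 6. retrieve solution
        printf("%s\n", x.tostring(4).c_str());
        return 0;
    }
*************************************************************************/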
7728 
7729 
7730 /*************************************************************************
Version of minnscreate() which uses numerical differentiation, i.e. you do
not have to calculate derivatives yourself. However, this version needs 2N
times more function evaluations.

A 2-point differentiation formula is used, because the more precise 4-point
formula is unstable when applied to non-smooth functions.
7737 
7738 INPUT PARAMETERS:
7739     N       -   problem dimension, N>0:
7740                 * if given, only leading N elements of X are used
7741                 * if not given, automatically determined from size of X
7742     X       -   starting point, array[N]:
7743                 * it is better to set X to a feasible point
7744                 * but X can be infeasible, in which case algorithm will try
7745                   to find feasible point first, using X as initial
7746                   approximation.
7747     DiffStep-   differentiation  step,  DiffStep>0.   Algorithm   performs
7748                 numerical differentiation  with  step  for  I-th  variable
7749                 being equal to DiffStep*S[I] (here S[] is a  scale vector,
7750                 set by minnssetscale() function).
7751                 Do not use  too  small  steps,  because  it  may  lead  to
7752                 catastrophic cancellation during intermediate calculations.
7753 
7754 OUTPUT PARAMETERS:
7755     State   -   structure stores algorithm state
7756 
7757   -- ALGLIB --
7758      Copyright 18.05.2015 by Bochkanov Sergey
7759 *************************************************************************/
7760 void minnscreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minnsstate &state, const xparams _xparams = alglib::xdefault);
7761 void minnscreatef(const real_1d_array &x, const double diffstep, minnsstate &state, const xparams _xparams = alglib::xdefault);
7762 
7763 
7764 /*************************************************************************
7765 This function sets boundary constraints.
7766 
7767 Boundary constraints are inactive by default (after initial creation).
7768 They are preserved after algorithm restart with minnsrestartfrom().
7769 
7770 INPUT PARAMETERS:
7771     State   -   structure stores algorithm state
7772     BndL    -   lower bounds, array[N].
7773                 If some (all) variables are unbounded, you may specify
7774                 very small number or -INF.
7775     BndU    -   upper bounds, array[N].
7776                 If some (all) variables are unbounded, you may specify
7777                 very large number or +INF.
7778 
NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case the I-th
variable will be "frozen" at X[i]=BndL[i]=BndU[i].

NOTE 2: the AGS solver has the following useful properties:
* bound constraints are always satisfied exactly
* the function is evaluated only INSIDE the area specified by the bound
  constraints, even when numerical differentiation is used (the algorithm
  adjusts nodes according to the boundary constraints)
7787 
7788   -- ALGLIB --
7789      Copyright 18.05.2015 by Bochkanov Sergey
7790 *************************************************************************/
7791 void minnssetbc(const minnsstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
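

/*************************************************************************
USAGE SKETCH:

A short, informal sketch for a (hypothetical) 3-dimensional problem: x0 is
bounded from below, x1 is unbounded, x2 is frozen. fp_neginf/fp_posinf are
the infinities provided by ap.h; the function name is illustrative.

    #include "optimization.h"
    using namespace alglib;

    void set_box(const minnsstate &state)
    {
        real_1d_array bndl, bndu;
        bndl.setlength(3);
        bndu.setlength(3);
        bndl[0] = 0.0;        bndu[0] = fp_posinf;   // x0 >= 0
        bndl[1] = fp_neginf;  bndu[1] = fp_posinf;   // x1 unbounded
        bndl[2] = 2.0;        bndu[2] = 2.0;         // x2 frozen at 2 (NOTE 1)
        minnssetbc(state, bndl, bndu);
    }
*************************************************************************/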
7792 
7793 
7794 /*************************************************************************
7795 This function sets linear constraints.
7796 
7797 Linear constraints are inactive by default (after initial creation).
7798 They are preserved after algorithm restart with minnsrestartfrom().
7799 
7800 INPUT PARAMETERS:
7801     State   -   structure previously allocated with minnscreate() call.
7802     C       -   linear constraints, array[K,N+1].
7803                 Each row of C represents one constraint, either equality
7804                 or inequality (see below):
7805                 * first N elements correspond to coefficients,
7806                 * last element corresponds to the right part.
7807                 All elements of C (including right part) must be finite.
7808     CT      -   type of constraints, array[K]:
7809                 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
7810                 * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n+1]
7811                 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
7812     K       -   number of equality/inequality constraints, K>=0:
7813                 * if given, only leading K elements of C/CT are used
7814                 * if not given, automatically determined from sizes of C/CT
7815 
7816 NOTE: linear (non-bound) constraints are satisfied only approximately:
7817 
7818 * there always exists some minor violation (about current sampling  radius
7819   in magnitude during optimization, about EpsX in the solution) due to use
7820   of penalty method to handle constraints.
7821 * numerical differentiation, if used, may  lead  to  function  evaluations
7822   outside  of the feasible  area,   because   algorithm  does  NOT  change
7823   numerical differentiation formula according to linear constraints.
7824 
If you want constraints to be satisfied exactly, try to reformulate your
problem in such a manner that all constraints become boundary ones (this
kind of constraint is always satisfied exactly, both in the final solution
and at all intermediate points).
7829 
7830   -- ALGLIB --
7831      Copyright 18.05.2015 by Bochkanov Sergey
7832 *************************************************************************/
7833 void minnssetlc(const minnsstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
7834 void minnssetlc(const minnsstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
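

/*************************************************************************
USAGE SKETCH:

A short, informal sketch for a (hypothetical) 2-dimensional problem with
one inequality and one equality constraint. Each row of C holds the N
coefficients followed by the right part; K is taken from the sizes of C/CT
when the shorter overload is used. The function name is illustrative.

    #include "optimization.h"
    using namespace alglib;

    void set_linear(const minnsstate &state)
    {
        //  x0 + x1 <= 2   (CT=-1)
        //  x0 -  x1  = 0  (CT= 0)
        real_2d_array    c  = "[[1,1,2],[1,-1,0]]";
        integer_1d_array ct = "[-1,0]";
        minnssetlc(state, c, ct);
    }
*************************************************************************/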
7835 
7836 
7837 /*************************************************************************
7838 This function sets nonlinear constraints.
7839 
In fact, this function sets the NUMBER of nonlinear constraints. The
constraints themselves (constraint functions) are passed to the
minnsoptimize() method. This method requires a user-defined vector function
F[] and its Jacobian J[], where:
7844 * first component of F[] and first row  of  Jacobian  J[]  correspond   to
7845   function being minimized
7846 * next NLEC components of F[] (and rows  of  J)  correspond  to  nonlinear
7847   equality constraints G_i(x)=0
7848 * next NLIC components of F[] (and rows  of  J)  correspond  to  nonlinear
7849   inequality constraints H_i(x)<=0
7850 
7851 NOTE: you may combine nonlinear constraints with linear/boundary ones.  If
7852       your problem has mixed constraints, you  may explicitly specify some
7853       of them as linear ones. It may help optimizer to  handle  them  more
7854       efficiently.
7855 
7856 INPUT PARAMETERS:
7857     State   -   structure previously allocated with minnscreate() call.
7858     NLEC    -   number of Non-Linear Equality Constraints (NLEC), >=0
    NLIC    -   number of Non-Linear Inequality Constraints (NLIC), >=0
7860 
7861 NOTE 1: nonlinear constraints are satisfied only  approximately!   It   is
7862         possible   that  algorithm  will  evaluate  function  outside   of
7863         the feasible area!
7864 
7865 NOTE 2: algorithm scales variables  according  to   scale   specified   by
7866         minnssetscale()  function,  so  it can handle problems with  badly
7867         scaled variables (as long as we KNOW their scales).
7868 
7869         However,  there  is  no  way  to  automatically  scale   nonlinear
7870         constraints Gi(x) and Hi(x). Inappropriate scaling  of  Gi/Hi  may
        ruin convergence. Solving a problem with the constraint
        "1000*G0(x)=0" is NOT the same as solving it with the constraint
        "0.001*G0(x)=0".

        It means that YOU are the one who is responsible for the correct
        scaling of the nonlinear constraints Gi(x) and Hi(x). We recommend
        scaling the nonlinear constraints in such a way that the I-th
        component of dG/dX (or dH/dx) has approximately unit magnitude (for
        problems with unit scale) or magnitude approximately equal to
        1/S[i] (where S is the scale set by the minnssetscale() function).
7880 
7881 NOTE 3: nonlinear constraints are always hard to handle,  no  matter  what
7882         algorithm you try to use. Even basic box/linear constraints modify
7883         function  curvature   by  adding   valleys  and  ridges.  However,
7884         nonlinear constraints add valleys which are very  hard  to  follow
7885         due to their "curved" nature.
7886 
        It means that optimization with a single nonlinear constraint may
        be significantly slower than optimization with multiple linear
        ones. This is a normal situation, and we recommend you to carefully
        choose the Penalty parameter of minnssetalgoags(), because a too
        large value may slow down convergence.
7892 
7893 
7894   -- ALGLIB --
7895      Copyright 18.05.2015 by Bochkanov Sergey
7896 *************************************************************************/
7897 void minnssetnlc(const minnsstate &state, const ae_int_t nlec, const ae_int_t nlic, const xparams _xparams = alglib::xdefault);
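

/*************************************************************************
USAGE SKETCH:

An informal sketch of the fi[]/jac[] layout for a (hypothetical)
2-dimensional problem with NLEC=1 and NLIC=1: the target |x0|+|x1|, the
equality x0+x1-1=0 and the inequality -x0<=0. The callback name and the
penalty value are illustrative; see minnssetalgoags() for the penalty
requirement.

    #include <cmath>
    #include "optimization.h"
    using namespace alglib;

    // fi[0]/row 0 - target, fi[1]/row 1 - equality G0, fi[2]/row 2 - inequality H0
    void nlc_jac(const real_1d_array &x, real_1d_array &fi,
                 real_2d_array &jac, void *ptr)
    {
        fi[0] = fabs(x[0])+fabs(x[1]);            // target F(x)
        jac[0][0] = x[0]>=0 ? 1.0 : -1.0;
        jac[0][1] = x[1]>=0 ? 1.0 : -1.0;
        fi[1] = x[0]+x[1]-1.0;                    // G0(x) = 0
        jac[1][0] = 1.0;
        jac[1][1] = 1.0;
        fi[2] = -x[0];                            // H0(x) <= 0
        jac[2][0] = -1.0;
        jac[2][1] = 0.0;
    }

    // ... minnssetnlc(state, 1, 1);
    //     minnssetalgoags(state, 0.1, 10.0);   // nonzero penalty is required here
    //     minnsoptimize(state, nlc_jac);
*************************************************************************/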
7898 
7899 
7900 /*************************************************************************
7901 This function sets stopping conditions for iterations of optimizer.
7902 
7903 INPUT PARAMETERS:
7904     State   -   structure which stores algorithm state
7905     EpsX    -   >=0
7906                 The AGS solver finishes its work if  on  k+1-th  iteration
7907                 sampling radius decreases below EpsX.
7908     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
7909                 iterations is unlimited.
7910 
Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic
stopping criterion selection. We do not recommend relying on the default
choice in production code.
7914 
7915   -- ALGLIB --
7916      Copyright 18.05.2015 by Bochkanov Sergey
7917 *************************************************************************/
7918 void minnssetcond(const minnsstate &state, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
7919 
7920 
7921 /*************************************************************************
This function sets scaling coefficients for the MinNS optimizer.
7923 
7924 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
7925 size and gradient are scaled before comparison with tolerances).  Scale of
7926 the I-th variable is a translation invariant measure of:
7927 a) "how large" the variable is
7928 b) how large the step should be to make significant changes in the function
7929 
7930 Scaling is also used by finite difference variant of the optimizer  - step
7931 along I-th axis is equal to DiffStep*S[I].
7932 
7933 INPUT PARAMETERS:
7934     State   -   structure stores algorithm state
7935     S       -   array[N], non-zero scaling coefficients
7936                 S[i] may be negative, sign doesn't matter.
7937 
7938   -- ALGLIB --
7939      Copyright 18.05.2015 by Bochkanov Sergey
7940 *************************************************************************/
7941 void minnssetscale(const minnsstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
7942 
7943 
7944 /*************************************************************************
7945 This function tells MinNS unit to use  AGS  (adaptive  gradient  sampling)
7946 algorithm for nonsmooth constrained  optimization.  This  algorithm  is  a
7947 slight modification of one described in  "An  Adaptive  Gradient  Sampling
Algorithm for Nonsmooth Optimization" by Frank E. Curtis and Xiaocun Que.
7949 
This optimizer has the following benefits and drawbacks:
+ robustness; it can be used with nonsmooth and nonconvex functions.
+ relatively easy tuning; most of the metaparameters are easy to select.
- it has the convergence rate of steepest descent, slower than CG/LBFGS.
- each iteration involves evaluation of ~2N gradient values and solution
  of a 2Nx2N quadratic programming problem, which limits applicability of
  the algorithm to small-scale problems (up to 50-100 variables).
7957 
7958 IMPORTANT: this  algorithm  has  convergence  guarantees,   i.e.  it  will
7959            steadily move towards some stationary point of the function.
7960 
           However, "stationary point" does not always mean "solution".
           Nonsmooth problems often have "flat spots", i.e. areas where the
           function does not change at all. Such "flat spots" are
           stationary points by definition, and the algorithm may get stuck
           there.
7965 
7966            Nonsmooth CONVEX tasks are not prone to  this  problem. Say, if
7967            your function has form f()=MAX(f0,f1,...), and f_i are  convex,
7968            then f() is convex too and you have guaranteed  convergence  to
7969            solution.
7970 
7971 INPUT PARAMETERS:
7972     State   -   structure which stores algorithm state
7973     Radius  -   initial sampling radius, >=0.
7974 
                Internally multiplied by the vector of per-variable scales
                specified by minnssetscale().
7977 
7978                 You should select relatively large sampling radius, roughly
7979                 proportional to scaled length of the first  steps  of  the
7980                 algorithm. Something close to 0.1 in magnitude  should  be
7981                 good for most problems.
7982 
7983                 AGS solver can automatically decrease radius, so too large
7984                 radius is  not a problem (assuming that you  won't  choose
7985                 so large radius that algorithm  will  sample  function  in
7986                 too far away points, where gradient value is irrelevant).
7987 
7988                 Too small radius won't cause algorithm to fail, but it may
7989                 slow down algorithm (it may  have  to  perform  too  short
7990                 steps).
7991     Penalty -   penalty coefficient for nonlinear constraints:
7992                 * for problem with nonlinear constraints  should  be  some
7993                   problem-specific  positive   value,  large  enough  that
7994                   penalty term changes shape of the function.
7995                   Starting  from  some  problem-specific   value   penalty
7996                   coefficient becomes  large  enough  to  exactly  enforce
7997                   nonlinear constraints;  larger  values  do  not  improve
7998                   precision.
7999                   Increasing it too much may slow down convergence, so you
8000                   should choose it carefully.
8001                 * can be zero for problems WITHOUT  nonlinear  constraints
8002                   (i.e. for unconstrained ones or ones with  just  box  or
8003                   linear constraints)
8004                 * if you specify zero value for problem with at least  one
8005                   nonlinear  constraint,  algorithm  will  terminate  with
8006                   error code -1.
8007 
8008 ALGORITHM OUTLINE
8009 
8010 The very basic outline of unconstrained AGS algorithm is given below:
8011 
0. If the sampling radius is below EpsX or we performed more than MaxIts
   iterations - STOP.
8014 1. sample O(N) gradient values at random locations  around  current point;
8015    informally speaking, this sample is an implicit piecewise  linear model
8016    of the function, although algorithm formulation does  not  mention that
8017    explicitly
8018 2. solve quadratic programming problem in order to find descent direction
8019 3. if QP solver tells us that we  are  near  solution,  decrease  sampling
8020    radius and move to (0)
8021 4. perform backtracking line search
8022 5. after moving to new point, goto (0)
8023 
8024 As for the constraints:
8025 * box constraints are handled exactly  by  modification  of  the  function
8026   being minimized
8027 * linear/nonlinear constraints are handled by adding L1  penalty.  Because
8028   our solver can handle nonsmoothness, we can  use  L1  penalty  function,
8029   which is an exact one  (i.e.  exact  solution  is  returned  under  such
8030   penalty).
8031 * penalty coefficient for  linear  constraints  is  chosen  automatically;
8032   however, penalty coefficient for nonlinear constraints must be specified
8033   by user.
8034 
8035   -- ALGLIB --
8036      Copyright 18.05.2015 by Bochkanov Sergey
8037 *************************************************************************/
8038 void minnssetalgoags(const minnsstate &state, const double radius, const double penalty, const xparams _xparams = alglib::xdefault);
8039 
8040 
8041 /*************************************************************************
8042 This function turns on/off reporting.
8043 
8044 INPUT PARAMETERS:
8045     State   -   structure which stores algorithm state
8046     NeedXRep-   whether iteration reports are needed or not
8047 
8048 If NeedXRep is True, algorithm will call rep() callback function if  it is
8049 provided to minnsoptimize().
8050 
8051   -- ALGLIB --
8052      Copyright 28.11.2010 by Bochkanov Sergey
8053 *************************************************************************/
8054 void minnssetxrep(const minnsstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
8055 
8056 
8057 /*************************************************************************
This subroutine submits a request for termination of the running optimizer.
It should be called from a user-supplied callback when the user decides
that it is time to "smoothly" terminate the optimization process. As a
result, the optimizer stops at the point which was "current accepted" when
the termination request was submitted and returns completion code 8
(successful termination).
8063 
8064 INPUT PARAMETERS:
8065     State   -   optimizer structure
8066 
NOTE: after a request for termination the optimizer may perform several
      additional calls to user-supplied callbacks. It is NOT guaranteed to
      stop immediately - it is only guaranteed that the results of these
      additional calls will be discarded later.

NOTE: calling this function on an optimizer which is NOT running will have
      no effect.
8074 
8075 NOTE: multiple calls to this function are possible. First call is counted,
8076       subsequent calls are silently ignored.
8077 
8078   -- ALGLIB --
8079      Copyright 18.05.2015 by Bochkanov Sergey
8080 *************************************************************************/
8081 void minnsrequesttermination(const minnsstate &state, const xparams _xparams = alglib::xdefault);
8082 
8083 
8084 /*************************************************************************
This function provides the reverse communication interface.
The reverse communication interface is not documented and is not
recommended for use. See below for functions which provide a better
documented API.
8088 *************************************************************************/
8089 bool minnsiteration(const minnsstate &state, const xparams _xparams = alglib::xdefault);
8090 
8091 
8092 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear
optimizer.

These functions accept the following parameters:
8096     state   -   algorithm state
8097     fvec    -   callback which calculates function vector fi[]
8098                 at given point x
8099     jac     -   callback which calculates function vector fi[]
8100                 and Jacobian jac at given point x
8101     rep     -   optional callback which is called after each iteration
8102                 can be NULL
8103     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
8104                 can be NULL
8105 
8106 
8107 NOTES:
8108 
8109 1. This function has two different implementations: one which  uses  exact
8110    (analytical) user-supplied Jacobian, and one which uses  only  function
8111    vector and numerically  differentiates  function  in  order  to  obtain
8112    gradient.
8113 
8114    Depending  on  the  specific  function  used to create optimizer object
8115    you should choose appropriate variant of  minnsoptimize() -  one  which
8116    accepts function AND Jacobian or one which accepts ONLY function.
8117 
8118    Be careful to choose variant of minnsoptimize()  which  corresponds  to
8119    your optimization scheme! Table below lists different  combinations  of
8120    callback (function/gradient) passed to minnsoptimize()    and  specific
8121    function used to create optimizer.
8122 
8123 
8124                      |         USER PASSED TO minnsoptimize()
8125    CREATED WITH      |  function only   |  function and gradient
8126    ------------------------------------------------------------
8127    minnscreatef()    |     works               FAILS
8128    minnscreate()     |     FAILS               works
8129 
   Here "FAILS" denotes inappropriate combinations of the optimizer creation
   function and the minnsoptimize() version. Attempts to use such a
   combination will lead to an exception. Either you did not pass the
   gradient when it WAS needed or you passed the gradient when it was NOT
   needed.
8134 
8135   -- ALGLIB --
8136      Copyright 18.05.2015 by Bochkanov Sergey
8137 
8138 *************************************************************************/
8139 void minnsoptimize(minnsstate &state,
8140     void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
8141     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
8142     void *ptr = NULL,
8143     const xparams _xparams = alglib::xdefault);
8144 void minnsoptimize(minnsstate &state,
8145     void  (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
8146     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
8147     void *ptr = NULL,
8148     const xparams _xparams = alglib::xdefault);
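

/*************************************************************************
USAGE SKETCH:

An informal sketch of the function-only variant, which matches solvers
created with minnscreatef() (see the table above); pairing it with
minnscreate() would raise an exception. The callback name and the DiffStep
value are illustrative.

    #include <cmath>
    #include "optimization.h"
    using namespace alglib;

    // Fills fi[] only; no Jacobian is provided.
    void nsfunc_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    {
        fi[0] = fabs(x[0])+2*fabs(x[1]);
    }

    // ... real_1d_array x0 = "[1,1]";
    //     minnsstate state;
    //     minnscreatef(x0, 0.0001, state);    // numerical differentiation
    //     minnsoptimize(state, nsfunc_fvec);  // function-only variant
*************************************************************************/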
8149 
8150 
8151 /*************************************************************************
8152 MinNS results
8153 
8154 INPUT PARAMETERS:
8155     State   -   algorithm state
8156 
8157 OUTPUT PARAMETERS:
8158     X       -   array[0..N-1], solution
8159     Rep     -   optimization report. You should check Rep.TerminationType
8160                 in  order  to  distinguish  successful  termination  from
8161                 unsuccessful one:
8162                 * -8   internal integrity control  detected  infinite  or
8163                        NAN   values   in   function/gradient.    Abnormal
8164                        termination signalled.
8165                 * -3   box constraints are inconsistent
8166                 * -1   inconsistent parameters were passed:
8167                        * penalty parameter for minnssetalgoags() is zero,
8168                          but we have nonlinear constraints set by minnssetnlc()
8169                 *  2   sampling radius decreased below epsx
8170                 *  7    stopping conditions are too stringent,
8171                         further improvement is impossible,
8172                         X contains best point found so far.
8173                 *  8    User requested termination via minnsrequesttermination()
8174 
8175   -- ALGLIB --
8176      Copyright 18.05.2015 by Bochkanov Sergey
8177 *************************************************************************/
8178 void minnsresults(const minnsstate &state, real_1d_array &x, minnsreport &rep, const xparams _xparams = alglib::xdefault);
8179 
8180 
8181 /*************************************************************************
8182 
Buffered implementation of minnsresults() which uses a pre-allocated buffer
to store X[]. If the buffer is too small, it is resized. This function is
intended to be used in the inner cycles of performance-critical algorithms
where the array reallocation penalty is too large to be ignored.
8187 
8188   -- ALGLIB --
8189      Copyright 18.05.2015 by Bochkanov Sergey
8190 *************************************************************************/
8191 void minnsresultsbuf(const minnsstate &state, real_1d_array &x, minnsreport &rep, const xparams _xparams = alglib::xdefault);
8192 
8193 
8194 /*************************************************************************
8195 This subroutine restarts algorithm from new point.
8196 All optimization parameters (including constraints) are left unchanged.
8197 
This function allows one to solve multiple optimization problems (which
must have the same number of dimensions) without object reallocation
penalty.
8200 
8201 INPUT PARAMETERS:
8202     State   -   structure previously allocated with minnscreate() call.
8203     X       -   new starting point.
8204 
8205   -- ALGLIB --
8206      Copyright 18.05.2015 by Bochkanov Sergey
8207 *************************************************************************/
8208 void minnsrestartfrom(const minnsstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
8209 #endif
8210 
8211 #if defined(AE_COMPILE_MINCOMP) || !defined(AE_PARTIAL_BUILD)
8212 /*************************************************************************
8213 Obsolete function, use MinLBFGSSetPrecDefault() instead.
8214 
8215   -- ALGLIB --
8216      Copyright 13.10.2010 by Bochkanov Sergey
8217 *************************************************************************/
8218 void minlbfgssetdefaultpreconditioner(const minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
8219 
8220 
8221 /*************************************************************************
8222 Obsolete function, use MinLBFGSSetCholeskyPreconditioner() instead.
8223 
8224   -- ALGLIB --
8225      Copyright 13.10.2010 by Bochkanov Sergey
8226 *************************************************************************/
8227 void minlbfgssetcholeskypreconditioner(const minlbfgsstate &state, const real_2d_array &p, const bool isupper, const xparams _xparams = alglib::xdefault);
8228 
8229 
8230 /*************************************************************************
This is an obsolete function which was used by a previous version of the
BLEIC optimizer. It does nothing in the current version of BLEIC.
8233 
8234   -- ALGLIB --
8235      Copyright 28.11.2010 by Bochkanov Sergey
8236 *************************************************************************/
8237 void minbleicsetbarrierwidth(const minbleicstate &state, const double mu, const xparams _xparams = alglib::xdefault);
8238 
8239 
8240 /*************************************************************************
This is an obsolete function which was used by a previous version of the
BLEIC optimizer. It does nothing in the current version of BLEIC.
8243 
8244   -- ALGLIB --
8245      Copyright 28.11.2010 by Bochkanov Sergey
8246 *************************************************************************/
8247 void minbleicsetbarrierdecay(const minbleicstate &state, const double mudecay, const xparams _xparams = alglib::xdefault);
8248 
8249 
8250 /*************************************************************************
8251 Obsolete optimization algorithm.
8252 Was replaced by MinBLEIC subpackage.
8253 
8254   -- ALGLIB --
8255      Copyright 25.03.2010 by Bochkanov Sergey
8256 *************************************************************************/
8257 void minasacreate(const ae_int_t n, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, minasastate &state, const xparams _xparams = alglib::xdefault);
8258 void minasacreate(const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, minasastate &state, const xparams _xparams = alglib::xdefault);
8259 
8260 
8261 /*************************************************************************
8262 Obsolete optimization algorithm.
8263 Was replaced by MinBLEIC subpackage.
8264 
8265   -- ALGLIB --
8266      Copyright 02.04.2010 by Bochkanov Sergey
8267 *************************************************************************/
8268 void minasasetcond(const minasastate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
8269 
8270 
8271 /*************************************************************************
8272 Obsolete optimization algorithm.
8273 Was replaced by MinBLEIC subpackage.
8274 
8275   -- ALGLIB --
8276      Copyright 02.04.2010 by Bochkanov Sergey
8277 *************************************************************************/
8278 void minasasetxrep(const minasastate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
8279 
8280 
8281 /*************************************************************************
8282 Obsolete optimization algorithm.
8283 Was replaced by MinBLEIC subpackage.
8284 
8285   -- ALGLIB --
8286      Copyright 02.04.2010 by Bochkanov Sergey
8287 *************************************************************************/
8288 void minasasetalgorithm(const minasastate &state, const ae_int_t algotype, const xparams _xparams = alglib::xdefault);
8289 
8290 
8291 /*************************************************************************
8292 Obsolete optimization algorithm.
8293 Was replaced by MinBLEIC subpackage.
8294 
8295   -- ALGLIB --
8296      Copyright 02.04.2010 by Bochkanov Sergey
8297 *************************************************************************/
8298 void minasasetstpmax(const minasastate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
8299 
8300 
8301 /*************************************************************************
This function provides the reverse communication interface.
The reverse communication interface is not documented and is not
recommended for use. See below for functions which provide a better
documented API.
8305 *************************************************************************/
8306 bool minasaiteration(const minasastate &state, const xparams _xparams = alglib::xdefault);
8307 
8308 
8309 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear
optimizer.

These functions accept the following parameters:
8313     state   -   algorithm state
8314     grad    -   callback which calculates function (or merit function)
8315                 value func and gradient grad at given point x
8316     rep     -   optional callback which is called after each iteration
8317                 can be NULL
8318     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
8319                 can be NULL
8320 
8321 
8322   -- ALGLIB --
8323      Copyright 20.03.2009 by Bochkanov Sergey
8324 
8325 *************************************************************************/
8326 void minasaoptimize(minasastate &state,
8327     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
8328     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
8329     void *ptr = NULL,
8330     const xparams _xparams = alglib::xdefault);
8331 
8332 
8333 /*************************************************************************
8334 Obsolete optimization algorithm.
8335 Was replaced by MinBLEIC subpackage.
8336 
8337   -- ALGLIB --
8338      Copyright 20.03.2009 by Bochkanov Sergey
8339 *************************************************************************/
8340 void minasaresults(const minasastate &state, real_1d_array &x, minasareport &rep, const xparams _xparams = alglib::xdefault);
8341 
8342 
8343 /*************************************************************************
8344 Obsolete optimization algorithm.
8345 Was replaced by MinBLEIC subpackage.
8346 
8347   -- ALGLIB --
8348      Copyright 20.03.2009 by Bochkanov Sergey
8349 *************************************************************************/
8350 void minasaresultsbuf(const minasastate &state, real_1d_array &x, minasareport &rep, const xparams _xparams = alglib::xdefault);
8351 
8352 
8353 /*************************************************************************
8354 Obsolete optimization algorithm.
8355 Was replaced by MinBLEIC subpackage.
8356 
8357   -- ALGLIB --
8358      Copyright 30.07.2010 by Bochkanov Sergey
8359 *************************************************************************/
8360 void minasarestartfrom(const minasastate &state, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
8361 #endif
8362 
8363 #if defined(AE_COMPILE_MINCG) || !defined(AE_PARTIAL_BUILD)
8364 /*************************************************************************
8365         NONLINEAR CONJUGATE GRADIENT METHOD
8366 
8367 DESCRIPTION:
8368 The subroutine minimizes function F(x) of N arguments by using one of  the
8369 nonlinear conjugate gradient methods.
8370 
These CG methods are globally convergent (even on non-convex functions) as
long as grad(f) is Lipschitz continuous in some neighborhood of the level
set L = { x : f(x)<=f(x0) }.
8374 
8375 
8376 REQUIREMENTS:
8377 Algorithm will request following information during its operation:
8378 * function value F and its gradient G (simultaneously) at given point X
8379 
8380 
8381 USAGE:
8382 1. User initializes algorithm state with MinCGCreate() call
8383 2. User tunes solver parameters with MinCGSetCond(), MinCGSetStpMax() and
8384    other functions
8385 3. User calls MinCGOptimize() function which takes algorithm  state   and
8386    pointer (delegate, etc.) to callback function which calculates F/G.
8387 4. User calls MinCGResults() to get solution
5. Optionally, user may call MinCGRestartFrom() to solve another problem
   with the same N but another starting point and/or another function.
   MinCGRestartFrom() allows reusing an already initialized structure.

A compact usage sketch is given after the declarations below.
8391 
8392 
8393 INPUT PARAMETERS:
8394     N       -   problem dimension, N>0:
8395                 * if given, only leading N elements of X are used
8396                 * if not given, automatically determined from size of X
8397     X       -   starting point, array[0..N-1].
8398 
8399 OUTPUT PARAMETERS:
8400     State   -   structure which stores algorithm state
8401 
8402   -- ALGLIB --
8403      Copyright 25.03.2010 by Bochkanov Sergey
8404 *************************************************************************/
8405 void mincgcreate(const ae_int_t n, const real_1d_array &x, mincgstate &state, const xparams _xparams = alglib::xdefault);
8406 void mincgcreate(const real_1d_array &x, mincgstate &state, const xparams _xparams = alglib::xdefault);
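

/*************************************************************************
USAGE SKETCH:

A minimal, informal sketch of the workflow described above for the smooth
target f(x) = (x0-1)^2 + (x1+2)^2 with an analytic gradient. It assumes
the standard ALGLIB C++ interface (optimization.h, namespace alglib) and
the MinCGOptimize()/MinCGResults() functions referenced in the USAGE list
above; names are illustrative and error handling is omitted.

    #include <cstdio>
    #include "optimization.h"
    using namespace alglib;

    // Callback which returns both the function value and its gradient.
    void cg_grad(const real_1d_array &x, double &func,
                 real_1d_array &grad, void *ptr)
    {
        double d0 = x[0]-1, d1 = x[1]+2;
        func    = d0*d0+d1*d1;
        grad[0] = 2*d0;
        grad[1] = 2*d1;
    }

    int main()
    {
        real_1d_array x = "[0,0]";
        mincgstate  state;
        mincgreport rep;

        mincgcreate(x, state);                 // 1. create optimizer
        mincgsetcond(state, 1.0E-8, 0, 0, 0);  // 2. stop when scaled |grad|<=1.0E-8
        mincgoptimize(state, cg_grad);         // 3. run optimization
        mincgresults(state, x, rep);           // 4. solution, expected near [1,-2]
        printf("%s\n", x.tostring(4).c_str());
        return 0;
    }
*************************************************************************/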
8407 
8408 
8409 /*************************************************************************
8410 The subroutine is finite difference variant of MinCGCreate(). It uses
8411 finite differences in order to differentiate target function.
8412 
8413 Description below contains information which is specific to this function
8414 only. We recommend to read comments on MinCGCreate() in order to get more
8415 information about creation of CG optimizer.
8416 
8417 INPUT PARAMETERS:
8418     N       -   problem dimension, N>0:
8419                 * if given, only leading N elements of X are used
8420                 * if not given, automatically determined from size of X
8421     X       -   starting point, array[0..N-1].
8422     DiffStep-   differentiation step, >0
8423 
8424 OUTPUT PARAMETERS:
8425     State   -   structure which stores algorithm state
8426 
8427 NOTES:
8428 1. algorithm uses 4-point central formula for differentiation.
8429 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
8430    S[] is scaling vector which can be set by MinCGSetScale() call.
8431 3. we recommend you to use moderate values of  differentiation  step.  Too
8432    large step will result in too large truncation  errors, while too small
8433    step will result in too large numerical  errors.  1.0E-6  can  be  good
8434    value to start with.
4. Numerical differentiation is very inefficient - one gradient calculation
   needs 4*N function evaluations. This function will work for any N -
   either small (1...10), moderate (10...100) or large (100...). However,
   the performance penalty will be too severe for any N except small ones.
   We should also say that code which relies on numerical differentiation
   is less robust and precise. CG needs exact gradient values. An imprecise
   gradient may slow down convergence, especially on highly nonlinear
   problems.
   Thus we recommend using this function for fast prototyping on small-
   dimensional problems only, and implementing the analytical gradient as
   soon as possible.
8447 
8448   -- ALGLIB --
8449      Copyright 16.05.2011 by Bochkanov Sergey
8450 *************************************************************************/
8451 void mincgcreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, mincgstate &state, const xparams _xparams = alglib::xdefault);
8452 void mincgcreatef(const real_1d_array &x, const double diffstep, mincgstate &state, const xparams _xparams = alglib::xdefault);
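

/*************************************************************************
USAGE SKETCH:

An informal sketch of the function-value-only callback used with this
finite difference variant; the DiffStep value 1.0E-6 follows note 3 above,
and the other names are illustrative.

    #include "optimization.h"
    using namespace alglib;

    void cg_func(const real_1d_array &x, double &func, void *ptr)
    {
        double d0 = x[0]-1, d1 = x[1]+2;
        func = d0*d0+d1*d1;
    }

    // ... real_1d_array x = "[0,0]";
    //     mincgstate state;
    //     mincgcreatef(x, 1.0E-6, state);     // numerical differentiation
    //     mincgoptimize(state, cg_func);      // function-only variant
*************************************************************************/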
8453 
8454 
8455 /*************************************************************************
8456 This function sets stopping conditions for CG optimization algorithm.
8457 
8458 INPUT PARAMETERS:
8459     State   -   structure which stores algorithm state
8460     EpsG    -   >=0
8461                 The  subroutine  finishes  its  work   if   the  condition
8462                 |v|<EpsG is satisfied, where:
                * |.| means Euclidean norm
8464                 * v - scaled gradient vector, v[i]=g[i]*s[i]
8465                 * g - gradient
8466                 * s - scaling coefficients set by MinCGSetScale()
8467     EpsF    -   >=0
8468                 The  subroutine  finishes  its work if on k+1-th iteration
8469                 the  condition  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
8470                 is satisfied.
8471     EpsX    -   >=0
8472                 The subroutine finishes its work if  on  k+1-th  iteration
8473                 the condition |v|<=EpsX is fulfilled, where:
                * |.| means Euclidean norm
                * v - scaled step vector, v[i]=dx[i]/s[i]
                * dx - step vector, dx=X(k+1)-X(k)
8477                 * s - scaling coefficients set by MinCGSetScale()
8478     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
8479                 iterations is unlimited.
8480 
8481 Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
8482 automatic stopping criterion selection (small EpsX).
8483 
8484   -- ALGLIB --
8485      Copyright 02.04.2010 by Bochkanov Sergey
8486 *************************************************************************/
8487 void mincgsetcond(const mincgstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
8488 
8489 
8490 /*************************************************************************
8491 This function sets scaling coefficients for CG optimizer.
8492 
8493 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
8494 size and gradient are scaled before comparison with tolerances).  Scale of
8495 the I-th variable is a translation invariant measure of:
8496 a) "how large" the variable is
8497 b) how large the step should be to make significant changes in the function
8498 
8499 Scaling is also used by finite difference variant of CG optimizer  -  step
8500 along I-th axis is equal to DiffStep*S[I].
8501 
In most optimizers (and in CG too) scaling is NOT a form of
preconditioning. It just affects stopping conditions. You should set the
preconditioner by a separate call to one of the MinCGSetPrec...() functions.

There is a special preconditioning mode, however, which uses scaling
coefficients to form a diagonal preconditioning matrix. You can turn this
mode on if you want, but you should understand that scaling is not the
same thing as preconditioning - these are two different, although related,
ways of tuning the solver.
8511 
8512 INPUT PARAMETERS:
8513     State   -   structure stores algorithm state
8514     S       -   array[N], non-zero scaling coefficients
8515                 S[i] may be negative, sign doesn't matter.
8516 
8517   -- ALGLIB --
8518      Copyright 14.01.2011 by Bochkanov Sergey
8519 *************************************************************************/
8520 void mincgsetscale(const mincgstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
8521 
8522 
8523 /*************************************************************************
8524 This function turns on/off reporting.
8525 
8526 INPUT PARAMETERS:
8527     State   -   structure which stores algorithm state
8528     NeedXRep-   whether iteration reports are needed or not
8529 
8530 If NeedXRep is True, algorithm will call rep() callback function if  it is
8531 provided to MinCGOptimize().
8532 
8533   -- ALGLIB --
8534      Copyright 02.04.2010 by Bochkanov Sergey
8535 *************************************************************************/
8536 void mincgsetxrep(const mincgstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
8537 
8538 
8539 /*************************************************************************
8540 This function sets CG algorithm.
8541 
8542 INPUT PARAMETERS:
8543     State   -   structure which stores algorithm state
8544     CGType  -   algorithm type:
8545                 * -1    automatic selection of the best algorithm
8546                 * 0     DY (Dai and Yuan) algorithm
8547                 * 1     Hybrid DY-HS algorithm
8548 
8549   -- ALGLIB --
8550      Copyright 02.04.2010 by Bochkanov Sergey
8551 *************************************************************************/
8552 void mincgsetcgtype(const mincgstate &state, const ae_int_t cgtype, const xparams _xparams = alglib::xdefault);
8553 
8554 
8555 /*************************************************************************
8556 This function sets maximum step length
8557 
8558 INPUT PARAMETERS:
8559     State   -   structure which stores algorithm state
8560     StpMax  -   maximum step length, >=0. Set StpMax to 0.0,  if you don't
8561                 want to limit step length.
8562 
Use this subroutine when you optimize a target function which contains
exp() or other fast-growing functions, and the optimization algorithm makes
steps which are so large that they lead to overflow. This function allows
us to reject steps that are too large (and therefore expose us to possible
overflow) without actually calculating the function value at the point
x+stp*d.
8568 
8569   -- ALGLIB --
8570      Copyright 02.04.2010 by Bochkanov Sergey
8571 *************************************************************************/
8572 void mincgsetstpmax(const mincgstate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
8573 
8574 
8575 /*************************************************************************
This function allows you to suggest an initial step length for the CG
algorithm.
8577 
8578 Suggested  step  length  is used as starting point for the line search. It
8579 can be useful when you have  badly  scaled  problem,  i.e.  when  ||grad||
8580 (which is used as initial estimate for the first step) is many  orders  of
8581 magnitude different from the desired step.
8582 
Line search may fail on such problems without a good estimate of the
initial step length. Imagine, for example, a problem with ||grad||=10^50
and a desired step equal to 0.1. The line search function will use 10^50
as the initial step, then it will halve the step length (up to 20 times)
and will get 10^44, which is still too large.

This function allows us to tell the optimizer that the line search should
be started from some moderate step length, like 1.0, so the algorithm will
be able to detect the desired step length in several searches.
8592 
8593 Default behavior (when no step is suggested) is to use preconditioner,  if
8594 it is available, to generate initial estimate of step length.
8595 
8596 This function influences only first iteration of algorithm. It  should  be
8597 called between MinCGCreate/MinCGRestartFrom() call and MinCGOptimize call.
8598 Suggested step is ignored if you have preconditioner.
8599 
8600 INPUT PARAMETERS:
8601     State   -   structure used to store algorithm state.
8602     Stp     -   initial estimate of the step length.
8603                 Can be zero (no estimate).
8604 
8605   -- ALGLIB --
8606      Copyright 30.07.2010 by Bochkanov Sergey
8607 *************************************************************************/
8608 void mincgsuggeststep(const mincgstate &state, const double stp, const xparams _xparams = alglib::xdefault);
8609 
8610 
8611 /*************************************************************************
8612 Modification of the preconditioner: preconditioning is turned off.
8613 
8614 INPUT PARAMETERS:
8615     State   -   structure which stores algorithm state
8616 
8617 NOTE:  you  can  change  preconditioner  "on  the  fly",  during algorithm
8618 iterations.
8619 
8620   -- ALGLIB --
8621      Copyright 13.10.2010 by Bochkanov Sergey
8622 *************************************************************************/
8623 void mincgsetprecdefault(const mincgstate &state, const xparams _xparams = alglib::xdefault);
8624 
8625 
8626 /*************************************************************************
8627 Modification  of  the  preconditioner:  diagonal of approximate Hessian is
8628 used.
8629 
8630 INPUT PARAMETERS:
8631     State   -   structure which stores algorithm state
8632     D       -   diagonal of the approximate Hessian, array[0..N-1],
8633                 (if larger, only leading N elements are used).
8634 
8635 NOTE:  you  can  change  preconditioner  "on  the  fly",  during algorithm
8636 iterations.
8637 
8638 NOTE 2: D[i] should be positive. Exception will be thrown otherwise.
8639 
8640 NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
8641 
8642   -- ALGLIB --
8643      Copyright 13.10.2010 by Bochkanov Sergey
8644 *************************************************************************/
8645 void mincgsetprecdiag(const mincgstate &state, const real_1d_array &d, const xparams _xparams = alglib::xdefault);
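

/*************************************************************************
USAGE SKETCH:

A short, informal sketch; the diagonal values are purely illustrative and
must be the diagonal of an approximate Hessian (NOT its inverse), strictly
positive. The function name is illustrative.

    #include "optimization.h"
    using namespace alglib;

    void set_diag_prec(const mincgstate &state)
    {
        real_1d_array d = "[100.0,1.0]";   // approximate Hessian diagonal
        mincgsetprecdiag(state, d);
    }
*************************************************************************/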
8646 
8647 
8648 /*************************************************************************
8649 Modification of the preconditioner: scale-based diagonal preconditioning.
8650 
This preconditioning mode can be useful when you don't have an approximate
diagonal of the Hessian, but you know that your variables are badly scaled
(for example, one variable is in [1,10], and another in [1000,100000]),
and most of the ill-conditioning comes from the different scales of the
variables.

In this case a simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
can greatly improve convergence.

IMPORTANT: you should set the scale of your variables with a
MinCGSetScale() call (before or after the MinCGSetPrecScale() call).
Without knowledge of the variable scales, the scale-based preconditioner
is just the unit matrix.
8662 
8663 INPUT PARAMETERS:
8664     State   -   structure which stores algorithm state
8665 
NOTE: you can change the preconditioner "on the fly", during algorithm
8667 iterations.
8668 
8669   -- ALGLIB --
8670      Copyright 13.10.2010 by Bochkanov Sergey
8671 *************************************************************************/
8672 void mincgsetprecscale(const mincgstate &state, const xparams _xparams = alglib::xdefault);
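
/*************************************************************************
USAGE SKETCH (informal): scale-based preconditioning takes effect only
after variable scales are set. A minimal sketch; `state` and the scale
values below are assumptions of the sketch.

    real_1d_array s = "[5.0, 50000.0]";  // typical magnitudes of x[0] and x[1]
    mincgsetscale(state, s);             // scales feed stopping tests and H[i]=1/(s[i]^2)
    mincgsetprecscale(state);            // activate the scale-based preconditioner
*************************************************************************/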
8673 
8674 
8675 /*************************************************************************
This function provides the reverse communication interface.
The reverse communication interface is not documented and is not
recommended for direct use.
See below for functions which provide a better-documented API.
8679 *************************************************************************/
8680 bool mincgiteration(const mincgstate &state, const xparams _xparams = alglib::xdefault);
8681 
8682 
8683 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear
optimizer.

These functions accept the following parameters:
8687     state   -   algorithm state
8688     func    -   callback which calculates function (or merit function)
8689                 value func at given point x
8690     grad    -   callback which calculates function (or merit function)
8691                 value func and gradient grad at given point x
8692     rep     -   optional callback which is called after each iteration
8693                 can be NULL
8694     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
8695                 can be NULL
8696 
8697 NOTES:
8698 
8699 1. This function has two different implementations: one which  uses  exact
8700    (analytical) user-supplied  gradient, and one which uses function value
8701    only  and  numerically  differentiates  function  in  order  to  obtain
8702    gradient.
8703 
8704    Depending  on  the  specific  function  used to create optimizer object
8705    (either MinCGCreate()  for analytical gradient  or  MinCGCreateF()  for
8706    numerical differentiation) you should  choose  appropriate  variant  of
8707    MinCGOptimize() - one which accepts function AND gradient or one  which
8708    accepts function ONLY.
8709 
8710    Be careful to choose variant of MinCGOptimize()  which  corresponds  to
8711    your optimization scheme! Table below lists different  combinations  of
8712    callback (function/gradient) passed  to  MinCGOptimize()  and  specific
8713    function used to create optimizer.
8714 
8715 
8716                   |         USER PASSED TO MinCGOptimize()
8717    CREATED WITH   |  function only   |  function and gradient
8718    ------------------------------------------------------------
   MinCGCreateF() |      works       |          FAIL
   MinCGCreate()  |      FAIL        |          works
8721 
8722    Here "FAIL" denotes inappropriate combinations  of  optimizer  creation
8723    function and MinCGOptimize() version. Attemps to use  such  combination
8724    (for  example,  to create optimizer with  MinCGCreateF()  and  to  pass
8725    gradient information to MinCGOptimize()) will lead to  exception  being
8726    thrown. Either  you  did  not  pass  gradient when it WAS needed or you
8727    passed gradient when it was NOT needed.
8728 
8729   -- ALGLIB --
8730      Copyright 20.04.2009 by Bochkanov Sergey
8731 
8732 *************************************************************************/
8733 void mincgoptimize(mincgstate &state,
8734     void (*func)(const real_1d_array &x, double &func, void *ptr),
8735     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
8736     void *ptr = NULL,
8737     const xparams _xparams = alglib::xdefault);
8738 void mincgoptimize(mincgstate &state,
8739     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
8740     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
8741     void *ptr = NULL,
8742     const xparams _xparams = alglib::xdefault);
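
/*************************************************************************
USAGE SKETCH (informal): a complete session with the analytic-gradient
variant of mincgoptimize(). A minimal sketch; the header name and the
callback name are assumptions, and the target is the classic Rosenbrock
function, used purely as an illustration.

    #include "optimization.h"   // assumed name of the ALGLIB C++ interface header
    using namespace alglib;

    // Rosenbrock target with analytic gradient (illustrative)
    void rosenbrock_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
    {
        double a = 1-x[0], b = x[1]-x[0]*x[0];
        func    = a*a + 100*b*b;
        grad[0] = -2*a - 400*x[0]*b;
        grad[1] = 200*b;
    }

    int main()
    {
        real_1d_array x = "[-1.2, 1.0]";
        mincgstate state;
        mincgreport rep;
        mincgcreate(x, state);                   // analytic-gradient protocol
        mincgsetcond(state, 1.0e-8, 0, 0, 0);    // stop on small gradient norm
        mincgoptimize(state, rosenbrock_grad);   // variant which accepts function AND gradient
        mincgresults(state, x, rep);
        return 0;
    }
*************************************************************************/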
8743 
8744 
8745 /*************************************************************************
8746 This  function  activates/deactivates verification  of  the  user-supplied
8747 analytic gradient.
8748 
8749 Upon  activation  of  this  option  OptGuard  integrity  checker  performs
8750 numerical differentiation of your target function  at  the  initial  point
8751 (note: future versions may also perform check  at  the  final  point)  and
8752 compares numerical gradient with analytic one provided by you.
8753 
8754 If difference is too large, an error flag is set and optimization  session
8755 continues. After optimization session is over, you can retrieve the report
8756 which  stores  both  gradients  and  specific  components  highlighted  as
8757 suspicious by the OptGuard.
8758 
8759 The primary OptGuard report can be retrieved with mincgoptguardresults().
8760 
8761 IMPORTANT: gradient check is a high-overhead option which  will  cost  you
8762            about 3*N additional function evaluations. In many cases it may
8763            cost as much as the rest of the optimization session.
8764 
8765            YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO
8766            CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY.
8767 
8768 NOTE: unlike previous incarnation of the gradient checking code,  OptGuard
8769       does NOT interrupt optimization even if it discovers bad gradient.
8770 
8771 INPUT PARAMETERS:
8772     State       -   structure used to store algorithm state
8773     TestStep    -   verification step used for numerical differentiation:
8774                     * TestStep=0 turns verification off
8775                     * TestStep>0 activates verification
8776                     You should carefully choose TestStep. Value  which  is
8777                     too large (so large that  function  behavior  is  non-
8778                     cubic at this scale) will lead  to  false  alarms. Too
8779                     short step will result in rounding  errors  dominating
8780                     numerical derivative.
8781 
8782                     You may use different step for different parameters by
8783                     means of setting scale with mincgsetscale().
8784 
8785 === EXPLANATION ==========================================================
8786 
In order to verify the gradient, the algorithm performs the following steps:
8788   * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
8789     where X[i] is i-th component of the initial point and S[i] is a  scale
8790     of i-th parameter
8791   * F(X) is evaluated at these trial points
8792   * we perform one more evaluation in the middle point of the interval
8793   * we  build  cubic  model using function values and derivatives at trial
8794     points and we compare its prediction with actual value in  the  middle
8795     point
8796 
8797   -- ALGLIB --
8798      Copyright 15.06.2014 by Bochkanov Sergey
8799 *************************************************************************/
8800 void mincgoptguardgradient(const mincgstate &state, const double teststep, const xparams _xparams = alglib::xdefault);
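
/*************************************************************************
USAGE SKETCH (informal): enabling gradient verification before the session
and reading the verdict afterwards. A minimal sketch; TestStep=0.001 and
the callback name my_grad() are illustrative assumptions.

    mincgoptguardgradient(state, 0.001);     // verify user gradient at the initial point
    mincgoptimize(state, my_grad);           // my_grad is a hypothetical grad callback
    optguardreport ogrep;
    mincgoptguardresults(state, ogrep);
    if( ogrep.badgradsuspected )
    {
        // ogrep.badgradvidx  - suspicious gradient component
        // ogrep.badgraduser  - analytic gradient supplied by the callback
        // ogrep.badgradnum   - reference gradient from numerical differentiation
    }
*************************************************************************/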
8801 
8802 
8803 /*************************************************************************
8804 This  function  activates/deactivates nonsmoothness monitoring  option  of
8805 the  OptGuard  integrity  checker. Smoothness  monitor  silently  observes
8806 solution process and tries to detect ill-posed problems, i.e. ones with:
8807 a) discontinuous target function (non-C0)
8808 b) nonsmooth     target function (non-C1)
8809 
8810 Smoothness monitoring does NOT interrupt optimization  even if it suspects
8811 that your problem is nonsmooth. It just sets corresponding  flags  in  the
8812 OptGuard report which can be retrieved after optimization is over.
8813 
8814 Smoothness monitoring is a moderate overhead option which often adds  less
8815 than 1% to the optimizer running time. Thus, you can use it even for large
8816 scale problems.
8817 
8818 NOTE: OptGuard does  NOT  guarantee  that  it  will  always  detect  C0/C1
8819       continuity violations.
8820 
8821       First, minor errors are hard to  catch - say, a 0.0001 difference in
8822       the model values at two sides of the gap may be due to discontinuity
8823       of the model - or simply because the model has changed.
8824 
8825       Second, C1-violations  are  especially  difficult  to  detect  in  a
8826       noninvasive way. The optimizer usually  performs  very  short  steps
8827       near the nonsmoothness, and differentiation  usually   introduces  a
8828       lot of numerical noise.  It  is  hard  to  tell  whether  some  tiny
8829       discontinuity in the slope is due to real nonsmoothness or just  due
8830       to numerical noise alone.
8831 
      Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases they can be
      spotted by restarting from a different initial point).
8835 
8836 INPUT PARAMETERS:
8837     state   -   algorithm state
8838     level   -   monitoring level:
8839                 * 0 - monitoring is disabled
8840                 * 1 - noninvasive low-overhead monitoring; function values
8841                       and/or gradients are recorded, but OptGuard does not
8842                       try to perform additional evaluations  in  order  to
8843                       get more information about suspicious locations.
8844 
8845 === EXPLANATION ==========================================================
8846 
8847 One major source of headache during optimization  is  the  possibility  of
8848 the coding errors in the target function/constraints (or their gradients).
8849 Such  errors   most   often   manifest   themselves  as  discontinuity  or
8850 nonsmoothness of the target/constraints.
8851 
8852 Another frequent situation is when you try to optimize something involving
8853 lots of min() and max() operations, i.e. nonsmooth target. Although not  a
8854 coding error, it is nonsmoothness anyway - and smooth  optimizers  usually
8855 stop right after encountering nonsmoothness, well before reaching solution.
8856 
The OptGuard integrity checker helps you to catch such situations: it
monitors function values/gradients being passed to the optimizer and tries
to detect errors. Upon discovering a suspicious pair of points it raises
the appropriate flag (and allows you to continue optimization). When
optimization is done, you can study the OptGuard result.
8862 
8863   -- ALGLIB --
8864      Copyright 21.11.2018 by Bochkanov Sergey
8865 *************************************************************************/
8866 void mincgoptguardsmoothness(const mincgstate &state, const ae_int_t level, const xparams _xparams = alglib::xdefault);
8867 void mincgoptguardsmoothness(const mincgstate &state, const xparams _xparams = alglib::xdefault);
8868 
8869 
8870 /*************************************************************************
Results of the OptGuard integrity check; this function should be called
after the optimization session is over.
8873 
8874 === PRIMARY REPORT =======================================================
8875 
8876 OptGuard performs several checks which are intended to catch common errors
8877 in the implementation of nonlinear function/gradient:
8878 * incorrect analytic gradient
8879 * discontinuous (non-C0) target functions (constraints)
8880 * nonsmooth     (non-C1) target functions (constraints)
8881 
8882 Each of these checks is activated with appropriate function:
8883 * mincgoptguardgradient() for gradient verification
8884 * mincgoptguardsmoothness() for C0/C1 checks
8885 
8886 Following flags are set when these errors are suspected:
8887 * rep.badgradsuspected, and additionally:
8888   * rep.badgradvidx for specific variable (gradient element) suspected
8889   * rep.badgradxbase, a point where gradient is tested
8890   * rep.badgraduser, user-provided gradient  (stored  as  2D  matrix  with
8891     single row in order to make  report  structure  compatible  with  more
8892     complex optimizers like MinNLC or MinLM)
8893   * rep.badgradnum,   reference    gradient    obtained    via   numerical
8894     differentiation (stored as  2D matrix with single row in order to make
8895     report structure compatible with more complex optimizers  like  MinNLC
8896     or MinLM)
8897 * rep.nonc0suspected
8898 * rep.nonc1suspected
8899 
8900 === ADDITIONAL REPORTS/LOGS ==============================================
8901 
Several different tests are performed to catch C0/C1 errors. You can find
out which specific test signaled the error by looking at:
8904 * rep.nonc0test0positive, for non-C0 test #0
8905 * rep.nonc1test0positive, for non-C1 test #0
8906 * rep.nonc1test1positive, for non-C1 test #1
8907 
8908 Additional information (including line search logs)  can  be  obtained  by
8909 means of:
8910 * mincgoptguardnonc1test0results()
8911 * mincgoptguardnonc1test1results()
8912 which return detailed error reports, specific points where discontinuities
8913 were found, and so on.
8914 
8915 ==========================================================================
8916 
8917 INPUT PARAMETERS:
8918     state   -   algorithm state
8919 
8920 OUTPUT PARAMETERS:
8921     rep     -   generic OptGuard report;  more  detailed  reports  can  be
8922                 retrieved with other functions.
8923 
8924 NOTE: false negatives (nonsmooth problems are not identified as  nonsmooth
8925       ones) are possible although unlikely.
8926 
8927       The reason  is  that  you  need  to  make several evaluations around
8928       nonsmoothness  in  order  to  accumulate  enough  information  about
8929       function curvature. Say, if you start right from the nonsmooth point,
8930       optimizer simply won't get enough data to understand what  is  going
8931       wrong before it terminates due to abrupt changes in the  derivative.
8932       It is also  possible  that  "unlucky"  step  will  move  us  to  the
8933       termination too quickly.
8934 
8935       Our current approach is to have less than 0.1%  false  negatives  in
8936       our test examples  (measured  with  multiple  restarts  from  random
8937       points), and to have exactly 0% false positives.
8938 
8939   -- ALGLIB --
8940      Copyright 21.11.2018 by Bochkanov Sergey
8941 *************************************************************************/
8942 void mincgoptguardresults(const mincgstate &state, optguardreport &rep, const xparams _xparams = alglib::xdefault);
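
/*************************************************************************
USAGE SKETCH (informal): turning on smoothness monitoring and inspecting
the primary report after the session. A minimal sketch; `state` and the
hypothetical callback my_grad() are assumptions carried over from the
earlier sketches.

    mincgoptguardsmoothness(state);          // default monitoring level
    mincgoptimize(state, my_grad);
    optguardreport ogrep;
    mincgoptguardresults(state, ogrep);
    if( ogrep.nonc0suspected || ogrep.nonc1suspected )
    {
        // the target looks discontinuous and/or nonsmooth; detailed logs can
        // be fetched with mincgoptguardnonc1test0results()/...test1results()
    }
*************************************************************************/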
8943 
8944 
8945 /*************************************************************************
8946 Detailed results of the OptGuard integrity check for nonsmoothness test #0
8947 
8948 Nonsmoothness (non-C1) test #0 studies  function  values  (not  gradient!)
8949 obtained during line searches and monitors  behavior  of  the  directional
8950 derivative estimate.
8951 
8952 This test is less powerful than test #1, but it does  not  depend  on  the
8953 gradient values and thus it is more robust against artifacts introduced by
8954 numerical differentiation.
8955 
8956 Two reports are returned:
8957 * a "strongest" one, corresponding  to  line   search  which  had  highest
8958   value of the nonsmoothness indicator
8959 * a "longest" one, corresponding to line search which  had  more  function
8960   evaluations, and thus is more detailed
8961 
8962 In both cases following fields are returned:
8963 
* positive - is TRUE when the test flagged a suspicious point; FALSE if
  the test did not notice anything (in the latter case the fields below
  are empty).
* x0[], d[] - arrays of length N which store the initial point and the
  direction of the line search (d[] can be normalized, but does not have
  to be)
* stp[], f[] - arrays of length CNT which store step lengths and function
  values at these points; f[i] is evaluated at x0+stp[i]*d.
* stpidxa, stpidxb - we suspect that the function violates C1 continuity
  between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3,
  with the most likely position of the violation between stpidxa+1 and
  stpidxa+2).
8974 
8975 ==========================================================================
= IN SHORT: build a 2D plot of (stp,f) and look at it - you will see
=           where C1 continuity is violated.
8978 ==========================================================================
8979 
8980 INPUT PARAMETERS:
8981     state   -   algorithm state
8982 
8983 OUTPUT PARAMETERS:
8984     strrep  -   C1 test #0 "strong" report
8985     lngrep  -   C1 test #0 "long" report
8986 
8987   -- ALGLIB --
8988      Copyright 21.11.2018 by Bochkanov Sergey
8989 *************************************************************************/
8990 void mincgoptguardnonc1test0results(const mincgstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams = alglib::xdefault);
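
/*************************************************************************
USAGE SKETCH (informal): dumping the (stp,f) pairs of the "longest" test #0
report so that they can be plotted. A minimal sketch; it assumes <cstdio>
is included and that `state` belongs to a finished optimization session
with smoothness monitoring enabled.

    optguardnonc1test0report strrep, lngrep;
    mincgoptguardnonc1test0results(state, strrep, lngrep);
    if( lngrep.positive )
        for(int i=0; i<lngrep.cnt; i++)
            printf("stp=%+.6e  f=%+.6e\n", double(lngrep.stp[i]), double(lngrep.f[i]));
*************************************************************************/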
8991 
8992 
8993 /*************************************************************************
8994 Detailed results of the OptGuard integrity check for nonsmoothness test #1
8995 
8996 Nonsmoothness (non-C1)  test  #1  studies  individual  components  of  the
8997 gradient computed during line search.
8998 
8999 When precise analytic gradient is provided this test is more powerful than
9000 test #0  which  works  with  function  values  and  ignores  user-provided
9001 gradient.  However,  test  #0  becomes  more   powerful   when   numerical
9002 differentiation is employed (in such cases test #1 detects  higher  levels
9003 of numerical noise and becomes too conservative).
9004 
9005 This test also tells specific components of the gradient which violate  C1
9006 continuity, which makes it more informative than #0, which just tells that
9007 continuity is violated.
9008 
9009 Two reports are returned:
9010 * a "strongest" one, corresponding  to  line   search  which  had  highest
9011   value of the nonsmoothness indicator
9012 * a "longest" one, corresponding to line search which  had  more  function
9013   evaluations, and thus is more detailed
9014 
9015 In both cases following fields are returned:
9016 
* positive - is TRUE when the test flagged a suspicious point; FALSE if
  the test did not notice anything (in the latter case the fields below
  are empty).
* vidx - index of the variable in [0,N) with a nonsmooth derivative
* x0[], d[] - arrays of length N which store the initial point and the
  direction of the line search (d[] can be normalized, but does not have
  to be)
* stp[], g[] - arrays of length CNT which store step lengths and gradient
  values at these points; g[i] is evaluated at x0+stp[i]*d and contains
  the vidx-th component of the gradient.
* stpidxa, stpidxb - we suspect that the function violates C1 continuity
  between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3,
  with the most likely position of the violation between stpidxa+1 and
  stpidxa+2).
9029 
9030 ==========================================================================
= IN SHORT: build a 2D plot of (stp,g) and look at it - you will see
=           where C1 continuity is violated.
9033 ==========================================================================
9034 
9035 INPUT PARAMETERS:
9036     state   -   algorithm state
9037 
9038 OUTPUT PARAMETERS:
9039     strrep  -   C1 test #1 "strong" report
9040     lngrep  -   C1 test #1 "long" report
9041 
9042   -- ALGLIB --
9043      Copyright 21.11.2018 by Bochkanov Sergey
9044 *************************************************************************/
9045 void mincgoptguardnonc1test1results(const mincgstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams = alglib::xdefault);
9046 
9047 
9048 /*************************************************************************
9049 Conjugate gradient results
9050 
9051 INPUT PARAMETERS:
9052     State   -   algorithm state
9053 
9054 OUTPUT PARAMETERS:
9055     X       -   array[0..N-1], solution
9056     Rep     -   optimization report:
                * Rep.TerminationType, the completion code:
9058                     * -8    internal integrity control  detected  infinite
9059                             or NAN values in  function/gradient.  Abnormal
9060                             termination signalled.
9061                     * -7    gradient verification failed.
9062                             See MinCGSetGradientCheck() for more information.
9063                     *  1    relative function improvement is no more than
9064                             EpsF.
9065                     *  2    relative step is no more than EpsX.
9066                     *  4    gradient norm is no more than EpsG
                    *  5    MaxIts steps were taken
9068                     *  7    stopping conditions are too stringent,
9069                             further improvement is impossible,
9070                             we return best X found so far
9071                     *  8    terminated by user
9072                 * Rep.IterationsCount contains iterations count
                * NFEV contains the number of function evaluations
9074 
9075   -- ALGLIB --
9076      Copyright 20.04.2009 by Bochkanov Sergey
9077 *************************************************************************/
9078 void mincgresults(const mincgstate &state, real_1d_array &x, mincgreport &rep, const xparams _xparams = alglib::xdefault);
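
/*************************************************************************
USAGE SKETCH (informal): reading the solution and branching on the
completion code. A minimal sketch; field names follow the report
description above, `state` is assumed, and the printf formatting is
purely illustrative.

    real_1d_array xsol;
    mincgreport rep;
    mincgresults(state, xsol, rep);
    if( rep.terminationtype>0 )
        printf("success: %d iterations, %d function evaluations\n",
               int(rep.iterationscount), int(rep.nfev));
    else
        printf("abnormal termination, code %d\n", int(rep.terminationtype));
*************************************************************************/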
9079 
9080 
9081 /*************************************************************************
9082 Conjugate gradient results
9083 
9084 Buffered implementation of MinCGResults(), which uses pre-allocated buffer
9085 to store X[]. If buffer size is  too  small,  it  resizes  buffer.  It  is
9086 intended to be used in the inner cycles of performance critical algorithms
9087 where array reallocation penalty is too large to be ignored.
9088 
9089   -- ALGLIB --
9090      Copyright 20.04.2009 by Bochkanov Sergey
9091 *************************************************************************/
9092 void mincgresultsbuf(const mincgstate &state, real_1d_array &x, mincgreport &rep, const xparams _xparams = alglib::xdefault);
9093 
9094 
9095 /*************************************************************************
9096 This  subroutine  restarts  CG  algorithm from new point. All optimization
9097 parameters are left unchanged.
9098 
This function allows you to solve multiple optimization problems (which
must have the same number of dimensions) without the object reallocation
penalty.
9101 
9102 INPUT PARAMETERS:
9103     State   -   structure used to store algorithm state.
9104     X       -   new starting point.
9105 
9106   -- ALGLIB --
9107      Copyright 30.07.2010 by Bochkanov Sergey
9108 *************************************************************************/
9109 void mincgrestartfrom(const mincgstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
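
/*************************************************************************
USAGE SKETCH (informal): reusing one solver object for two starting points
of the same problem. A minimal sketch; `state`, `xsol`, `rep` and the
hypothetical callback my_grad() are assumed from the earlier sketches.

    real_1d_array newstart = "[5,-3]";
    mincgoptimize(state, my_grad);           // solve from the original starting point
    mincgresults(state, xsol, rep);
    mincgrestartfrom(state, newstart);       // same dimensionality, new starting point
    mincgoptimize(state, my_grad);           // no reallocation of the state object
    mincgresults(state, xsol, rep);
*************************************************************************/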
9110 
9111 
9112 /*************************************************************************
This subroutine submits a request for termination of the running
optimizer. It should be called from a user-supplied callback when the user
decides that it is time to "smoothly" terminate the optimization process.
As a result, the optimizer stops at the point which was "current accepted"
when the termination request was submitted and returns completion code 8
(successful termination).
9118 
9119 INPUT PARAMETERS:
9120     State   -   optimizer structure
9121 
9122 NOTE: after  request  for  termination  optimizer  may   perform   several
9123       additional calls to user-supplied callbacks. It does  NOT  guarantee
9124       to stop immediately - it just guarantees that these additional calls
9125       will be discarded later.
9126 
9127 NOTE: calling this function on optimizer which is NOT running will have no
9128       effect.
9129 
9130 NOTE: multiple calls to this function are possible. First call is counted,
9131       subsequent calls are silently ignored.
9132 
9133   -- ALGLIB --
9134      Copyright 08.10.2014 by Bochkanov Sergey
9135 *************************************************************************/
9136 void mincgrequesttermination(const mincgstate &state, const xparams _xparams = alglib::xdefault);
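
/*************************************************************************
USAGE SKETCH (informal): requesting "smooth" termination from inside the
rep() callback. A minimal sketch; passing the state object through the
opaque ptr argument and the stopping rule itself are assumptions of this
sketch, not requirements of the library.

    void my_rep(const real_1d_array &x, double func, void *ptr)
    {
        mincgstate *st = (mincgstate*)ptr;   // state object passed in via ptr
        if( func<1.0e-3 )                    // illustrative stopping rule
            mincgrequesttermination(*st);    // optimizer returns completion code 8
    }

    // ...
    mincgsetxrep(state, true);               // reports must be enabled for rep() to fire
    mincgoptimize(state, my_grad, my_rep, &state);
*************************************************************************/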
9137 #endif
9138 
9139 #if defined(AE_COMPILE_MINLM) || !defined(AE_PARTIAL_BUILD)
9140 /*************************************************************************
9141                 IMPROVED LEVENBERG-MARQUARDT METHOD FOR
9142                  NON-LINEAR LEAST SQUARES OPTIMIZATION
9143 
9144 DESCRIPTION:
This function is used to find the minimum of a function represented as a
sum of squares:
    F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
using the value of the function vector f[] and the Jacobian of f[].
9149 
9150 
9151 REQUIREMENTS:
9152 This algorithm will request following information during its operation:
9153 
9154 * function vector f[] at given point X
9155 * function vector f[] and Jacobian of f[] (simultaneously) at given point
9156 
9157 There are several overloaded versions of  MinLMOptimize()  function  which
9158 correspond  to  different LM-like optimization algorithms provided by this
9159 unit. You should choose version which accepts fvec()  and jac() callbacks.
9160 First  one  is used to calculate f[] at given point, second one calculates
9161 f[] and Jacobian df[i]/dx[j].
9162 
9163 You can try to initialize MinLMState structure with VJ  function and  then
9164 use incorrect version  of  MinLMOptimize()  (for  example,  version  which
9165 works  with  general  form function and does not provide Jacobian), but it
9166 will  lead  to  exception  being  thrown  after first attempt to calculate
9167 Jacobian.
9168 
9169 
9170 USAGE:
9171 1. User initializes algorithm state with MinLMCreateVJ() call
9172 2. User tunes solver parameters with MinLMSetCond(),  MinLMSetStpMax() and
9173    other functions
9174 3. User calls MinLMOptimize() function which  takes algorithm  state   and
9175    callback functions.
9176 4. User calls MinLMResults() to get solution
9177 5. Optionally, user may call MinLMRestartFrom() to solve  another  problem
9178    with same N/M but another starting point and/or another function.
   MinLMRestartFrom() allows you to reuse an already initialized structure.
9180 
9181 
9182 INPUT PARAMETERS:
9183     N       -   dimension, N>1
9184                 * if given, only leading N elements of X are used
9185                 * if not given, automatically determined from size of X
9186     M       -   number of functions f[i]
9187     X       -   initial solution, array[0..N-1]
9188 
9189 OUTPUT PARAMETERS:
9190     State   -   structure which stores algorithm state
9191 
9192 NOTES:
9193 1. you may tune stopping conditions with MinLMSetCond() function
9194 2. if target function contains exp() or other fast growing functions,  and
9195    optimization algorithm makes too large steps which leads  to  overflow,
9196    use MinLMSetStpMax() function to bound algorithm's steps.
9197 
9198   -- ALGLIB --
9199      Copyright 30.03.2009 by Bochkanov Sergey
9200 *************************************************************************/
9201 void minlmcreatevj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
9202 void minlmcreatevj(const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
9203 
9204 
9205 /*************************************************************************
9206                 IMPROVED LEVENBERG-MARQUARDT METHOD FOR
9207                  NON-LINEAR LEAST SQUARES OPTIMIZATION
9208 
9209 DESCRIPTION:
This function is used to find the minimum of a function represented as a
sum of squares:
    F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
using the value of the function vector f[] only. Finite differences are
used to calculate the Jacobian.
9215 
9216 
9217 REQUIREMENTS:
9218 This algorithm will request following information during its operation:
9219 * function vector f[] at given point X
9220 
9221 There are several overloaded versions of  MinLMOptimize()  function  which
9222 correspond  to  different LM-like optimization algorithms provided by this
9223 unit. You should choose version which accepts fvec() callback.
9224 
9225 You can try to initialize MinLMState structure with VJ  function and  then
9226 use incorrect version  of  MinLMOptimize()  (for  example,  version  which
9227 works with general form function and does not accept function vector), but
9228 it will  lead  to  exception being thrown after first attempt to calculate
9229 Jacobian.
9230 
9231 
9232 USAGE:
9233 1. User initializes algorithm state with MinLMCreateV() call
9234 2. User tunes solver parameters with MinLMSetCond(),  MinLMSetStpMax() and
9235    other functions
9236 3. User calls MinLMOptimize() function which  takes algorithm  state   and
9237    callback functions.
9238 4. User calls MinLMResults() to get solution
9239 5. Optionally, user may call MinLMRestartFrom() to solve  another  problem
9240    with same N/M but another starting point and/or another function.
   MinLMRestartFrom() allows you to reuse an already initialized structure.
9242 
9243 
9244 INPUT PARAMETERS:
9245     N       -   dimension, N>1
9246                 * if given, only leading N elements of X are used
9247                 * if not given, automatically determined from size of X
9248     M       -   number of functions f[i]
9249     X       -   initial solution, array[0..N-1]
9250     DiffStep-   differentiation step, >0
9251 
9252 OUTPUT PARAMETERS:
9253     State   -   structure which stores algorithm state
9254 
9255 See also MinLMIteration, MinLMResults.
9256 
9257 NOTES:
9258 1. you may tune stopping conditions with MinLMSetCond() function
9259 2. if target function contains exp() or other fast growing functions,  and
9260    optimization algorithm makes too large steps which leads  to  overflow,
9261    use MinLMSetStpMax() function to bound algorithm's steps.
9262 
9263   -- ALGLIB --
9264      Copyright 30.03.2009 by Bochkanov Sergey
9265 *************************************************************************/
9266 void minlmcreatev(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlmstate &state, const xparams _xparams = alglib::xdefault);
9267 void minlmcreatev(const ae_int_t m, const real_1d_array &x, const double diffstep, minlmstate &state, const xparams _xparams = alglib::xdefault);
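
/*************************************************************************
USAGE SKETCH (informal): the "V" protocol with numerically differentiated
Jacobian. A minimal sketch; the header name, the residuals (minimum at
(-3,+3)) and the DiffStep value are illustrative assumptions.

    #include "optimization.h"   // assumed name of the ALGLIB C++ interface header
    using namespace alglib;

    // two residuals: f0 = 10*(x0+3)^2, f1 = (x1-3)^2
    void fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    {
        fi[0] = 10*(x[0]+3)*(x[0]+3);
        fi[1] = (x[1]-3)*(x[1]-3);
    }

    int main()
    {
        real_1d_array x = "[0,0]";
        minlmstate state;
        minlmreport rep;
        minlmcreatev(2, x, 0.0001, state);   // M=2 residuals, DiffStep=1e-4
        minlmsetcond(state, 1.0e-12, 0);     // EpsX=1e-12, unlimited iterations
        minlmoptimize(state, fvec);          // Jacobian via finite differences
        minlmresults(state, x, rep);
        return 0;
    }
*************************************************************************/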
9268 
9269 
9270 /*************************************************************************
9271     LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION
9272 
9273 DESCRIPTION:
9274 This  function  is  used  to  find  minimum  of general form (not "sum-of-
9275 -squares") function
9276     F = F(x[0], ..., x[n-1])
9277 using  its  gradient  and  Hessian.  Levenberg-Marquardt modification with
9278 L-BFGS pre-optimization and internal pre-conditioned  L-BFGS  optimization
9279 after each Levenberg-Marquardt step is used.
9280 
9281 
9282 REQUIREMENTS:
9283 This algorithm will request following information during its operation:
9284 
9285 * function value F at given point X
9286 * F and gradient G (simultaneously) at given point X
9287 * F, G and Hessian H (simultaneously) at given point X
9288 
9289 There are several overloaded versions of  MinLMOptimize()  function  which
9290 correspond  to  different LM-like optimization algorithms provided by this
9291 unit. You should choose version which accepts func(),  grad()  and  hess()
9292 function pointers. First pointer is used to calculate F  at  given  point,
9293 second  one  calculates  F(x)  and  grad F(x),  third one calculates F(x),
9294 grad F(x), hess F(x).
9295 
9296 You can try to initialize MinLMState structure with FGH-function and  then
9297 use incorrect version of MinLMOptimize() (for example, version which  does
9298 not provide Hessian matrix), but it will lead to  exception  being  thrown
9299 after first attempt to calculate Hessian.
9300 
9301 
9302 USAGE:
9303 1. User initializes algorithm state with MinLMCreateFGH() call
9304 2. User tunes solver parameters with MinLMSetCond(),  MinLMSetStpMax() and
9305    other functions
9306 3. User calls MinLMOptimize() function which  takes algorithm  state   and
9307    pointers (delegates, etc.) to callback functions.
9308 4. User calls MinLMResults() to get solution
9309 5. Optionally, user may call MinLMRestartFrom() to solve  another  problem
9310    with same N but another starting point and/or another function.
   MinLMRestartFrom() allows you to reuse an already initialized structure.
9312 
9313 
9314 INPUT PARAMETERS:
9315     N       -   dimension, N>1
9316                 * if given, only leading N elements of X are used
9317                 * if not given, automatically determined from size of X
9318     X       -   initial solution, array[0..N-1]
9319 
9320 OUTPUT PARAMETERS:
9321     State   -   structure which stores algorithm state
9322 
9323 NOTES:
9324 1. you may tune stopping conditions with MinLMSetCond() function
9325 2. if target function contains exp() or other fast growing functions,  and
9326    optimization algorithm makes too large steps which leads  to  overflow,
9327    use MinLMSetStpMax() function to bound algorithm's steps.
9328 
9329   -- ALGLIB --
9330      Copyright 30.03.2009 by Bochkanov Sergey
9331 *************************************************************************/
9332 void minlmcreatefgh(const ae_int_t n, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
9333 void minlmcreatefgh(const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
9334 
9335 
9336 /*************************************************************************
9337 This function sets stopping conditions for Levenberg-Marquardt optimization
9338 algorithm.
9339 
9340 INPUT PARAMETERS:
9341     State   -   structure which stores algorithm state
9342     EpsX    -   >=0
9343                 The subroutine finishes its work if  on  k+1-th  iteration
9344                 the condition |v|<=EpsX is fulfilled, where:
                * |.| means the Euclidean norm
                * v - scaled step vector, v[i]=dx[i]/s[i]
                * dx - step vector, dx=X(k+1)-X(k)
9348                 * s - scaling coefficients set by MinLMSetScale()
9349                 Recommended values: 1E-9 ... 1E-12.
9350     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
9351                 iterations   is    unlimited.   Only   Levenberg-Marquardt
9352                 iterations  are  counted  (L-BFGS/CG  iterations  are  NOT
9353                 counted because their cost is very low compared to that of
9354                 LM).
9355 
9356 Passing  EpsX=0  and  MaxIts=0  (simultaneously)  will  lead  to automatic
9357 stopping criterion selection (small EpsX).
9358 
9359 NOTE: it is not recommended to set large EpsX (say, 0.001). Because LM  is
9360       a second-order method, it performs very precise steps anyway.
9361 
9362   -- ALGLIB --
9363      Copyright 02.04.2010 by Bochkanov Sergey
9364 *************************************************************************/
9365 void minlmsetcond(const minlmstate &state, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
9366 
9367 
9368 /*************************************************************************
9369 This function turns on/off reporting.
9370 
9371 INPUT PARAMETERS:
9372     State   -   structure which stores algorithm state
9373     NeedXRep-   whether iteration reports are needed or not
9374 
9375 If NeedXRep is True, algorithm will call rep() callback function if  it is
9376 provided to MinLMOptimize(). Both Levenberg-Marquardt and internal  L-BFGS
9377 iterations are reported.
9378 
9379   -- ALGLIB --
9380      Copyright 02.04.2010 by Bochkanov Sergey
9381 *************************************************************************/
9382 void minlmsetxrep(const minlmstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
9383 
9384 
9385 /*************************************************************************
9386 This function sets maximum step length
9387 
9388 INPUT PARAMETERS:
9389     State   -   structure which stores algorithm state
9390     StpMax  -   maximum step length, >=0. Set StpMax to 0.0,  if you don't
9391                 want to limit step length.
9392 
9393 Use this subroutine when you optimize target function which contains exp()
9394 or  other  fast  growing  functions,  and optimization algorithm makes too
9395 large  steps  which  leads  to overflow. This function allows us to reject
9396 steps  that  are  too  large  (and  therefore  expose  us  to the possible
9397 overflow) without actually calculating function value at the x+stp*d.
9398 
9399 NOTE: non-zero StpMax leads to moderate  performance  degradation  because
9400 intermediate  step  of  preconditioned L-BFGS optimization is incompatible
9401 with limits on step size.
9402 
9403   -- ALGLIB --
9404      Copyright 02.04.2010 by Bochkanov Sergey
9405 *************************************************************************/
9406 void minlmsetstpmax(const minlmstate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
9407 
9408 
9409 /*************************************************************************
9410 This function sets scaling coefficients for LM optimizer.
9411 
9412 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
9413 size and gradient are scaled before comparison with tolerances).  Scale of
9414 the I-th variable is a translation invariant measure of:
9415 a) "how large" the variable is
9416 b) how large the step should be to make significant changes in the function
9417 
9418 Generally, scale is NOT considered to be a form of preconditioner.  But LM
9419 optimizer is unique in that it uses scaling matrix both  in  the  stopping
9420 condition tests and as Marquardt damping factor.
9421 
9422 Proper scaling is very important for the algorithm performance. It is less
9423 important for the quality of results, but still has some influence (it  is
9424 easier  to  converge  when  variables  are  properly  scaled, so premature
stopping is possible when very badly scaled variables are combined with
9426 relaxed stopping conditions).
9427 
9428 INPUT PARAMETERS:
    State   -   structure which stores algorithm state
9430     S       -   array[N], non-zero scaling coefficients
9431                 S[i] may be negative, sign doesn't matter.
9432 
9433   -- ALGLIB --
9434      Copyright 14.01.2011 by Bochkanov Sergey
9435 *************************************************************************/
9436 void minlmsetscale(const minlmstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
9437 
9438 
9439 /*************************************************************************
9440 This function sets boundary constraints for LM optimizer
9441 
9442 Boundary constraints are inactive by default (after initial creation).
9443 They are preserved until explicitly turned off with another SetBC() call.
9444 
9445 INPUT PARAMETERS:
    State   -   structure which stores algorithm state
9447     BndL    -   lower bounds, array[N].
9448                 If some (all) variables are unbounded, you may specify
9449                 very small number or -INF (latter is recommended because
9450                 it will allow solver to use better algorithm).
9451     BndU    -   upper bounds, array[N].
9452                 If some (all) variables are unbounded, you may specify
9453                 very large number or +INF (latter is recommended because
9454                 it will allow solver to use better algorithm).
9455 
9456 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
9457 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
9458 
9459 NOTE 2: this solver has following useful properties:
9460 * bound constraints are always satisfied exactly
9461 * function is evaluated only INSIDE area specified by bound constraints
9462   or at its boundary
9463 
9464   -- ALGLIB --
9465      Copyright 14.01.2011 by Bochkanov Sergey
9466 *************************************************************************/
9467 void minlmsetbc(const minlmstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
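
/*************************************************************************
USAGE SKETCH (informal): box constraints which freeze x[0] at 1.0 and keep
x[1] nonnegative. A minimal sketch; `state` is assumed to be a minlmstate
created earlier, and fp_posinf is ALGLIB's representation of +INF.

    real_1d_array bndl = "[1.0, 0.0]";
    real_1d_array bndu = "[1.0, 0.0]";
    bndu[1] = fp_posinf;                     // no upper bound on x[1]
    minlmsetbc(state, bndl, bndu);           // BndL[0]=BndU[0] freezes x[0]
*************************************************************************/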
9468 
9469 
9470 /*************************************************************************
9471 This function sets general linear constraints for LM optimizer
9472 
9473 Linear constraints are inactive by default (after initial creation).  They
9474 are preserved until explicitly turned off with another minlmsetlc() call.
9475 
9476 INPUT PARAMETERS:
    State   -   structure which stores algorithm state
9478     C       -   linear constraints, array[K,N+1].
9479                 Each row of C represents one constraint, either equality
9480                 or inequality (see below):
9481                 * first N elements correspond to coefficients,
9482                 * last element corresponds to the right part.
9483                 All elements of C (including right part) must be finite.
9484     CT      -   type of constraints, array[K]:
9485                 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
9486                 * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n+1]
9487                 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
9488     K       -   number of equality/inequality constraints, K>=0:
9489                 * if given, only leading K elements of C/CT are used
9490                 * if not given, automatically determined from sizes of C/CT
9491 
9492 IMPORTANT: if you have linear constraints, it is strongly  recommended  to
9493            set scale of variables with minlmsetscale(). QP solver which is
9494            used to calculate linearly constrained steps heavily relies  on
9495            good scaling of input problems.
9496 
9497 IMPORTANT: solvers created with minlmcreatefgh()  do  not  support  linear
9498            constraints.
9499 
9500 NOTE: linear  (non-bound)  constraints are satisfied only approximately  -
9501       there  always  exists some violation due  to  numerical  errors  and
9502       algorithmic limitations.
9503 
9504 NOTE: general linear constraints  add  significant  overhead  to  solution
9505       process. Although solver performs roughly same amount of  iterations
9506       (when compared  with  similar  box-only  constrained  problem), each
9507       iteration   now    involves  solution  of  linearly  constrained  QP
9508       subproblem, which requires ~3-5 times more Cholesky  decompositions.
      Thus, if you can reformulate your problem in such a way that it has
      only box constraints, it may be beneficial to do so.
9511 
9512   -- ALGLIB --
9513      Copyright 14.01.2011 by Bochkanov Sergey
9514 *************************************************************************/
9515 void minlmsetlc(const minlmstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
9516 void minlmsetlc(const minlmstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
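
/*************************************************************************
USAGE SKETCH (informal): one equality and one inequality constraint on a
2-dimensional problem. A minimal sketch; `state` is assumed to be created
with minlmcreatev()/minlmcreatevj(), and setting scales first follows the
recommendation above.

    real_1d_array s = "[1,1]";
    minlmsetscale(state, s);                 // strongly recommended with linear constraints
    real_2d_array c = "[[1.0, 1.0, 1.0],[1.0,-1.0, 2.0]]";   // [coefficients | right part]
    integer_1d_array ct = "[0,-1]";          // 0 => equality, negative => '<='
    minlmsetlc(state, c, ct);                // x0+x1 = 1  and  x0-x1 <= 2
*************************************************************************/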
9517 
9518 
9519 /*************************************************************************
9520 This function is used to change acceleration settings
9521 
You can choose between two acceleration strategies:
9523 * AccType=0, no acceleration.
9524 * AccType=1, secant updates are used to update quadratic model after  each
9525   iteration. After fixed number of iterations (or after  model  breakdown)
9526   we  recalculate  quadratic  model  using  analytic  Jacobian  or  finite
9527   differences. Number of secant-based iterations depends  on  optimization
9528   settings: about 3 iterations - when we have analytic Jacobian, up to 2*N
9529   iterations - when we use finite differences to calculate Jacobian.
9530 
9531 AccType=1 is recommended when Jacobian  calculation  cost is prohibitively
9532 high (several Mx1 function vector calculations  followed  by  several  NxN
9533 Cholesky factorizations are faster than calculation of one M*N  Jacobian).
9534 It should also be used when we have no Jacobian, because finite difference
9535 approximation takes too much time to compute.
9536 
The table below lists optimization protocols (the XYZ protocol corresponds
to MinLMCreateXYZ) and the acceleration types they support (and use by
default).
9539 
9540 ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS:
9541 
9542 protocol    0   1   comment
9543 V           +   +
9544 VJ          +   +
9545 FGH         +
9546 
9547 DEFAULT VALUES:
9548 
9549 protocol    0   1   comment
9550 V               x   without acceleration it is so slooooooooow
9551 VJ          x
9552 FGH         x
9553 
9554 NOTE: this  function should be called before optimization. Attempt to call
9555 it during algorithm iterations may result in unexpected behavior.
9556 
9557 NOTE: attempt to call this function with unsupported protocol/acceleration
9558 combination will result in exception being thrown.
9559 
9560   -- ALGLIB --
9561      Copyright 14.10.2010 by Bochkanov Sergey
9562 *************************************************************************/
9563 void minlmsetacctype(const minlmstate &state, const ae_int_t acctype, const xparams _xparams = alglib::xdefault);
9564 
9565 
9566 /*************************************************************************
This function provides the reverse communication interface.
The reverse communication interface is not documented and is not
recommended for direct use.
See below for functions which provide a better-documented API.
9570 *************************************************************************/
9571 bool minlmiteration(const minlmstate &state, const xparams _xparams = alglib::xdefault);
9572 
9573 
9574 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear
optimizer.

These functions accept the following parameters:
9578     state   -   algorithm state
9579     func    -   callback which calculates function (or merit function)
9580                 value func at given point x
9581     grad    -   callback which calculates function (or merit function)
9582                 value func and gradient grad at given point x
9583     hess    -   callback which calculates function (or merit function)
9584                 value func, gradient grad and Hessian hess at given point x
9585     fvec    -   callback which calculates function vector fi[]
9586                 at given point x
9587     jac     -   callback which calculates function vector fi[]
9588                 and Jacobian jac at given point x
9589     rep     -   optional callback which is called after each iteration
9590                 can be NULL
9591     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
9592                 can be NULL
9593 
9594 NOTES:
9595 
1. Depending on the function used to create the state structure, this
   algorithm may accept the Jacobian and/or Hessian and/or gradient.
   Accordingly, there are several versions of this function which accept
   different sets of callbacks.

   This flexibility opens the way to subtle errors - you may create the
   state with MinLMCreateFGH() (optimization using the Hessian), but call
   a version which does not accept the Hessian. So when the algorithm
   requests the Hessian, there will be no callback to call. In this case
   an exception will be thrown.

   Be careful to avoid such errors because there is no way to find them at
   compile time - you can see them at runtime only.
9608 
9609   -- ALGLIB --
9610      Copyright 10.03.2009 by Bochkanov Sergey
9611 
9612 *************************************************************************/
9613 void minlmoptimize(minlmstate &state,
9614     void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
9615     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
9616     void *ptr = NULL,
9617     const xparams _xparams = alglib::xdefault);
9618 void minlmoptimize(minlmstate &state,
9619     void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
9620     void  (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
9621     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
9622     void *ptr = NULL,
9623     const xparams _xparams = alglib::xdefault);
9624 void minlmoptimize(minlmstate &state,
9625     void (*func)(const real_1d_array &x, double &func, void *ptr),
9626     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
9627     void (*hess)(const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr),
9628     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
9629     void *ptr = NULL,
9630     const xparams _xparams = alglib::xdefault);
9631 void minlmoptimize(minlmstate &state,
9632     void (*func)(const real_1d_array &x, double &func, void *ptr),
9633     void  (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
9634     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
9635     void *ptr = NULL,
9636     const xparams _xparams = alglib::xdefault);
9637 void minlmoptimize(minlmstate &state,
9638     void (*func)(const real_1d_array &x, double &func, void *ptr),
9639     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
9640     void  (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
9641     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
9642     void *ptr = NULL,
9643     const xparams _xparams = alglib::xdefault);
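
/*************************************************************************
USAGE SKETCH (informal): the "VJ" protocol, i.e. the overload which accepts
both fvec() and jac() callbacks. A minimal sketch; the residuals, their
analytic Jacobian and the header name are illustrative assumptions.

    #include "optimization.h"   // assumed name of the ALGLIB C++ interface header
    using namespace alglib;

    void fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    {
        fi[0] = 10*(x[0]+3)*(x[0]+3);        // f0 = 10*(x0+3)^2
        fi[1] = (x[1]-3)*(x[1]-3);           // f1 = (x1-3)^2
    }
    void fjac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    {
        fi[0] = 10*(x[0]+3)*(x[0]+3);
        fi[1] = (x[1]-3)*(x[1]-3);
        jac[0][0] = 20*(x[0]+3);  jac[0][1] = 0;
        jac[1][0] = 0;            jac[1][1] = 2*(x[1]-3);
    }

    int main()
    {
        real_1d_array x = "[0,0]";
        minlmstate state;
        minlmreport rep;
        minlmcreatevj(2, x, state);          // M=2 residuals, analytic Jacobian protocol
        minlmsetcond(state, 1.0e-12, 0);
        minlmoptimize(state, fvec, fjac);    // overload which accepts the Jacobian
        minlmresults(state, x, rep);
        return 0;
    }
*************************************************************************/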
9644 
9645 
9646 /*************************************************************************
9647 This  function  activates/deactivates verification  of  the  user-supplied
9648 analytic Jacobian.
9649 
9650 Upon  activation  of  this  option  OptGuard  integrity  checker  performs
9651 numerical differentiation of your target function vector  at  the  initial
9652 point (note: future versions may also perform check  at  the final  point)
9653 and compares numerical Jacobian with analytic one provided by you.
9654 
9655 If difference is too large, an error flag is set and optimization  session
9656 continues. After optimization session is over, you can retrieve the report
9657 which stores  both  Jacobians,  and  specific  components  highlighted  as
9658 suspicious by the OptGuard.
9659 
9660 The OptGuard report can be retrieved with minlmoptguardresults().
9661 
9662 IMPORTANT: gradient check is a high-overhead option which  will  cost  you
9663            about 3*N additional function evaluations. In many cases it may
9664            cost as much as the rest of the optimization session.
9665 
9666            YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO
9667            CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY.
9668 
9669 NOTE: unlike previous incarnation of the gradient checking code,  OptGuard
9670       does NOT interrupt optimization even if it discovers bad gradient.
9671 
9672 INPUT PARAMETERS:
9673     State       -   structure used to store algorithm state
9674     TestStep    -   verification step used for numerical differentiation:
9675                     * TestStep=0 turns verification off
9676                     * TestStep>0 activates verification
9677                     You should carefully choose TestStep. Value  which  is
9678                     too large (so large that  function  behavior  is  non-
9679                     cubic at this scale) will lead  to  false  alarms. Too
9680                     short step will result in rounding  errors  dominating
9681                     numerical derivative.
9682 
9683                     You may use different step for different parameters by
9684                     means of setting scale with minlmsetscale().
9685 
9686 === EXPLANATION ==========================================================
9687 
In order to verify the derivatives, the algorithm performs the following steps:
9689   * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
9690     where X[i] is i-th component of the initial point and S[i] is a  scale
9691     of i-th parameter
9692   * F(X) is evaluated at these trial points
9693   * we perform one more evaluation in the middle point of the interval
9694   * we  build  cubic  model using function values and derivatives at trial
9695     points and we compare its prediction with actual value in  the  middle
9696     point
9697 
9698   -- ALGLIB --
9699      Copyright 15.06.2014 by Bochkanov Sergey
9700 *************************************************************************/
9701 void minlmoptguardgradient(const minlmstate &state, const double teststep, const xparams _xparams = alglib::xdefault);
9702 
9703 
9704 /*************************************************************************
Results of the OptGuard integrity check; this function should be called
after the optimization session is over.
9707 
9708 OptGuard checks analytic Jacobian  against  reference  value  obtained  by
9709 numerical differentiation with user-specified step.
9710 
9711 NOTE: other optimizers perform additional OptGuard checks for things  like
9712       C0/C1-continuity violations. However, LM optimizer  can  check  only
9713       for incorrect Jacobian.
9714 
9715       The reason is that unlike line search methods LM optimizer does  not
9716       perform extensive evaluations along the line. Thus, we simply do not
9717       have enough data to catch C0/C1-violations.
9718 
This check is activated with the minlmoptguardgradient() function.
9720 
9721 Following flags are set when these errors are suspected:
9722 * rep.badgradsuspected, and additionally:
9723   * rep.badgradfidx for specific function (Jacobian row) suspected
9724   * rep.badgradvidx for specific variable (Jacobian column) suspected
9725   * rep.badgradxbase, a point where gradient/Jacobian is tested
9726   * rep.badgraduser, user-provided gradient/Jacobian
9727   * rep.badgradnum, reference gradient/Jacobian obtained via numerical
9728     differentiation
9729 
9730 INPUT PARAMETERS:
9731     state   -   algorithm state
9732 
9733 OUTPUT PARAMETERS:
9734     rep     -   OptGuard report
9735 
9736   -- ALGLIB --
9737      Copyright 21.11.2018 by Bochkanov Sergey
9738 *************************************************************************/
9739 void minlmoptguardresults(const minlmstate &state, optguardreport &rep, const xparams _xparams = alglib::xdefault);
9740 
9741 
9742 /*************************************************************************
9743 Levenberg-Marquardt algorithm results
9744 
9745 NOTE: if you activated OptGuard integrity checking functionality and  want
9746       to get OptGuard report,  it  can  be  retrieved  with  the  help  of
9747       minlmoptguardresults() function.
9748 
9749 INPUT PARAMETERS:
9750     State   -   algorithm state
9751 
9752 OUTPUT PARAMETERS:
9753     X       -   array[0..N-1], solution
9754     Rep     -   optimization  report;  includes  termination   codes   and
9755                 additional information. Termination codes are listed below,
9756                 see comments for this structure for more info.
9757                 Termination code is stored in rep.terminationtype field:
9758                 * -8    optimizer detected NAN/INF values either in the
9759                         function itself, or in its Jacobian
9760                 * -3    constraints are inconsistent
9761                 *  2    relative step is no more than EpsX.
                *  5    MaxIts steps were taken
9763                 *  7    stopping conditions are too stringent,
9764                         further improvement is impossible
                *  8    terminated by the user who called
                        minlmrequesttermination(). X contains the point
                        which was "current accepted" when the termination
                        request was submitted.
9768 
9769   -- ALGLIB --
9770      Copyright 10.03.2009 by Bochkanov Sergey
9771 *************************************************************************/
9772 void minlmresults(const minlmstate &state, real_1d_array &x, minlmreport &rep, const xparams _xparams = alglib::xdefault);
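
/*
A minimal sketch of reading the results (illustrative only; it assumes the
same hypothetical setup as in the sketches above):

    alglib::real_1d_array xsol;
    alglib::minlmreport rep;
    alglib::minlmresults(state, xsol, rep);
    if( rep.terminationtype>0 )
        printf("solution: %s\n", xsol.tostring(6).c_str());
    else
        printf("failure, terminationtype=%d\n", (int)rep.terminationtype);
*/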
9773 
9774 
9775 /*************************************************************************
9776 Levenberg-Marquardt algorithm results
9777 
Buffered implementation of MinLMResults(), which uses a pre-allocated buffer
to store X[]. If the buffer size is too small, it is resized. This  function
is intended to be used in the inner cycles of performance-critical algorithms
where the array reallocation penalty is too large to be ignored.
9782 
9783   -- ALGLIB --
9784      Copyright 10.03.2009 by Bochkanov Sergey
9785 *************************************************************************/
9786 void minlmresultsbuf(const minlmstate &state, real_1d_array &x, minlmreport &rep, const xparams _xparams = alglib::xdefault);
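
/*
A minimal sketch (illustrative only; problemcount and the restart/optimize
calls are placeholders): the same pre-allocated array is reused across
repeated solves, so no reallocation happens inside the loop as long as its
length already matches N.

    alglib::real_1d_array xbuf;
    xbuf.setlength(n);                 // allocate once, outside the loop
    alglib::minlmreport rep;
    for(int k=0; k<problemcount; k++)
    {
        // ... restart the solver and run minlmoptimize() ...
        alglib::minlmresultsbuf(state, xbuf, rep);
    }
*/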
9787 
9788 
9789 /*************************************************************************
This subroutine restarts the LM algorithm from a new point. All optimization
parameters are left unchanged.

This function allows one to solve multiple  optimization  problems  (which
must have the same number of dimensions) without the  object  reallocation
penalty.
9795 
9796 INPUT PARAMETERS:
9797     State   -   structure used for reverse communication previously
9798                 allocated with MinLMCreateXXX call.
9799     X       -   new starting point.
9800 
9801   -- ALGLIB --
9802      Copyright 30.07.2010 by Bochkanov Sergey
9803 *************************************************************************/
9804 void minlmrestartfrom(const minlmstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
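
/*
A minimal sketch of reusing one solver object for two problems of the same
dimensionality (illustrative only; x1, xsol, rep and the callbacks are
hypothetical, and the state is assumed to be created as in the sketches
above):

    alglib::minlmoptimize(state, function1_fvec, function1_jac);   // first run
    alglib::minlmresults(state, xsol, rep);

    alglib::minlmrestartfrom(state, x1);                           // new start,
    alglib::minlmoptimize(state, function1_fvec, function1_jac);   // same N
    alglib::minlmresults(state, xsol, rep);
*/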
9805 
9806 
9807 /*************************************************************************
This subroutine submits a request for termination of the running optimizer.
It should be called from a user-supplied callback when the  user  decides
that it is time to "smoothly" terminate the  optimization  process.  As  a
result, the optimizer stops at the point which was "current accepted" when
the termination request was submitted and returns completion code 8
(successful termination).
9813 
9814 INPUT PARAMETERS:
9815     State   -   optimizer structure
9816 
NOTE: after the termination request the optimizer  may  perform  several
      additional calls to user-supplied callbacks. It is NOT  guaranteed
      to stop immediately - it is only guaranteed that the  results  of
      these additional calls will be discarded later.
9821 
NOTE: calling this function on an optimizer which is NOT running will have
      no effect.
9824 
NOTE: multiple calls to this function are possible. The first call  is
      counted; subsequent calls are silently ignored.
9827 
9828   -- ALGLIB --
9829      Copyright 08.10.2014 by Bochkanov Sergey
9830 *************************************************************************/
9831 void minlmrequesttermination(const minlmstate &state, const xparams _xparams = alglib::xdefault);
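
/*
A minimal sketch of a progress callback which requests "smooth" termination
(illustrative only; it assumes the minlmsetxrep()/minlmoptimize() interface
declared elsewhere in this header, passes the solver state through the ptr
argument, and uses a hypothetical time_budget_exceeded() check):

    void rep_callback(const alglib::real_1d_array &x, double func, void *ptr)
    {
        alglib::minlmstate *st = (alglib::minlmstate*)ptr;
        if( time_budget_exceeded() )
            alglib::minlmrequesttermination(*st);
    }

    // ... during setup:
    alglib::minlmsetxrep(state, true);
    alglib::minlmoptimize(state, function1_fvec, function1_jac,
                          rep_callback, &state);
*/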
9832 
9833 
9834 /*************************************************************************
This is an obsolete function.
9836 
9837 Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ().
9838 
9839   -- ALGLIB --
9840      Copyright 30.03.2009 by Bochkanov Sergey
9841 *************************************************************************/
9842 void minlmcreatevgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
9843 void minlmcreatevgj(const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
9844 
9845 
9846 /*************************************************************************
This is an obsolete function.
9848 
9849 Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ().
9850 
9851   -- ALGLIB --
9852      Copyright 30.03.2009 by Bochkanov Sergey
9853 *************************************************************************/
9854 void minlmcreatefgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
9855 void minlmcreatefgj(const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
9856 
9857 
9858 /*************************************************************************
This function is considered obsolete since ALGLIB 3.1.0 and is present for
backward compatibility only. We recommend using  MinLMCreateVJ,  which
provides a similar but more consistent and feature-rich interface.
9862 
9863   -- ALGLIB --
9864      Copyright 30.03.2009 by Bochkanov Sergey
9865 *************************************************************************/
9866 void minlmcreatefj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
9867 void minlmcreatefj(const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
9868 #endif
9869 }
9870 
9871 /////////////////////////////////////////////////////////////////////////
9872 //
9873 // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS)
9874 //
9875 /////////////////////////////////////////////////////////////////////////
9876 namespace alglib_impl
9877 {
9878 #if defined(AE_COMPILE_CQMODELS) || !defined(AE_PARTIAL_BUILD)
9879 void cqminit(ae_int_t n, convexquadraticmodel* s, ae_state *_state);
9880 void cqmseta(convexquadraticmodel* s,
9881      /* Real    */ ae_matrix* a,
9882      ae_bool isupper,
9883      double alpha,
9884      ae_state *_state);
9885 void cqmgeta(convexquadraticmodel* s,
9886      /* Real    */ ae_matrix* a,
9887      ae_state *_state);
9888 void cqmrewritedensediagonal(convexquadraticmodel* s,
9889      /* Real    */ ae_vector* z,
9890      ae_state *_state);
9891 void cqmsetd(convexquadraticmodel* s,
9892      /* Real    */ ae_vector* d,
9893      double tau,
9894      ae_state *_state);
9895 void cqmdropa(convexquadraticmodel* s, ae_state *_state);
9896 void cqmsetb(convexquadraticmodel* s,
9897      /* Real    */ ae_vector* b,
9898      ae_state *_state);
9899 void cqmsetq(convexquadraticmodel* s,
9900      /* Real    */ ae_matrix* q,
9901      /* Real    */ ae_vector* r,
9902      ae_int_t k,
9903      double theta,
9904      ae_state *_state);
9905 void cqmsetactiveset(convexquadraticmodel* s,
9906      /* Real    */ ae_vector* x,
9907      /* Boolean */ ae_vector* activeset,
9908      ae_state *_state);
9909 double cqmeval(convexquadraticmodel* s,
9910      /* Real    */ ae_vector* x,
9911      ae_state *_state);
9912 void cqmevalx(convexquadraticmodel* s,
9913      /* Real    */ ae_vector* x,
9914      double* r,
9915      double* noise,
9916      ae_state *_state);
9917 void cqmgradunconstrained(convexquadraticmodel* s,
9918      /* Real    */ ae_vector* x,
9919      /* Real    */ ae_vector* g,
9920      ae_state *_state);
9921 double cqmxtadx2(convexquadraticmodel* s,
9922      /* Real    */ ae_vector* x,
9923      /* Real    */ ae_vector* tmp,
9924      ae_state *_state);
9925 void cqmadx(convexquadraticmodel* s,
9926      /* Real    */ ae_vector* x,
9927      /* Real    */ ae_vector* y,
9928      ae_state *_state);
9929 ae_bool cqmconstrainedoptimum(convexquadraticmodel* s,
9930      /* Real    */ ae_vector* x,
9931      ae_state *_state);
9932 void cqmscalevector(convexquadraticmodel* s,
9933      /* Real    */ ae_vector* x,
9934      ae_state *_state);
9935 void cqmgetdiaga(convexquadraticmodel* s,
9936      /* Real    */ ae_vector* x,
9937      ae_state *_state);
9938 double cqmdebugconstrainedevalt(convexquadraticmodel* s,
9939      /* Real    */ ae_vector* x,
9940      ae_state *_state);
9941 double cqmdebugconstrainedevale(convexquadraticmodel* s,
9942      /* Real    */ ae_vector* x,
9943      ae_state *_state);
9944 void _convexquadraticmodel_init(void* _p, ae_state *_state, ae_bool make_automatic);
9945 void _convexquadraticmodel_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9946 void _convexquadraticmodel_clear(void* _p);
9947 void _convexquadraticmodel_destroy(void* _p);
9948 #endif
9949 #if defined(AE_COMPILE_OPTGUARDAPI) || !defined(AE_PARTIAL_BUILD)
9950 void optguardinitinternal(optguardreport* rep,
9951      ae_int_t n,
9952      ae_int_t k,
9953      ae_state *_state);
9954 void optguardexportreport(optguardreport* srcrep,
9955      ae_int_t n,
9956      ae_int_t k,
9957      ae_bool badgradhasxj,
9958      optguardreport* dstrep,
9959      ae_state *_state);
9960 void smoothnessmonitorexportc1test0report(optguardnonc1test0report* srcrep,
9961      /* Real    */ ae_vector* s,
9962      optguardnonc1test0report* dstrep,
9963      ae_state *_state);
9964 void smoothnessmonitorexportc1test1report(optguardnonc1test1report* srcrep,
9965      /* Real    */ ae_vector* s,
9966      optguardnonc1test1report* dstrep,
9967      ae_state *_state);
9968 ae_bool optguardallclear(optguardreport* rep, ae_state *_state);
9969 void _optguardreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
9970 void _optguardreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9971 void _optguardreport_clear(void* _p);
9972 void _optguardreport_destroy(void* _p);
9973 void _optguardnonc1test0report_init(void* _p, ae_state *_state, ae_bool make_automatic);
9974 void _optguardnonc1test0report_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9975 void _optguardnonc1test0report_clear(void* _p);
9976 void _optguardnonc1test0report_destroy(void* _p);
9977 void _optguardnonc1test1report_init(void* _p, ae_state *_state, ae_bool make_automatic);
9978 void _optguardnonc1test1report_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9979 void _optguardnonc1test1report_clear(void* _p);
9980 void _optguardnonc1test1report_destroy(void* _p);
9981 #endif
9982 #if defined(AE_COMPILE_OPTSERV) || !defined(AE_PARTIAL_BUILD)
9983 void checkbcviolation(/* Boolean */ ae_vector* hasbndl,
9984      /* Real    */ ae_vector* bndl,
9985      /* Boolean */ ae_vector* hasbndu,
9986      /* Real    */ ae_vector* bndu,
9987      /* Real    */ ae_vector* x,
9988      ae_int_t n,
9989      /* Real    */ ae_vector* s,
9990      ae_bool nonunits,
9991      double* bcerr,
9992      ae_int_t* bcidx,
9993      ae_state *_state);
9994 void checklcviolation(/* Real    */ ae_matrix* cleic,
9995      /* Integer */ ae_vector* lcsrcidx,
9996      ae_int_t nec,
9997      ae_int_t nic,
9998      /* Real    */ ae_vector* x,
9999      ae_int_t n,
10000      double* lcerr,
10001      ae_int_t* lcidx,
10002      ae_state *_state);
10003 void checknlcviolation(/* Real    */ ae_vector* fi,
10004      ae_int_t ng,
10005      ae_int_t nh,
10006      double* nlcerr,
10007      ae_int_t* nlcidx,
10008      ae_state *_state);
10009 void trimprepare(double f, double* threshold, ae_state *_state);
10010 void trimfunction(double* f,
10011      /* Real    */ ae_vector* g,
10012      ae_int_t n,
10013      double threshold,
10014      ae_state *_state);
10015 ae_bool enforceboundaryconstraints(/* Real    */ ae_vector* x,
10016      /* Real    */ ae_vector* bl,
10017      /* Boolean */ ae_vector* havebl,
10018      /* Real    */ ae_vector* bu,
10019      /* Boolean */ ae_vector* havebu,
10020      ae_int_t nmain,
10021      ae_int_t nslack,
10022      ae_state *_state);
10023 void projectgradientintobc(/* Real    */ ae_vector* x,
10024      /* Real    */ ae_vector* g,
10025      /* Real    */ ae_vector* bl,
10026      /* Boolean */ ae_vector* havebl,
10027      /* Real    */ ae_vector* bu,
10028      /* Boolean */ ae_vector* havebu,
10029      ae_int_t nmain,
10030      ae_int_t nslack,
10031      ae_state *_state);
10032 void calculatestepbound(/* Real    */ ae_vector* x,
10033      /* Real    */ ae_vector* d,
10034      double alpha,
10035      /* Real    */ ae_vector* bndl,
10036      /* Boolean */ ae_vector* havebndl,
10037      /* Real    */ ae_vector* bndu,
10038      /* Boolean */ ae_vector* havebndu,
10039      ae_int_t nmain,
10040      ae_int_t nslack,
10041      ae_int_t* variabletofreeze,
10042      double* valuetofreeze,
10043      double* maxsteplen,
10044      ae_state *_state);
10045 ae_int_t postprocessboundedstep(/* Real    */ ae_vector* x,
10046      /* Real    */ ae_vector* xprev,
10047      /* Real    */ ae_vector* bndl,
10048      /* Boolean */ ae_vector* havebndl,
10049      /* Real    */ ae_vector* bndu,
10050      /* Boolean */ ae_vector* havebndu,
10051      ae_int_t nmain,
10052      ae_int_t nslack,
10053      ae_int_t variabletofreeze,
10054      double valuetofreeze,
10055      double steptaken,
10056      double maxsteplen,
10057      ae_state *_state);
10058 void filterdirection(/* Real    */ ae_vector* d,
10059      /* Real    */ ae_vector* x,
10060      /* Real    */ ae_vector* bndl,
10061      /* Boolean */ ae_vector* havebndl,
10062      /* Real    */ ae_vector* bndu,
10063      /* Boolean */ ae_vector* havebndu,
10064      /* Real    */ ae_vector* s,
10065      ae_int_t nmain,
10066      ae_int_t nslack,
10067      double droptol,
10068      ae_state *_state);
10069 ae_int_t numberofchangedconstraints(/* Real    */ ae_vector* x,
10070      /* Real    */ ae_vector* xprev,
10071      /* Real    */ ae_vector* bndl,
10072      /* Boolean */ ae_vector* havebndl,
10073      /* Real    */ ae_vector* bndu,
10074      /* Boolean */ ae_vector* havebndu,
10075      ae_int_t nmain,
10076      ae_int_t nslack,
10077      ae_state *_state);
10078 ae_bool findfeasiblepoint(/* Real    */ ae_vector* x,
10079      /* Real    */ ae_vector* bndl,
10080      /* Boolean */ ae_vector* havebndl,
10081      /* Real    */ ae_vector* bndu,
10082      /* Boolean */ ae_vector* havebndu,
10083      ae_int_t nmain,
10084      ae_int_t nslack,
10085      /* Real    */ ae_matrix* ce,
10086      ae_int_t k,
10087      double epsi,
10088      ae_int_t* qpits,
10089      ae_int_t* gpaits,
10090      ae_state *_state);
10091 ae_bool derivativecheck(double f0,
10092      double df0,
10093      double f1,
10094      double df1,
10095      double f,
10096      double df,
10097      double width,
10098      ae_state *_state);
10099 void estimateparabolicmodel(double absasum,
10100      double absasum2,
10101      double mx,
10102      double mb,
10103      double md,
10104      double d1,
10105      double d2,
10106      ae_int_t* d1est,
10107      ae_int_t* d2est,
10108      ae_state *_state);
10109 void inexactlbfgspreconditioner(/* Real    */ ae_vector* s,
10110      ae_int_t n,
10111      /* Real    */ ae_vector* d,
10112      /* Real    */ ae_vector* c,
10113      /* Real    */ ae_matrix* w,
10114      ae_int_t k,
10115      precbuflbfgs* buf,
10116      ae_state *_state);
10117 void preparelowrankpreconditioner(/* Real    */ ae_vector* d,
10118      /* Real    */ ae_vector* c,
10119      /* Real    */ ae_matrix* w,
10120      ae_int_t n,
10121      ae_int_t k,
10122      precbuflowrank* buf,
10123      ae_state *_state);
10124 void applylowrankpreconditioner(/* Real    */ ae_vector* s,
10125      precbuflowrank* buf,
10126      ae_state *_state);
10127 void smoothnessmonitorinit(smoothnessmonitor* monitor,
10128      ae_int_t n,
10129      ae_int_t k,
10130      ae_bool checksmoothness,
10131      ae_state *_state);
10132 void smoothnessmonitorstartlinesearch(smoothnessmonitor* monitor,
10133      /* Real    */ ae_vector* x,
10134      /* Real    */ ae_vector* fi,
10135      /* Real    */ ae_matrix* jac,
10136      ae_state *_state);
10137 void smoothnessmonitorstartlinesearch1u(smoothnessmonitor* monitor,
10138      /* Real    */ ae_vector* s,
10139      /* Real    */ ae_vector* invs,
10140      /* Real    */ ae_vector* x,
10141      double f0,
10142      /* Real    */ ae_vector* j0,
10143      ae_state *_state);
10144 void smoothnessmonitorenqueuepoint(smoothnessmonitor* monitor,
10145      /* Real    */ ae_vector* d,
10146      double stp,
10147      /* Real    */ ae_vector* x,
10148      /* Real    */ ae_vector* fi,
10149      /* Real    */ ae_matrix* jac,
10150      ae_state *_state);
10151 void smoothnessmonitorenqueuepoint1u(smoothnessmonitor* monitor,
10152      /* Real    */ ae_vector* s,
10153      /* Real    */ ae_vector* invs,
10154      /* Real    */ ae_vector* d,
10155      double stp,
10156      /* Real    */ ae_vector* x,
10157      double f0,
10158      /* Real    */ ae_vector* j0,
10159      ae_state *_state);
10160 void smoothnessmonitorfinalizelinesearch(smoothnessmonitor* monitor,
10161      ae_state *_state);
10162 void smoothnessmonitorexportreport(smoothnessmonitor* monitor,
10163      optguardreport* rep,
10164      ae_state *_state);
10165 ae_bool smoothnessmonitorcheckgradientatx0(smoothnessmonitor* monitor,
10166      /* Real    */ ae_vector* unscaledx0,
10167      /* Real    */ ae_vector* s,
10168      /* Real    */ ae_vector* bndl,
10169      /* Real    */ ae_vector* bndu,
10170      ae_bool hasboxconstraints,
10171      double teststep,
10172      ae_state *_state);
10173 void _precbuflbfgs_init(void* _p, ae_state *_state, ae_bool make_automatic);
10174 void _precbuflbfgs_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10175 void _precbuflbfgs_clear(void* _p);
10176 void _precbuflbfgs_destroy(void* _p);
10177 void _precbuflowrank_init(void* _p, ae_state *_state, ae_bool make_automatic);
10178 void _precbuflowrank_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10179 void _precbuflowrank_clear(void* _p);
10180 void _precbuflowrank_destroy(void* _p);
10181 void _smoothnessmonitor_init(void* _p, ae_state *_state, ae_bool make_automatic);
10182 void _smoothnessmonitor_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10183 void _smoothnessmonitor_clear(void* _p);
10184 void _smoothnessmonitor_destroy(void* _p);
10185 #endif
10186 #if defined(AE_COMPILE_SNNLS) || !defined(AE_PARTIAL_BUILD)
10187 void snnlsinit(ae_int_t nsmax,
10188      ae_int_t ndmax,
10189      ae_int_t nrmax,
10190      snnlssolver* s,
10191      ae_state *_state);
10192 void snnlssetproblem(snnlssolver* s,
10193      /* Real    */ ae_matrix* a,
10194      /* Real    */ ae_vector* b,
10195      ae_int_t ns,
10196      ae_int_t nd,
10197      ae_int_t nr,
10198      ae_state *_state);
10199 void snnlsdropnnc(snnlssolver* s, ae_int_t idx, ae_state *_state);
10200 void snnlssolve(snnlssolver* s,
10201      /* Real    */ ae_vector* x,
10202      ae_state *_state);
10203 void _snnlssolver_init(void* _p, ae_state *_state, ae_bool make_automatic);
10204 void _snnlssolver_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10205 void _snnlssolver_clear(void* _p);
10206 void _snnlssolver_destroy(void* _p);
10207 #endif
10208 #if defined(AE_COMPILE_SACTIVESETS) || !defined(AE_PARTIAL_BUILD)
10209 void sasinit(ae_int_t n, sactiveset* s, ae_state *_state);
10210 void sassetscale(sactiveset* state,
10211      /* Real    */ ae_vector* s,
10212      ae_state *_state);
10213 void sassetprecdiag(sactiveset* state,
10214      /* Real    */ ae_vector* d,
10215      ae_state *_state);
10216 void sassetbc(sactiveset* state,
10217      /* Real    */ ae_vector* bndl,
10218      /* Real    */ ae_vector* bndu,
10219      ae_state *_state);
10220 void sassetlc(sactiveset* state,
10221      /* Real    */ ae_matrix* c,
10222      /* Integer */ ae_vector* ct,
10223      ae_int_t k,
10224      ae_state *_state);
10225 void sassetlcx(sactiveset* state,
10226      /* Real    */ ae_matrix* cleic,
10227      ae_int_t nec,
10228      ae_int_t nic,
10229      ae_state *_state);
10230 ae_bool sasstartoptimization(sactiveset* state,
10231      /* Real    */ ae_vector* x,
10232      ae_state *_state);
10233 void sasexploredirection(sactiveset* state,
10234      /* Real    */ ae_vector* d,
10235      double* stpmax,
10236      ae_int_t* cidx,
10237      double* vval,
10238      ae_state *_state);
10239 ae_int_t sasmoveto(sactiveset* state,
10240      /* Real    */ ae_vector* xn,
10241      ae_bool needact,
10242      ae_int_t cidx,
10243      double cval,
10244      ae_state *_state);
10245 void sasimmediateactivation(sactiveset* state,
10246      ae_int_t cidx,
10247      double cval,
10248      ae_state *_state);
10249 void sasconstraineddescent(sactiveset* state,
10250      /* Real    */ ae_vector* g,
10251      /* Real    */ ae_vector* d,
10252      ae_state *_state);
10253 void sasconstraineddescentprec(sactiveset* state,
10254      /* Real    */ ae_vector* g,
10255      /* Real    */ ae_vector* d,
10256      ae_state *_state);
10257 void sasconstraineddirection(sactiveset* state,
10258      /* Real    */ ae_vector* d,
10259      ae_state *_state);
10260 void sasconstraineddirectionprec(sactiveset* state,
10261      /* Real    */ ae_vector* d,
10262      ae_state *_state);
10263 void sascorrection(sactiveset* state,
10264      /* Real    */ ae_vector* x,
10265      double* penalty,
10266      ae_state *_state);
10267 double sasactivelcpenalty1(sactiveset* state,
10268      /* Real    */ ae_vector* x,
10269      ae_state *_state);
10270 double sasscaledconstrainednorm(sactiveset* state,
10271      /* Real    */ ae_vector* d,
10272      ae_state *_state);
10273 void sasstopoptimization(sactiveset* state, ae_state *_state);
10274 void sasreactivateconstraints(sactiveset* state,
10275      /* Real    */ ae_vector* gc,
10276      ae_state *_state);
10277 void sasreactivateconstraintsprec(sactiveset* state,
10278      /* Real    */ ae_vector* gc,
10279      ae_state *_state);
10280 void sasrebuildbasis(sactiveset* state, ae_state *_state);
10281 void sasappendtobasis(sactiveset* state,
10282      /* Boolean */ ae_vector* newentries,
10283      ae_state *_state);
10284 void _sactiveset_init(void* _p, ae_state *_state, ae_bool make_automatic);
10285 void _sactiveset_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10286 void _sactiveset_clear(void* _p);
10287 void _sactiveset_destroy(void* _p);
10288 #endif
10289 #if defined(AE_COMPILE_QQPSOLVER) || !defined(AE_PARTIAL_BUILD)
10290 void qqploaddefaults(ae_int_t n, qqpsettings* s, ae_state *_state);
10291 void qqpcopysettings(qqpsettings* src, qqpsettings* dst, ae_state *_state);
10292 void qqppreallocategrowdense(qqpbuffers* sstate,
10293      ae_int_t nexpected,
10294      ae_int_t ngrowto,
10295      ae_state *_state);
10296 void qqpoptimize(convexquadraticmodel* cqmac,
10297      sparsematrix* sparseac,
10298      /* Real    */ ae_matrix* denseac,
10299      ae_int_t akind,
10300      ae_bool isupper,
10301      /* Real    */ ae_vector* bc,
10302      /* Real    */ ae_vector* bndlc,
10303      /* Real    */ ae_vector* bnduc,
10304      /* Real    */ ae_vector* sc,
10305      /* Real    */ ae_vector* xoriginc,
10306      ae_int_t nc,
10307      qqpsettings* settings,
10308      qqpbuffers* sstate,
10309      /* Real    */ ae_vector* xs,
10310      ae_int_t* terminationtype,
10311      ae_state *_state);
10312 void _qqpsettings_init(void* _p, ae_state *_state, ae_bool make_automatic);
10313 void _qqpsettings_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10314 void _qqpsettings_clear(void* _p);
10315 void _qqpsettings_destroy(void* _p);
10316 void _qqpbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic);
10317 void _qqpbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10318 void _qqpbuffers_clear(void* _p);
10319 void _qqpbuffers_destroy(void* _p);
10320 #endif
10321 #if defined(AE_COMPILE_MINLBFGS) || !defined(AE_PARTIAL_BUILD)
10322 void minlbfgscreate(ae_int_t n,
10323      ae_int_t m,
10324      /* Real    */ ae_vector* x,
10325      minlbfgsstate* state,
10326      ae_state *_state);
10327 void minlbfgscreatef(ae_int_t n,
10328      ae_int_t m,
10329      /* Real    */ ae_vector* x,
10330      double diffstep,
10331      minlbfgsstate* state,
10332      ae_state *_state);
10333 void minlbfgssetcond(minlbfgsstate* state,
10334      double epsg,
10335      double epsf,
10336      double epsx,
10337      ae_int_t maxits,
10338      ae_state *_state);
10339 void minlbfgssetxrep(minlbfgsstate* state,
10340      ae_bool needxrep,
10341      ae_state *_state);
10342 void minlbfgssetstpmax(minlbfgsstate* state,
10343      double stpmax,
10344      ae_state *_state);
10345 void minlbfgssetscale(minlbfgsstate* state,
10346      /* Real    */ ae_vector* s,
10347      ae_state *_state);
10348 void minlbfgscreatex(ae_int_t n,
10349      ae_int_t m,
10350      /* Real    */ ae_vector* x,
10351      ae_int_t flags,
10352      double diffstep,
10353      minlbfgsstate* state,
10354      ae_state *_state);
10355 void minlbfgssetprecdefault(minlbfgsstate* state, ae_state *_state);
10356 void minlbfgssetpreccholesky(minlbfgsstate* state,
10357      /* Real    */ ae_matrix* p,
10358      ae_bool isupper,
10359      ae_state *_state);
10360 void minlbfgssetprecdiag(minlbfgsstate* state,
10361      /* Real    */ ae_vector* d,
10362      ae_state *_state);
10363 void minlbfgssetprecscale(minlbfgsstate* state, ae_state *_state);
10364 void minlbfgssetprecrankklbfgsfast(minlbfgsstate* state,
10365      /* Real    */ ae_vector* d,
10366      /* Real    */ ae_vector* c,
10367      /* Real    */ ae_matrix* w,
10368      ae_int_t cnt,
10369      ae_state *_state);
10370 void minlbfgssetpreclowrankexact(minlbfgsstate* state,
10371      /* Real    */ ae_vector* d,
10372      /* Real    */ ae_vector* c,
10373      /* Real    */ ae_matrix* w,
10374      ae_int_t cnt,
10375      ae_state *_state);
10376 ae_bool minlbfgsiteration(minlbfgsstate* state, ae_state *_state);
10377 void minlbfgsoptguardgradient(minlbfgsstate* state,
10378      double teststep,
10379      ae_state *_state);
10380 void minlbfgsoptguardsmoothness(minlbfgsstate* state,
10381      ae_int_t level,
10382      ae_state *_state);
10383 void minlbfgsoptguardresults(minlbfgsstate* state,
10384      optguardreport* rep,
10385      ae_state *_state);
10386 void minlbfgsoptguardnonc1test0results(minlbfgsstate* state,
10387      optguardnonc1test0report* strrep,
10388      optguardnonc1test0report* lngrep,
10389      ae_state *_state);
10390 void minlbfgsoptguardnonc1test1results(minlbfgsstate* state,
10391      optguardnonc1test1report* strrep,
10392      optguardnonc1test1report* lngrep,
10393      ae_state *_state);
10394 void minlbfgsresults(minlbfgsstate* state,
10395      /* Real    */ ae_vector* x,
10396      minlbfgsreport* rep,
10397      ae_state *_state);
10398 void minlbfgsresultsbuf(minlbfgsstate* state,
10399      /* Real    */ ae_vector* x,
10400      minlbfgsreport* rep,
10401      ae_state *_state);
10402 void minlbfgsrestartfrom(minlbfgsstate* state,
10403      /* Real    */ ae_vector* x,
10404      ae_state *_state);
10405 void minlbfgsrequesttermination(minlbfgsstate* state, ae_state *_state);
10406 void _minlbfgsstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
10407 void _minlbfgsstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10408 void _minlbfgsstate_clear(void* _p);
10409 void _minlbfgsstate_destroy(void* _p);
10410 void _minlbfgsreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
10411 void _minlbfgsreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10412 void _minlbfgsreport_clear(void* _p);
10413 void _minlbfgsreport_destroy(void* _p);
10414 #endif
10415 #if defined(AE_COMPILE_QPDENSEAULSOLVER) || !defined(AE_PARTIAL_BUILD)
10416 void qpdenseaulloaddefaults(ae_int_t nmain,
10417      qpdenseaulsettings* s,
10418      ae_state *_state);
10419 void qpdenseauloptimize(convexquadraticmodel* a,
10420      sparsematrix* sparsea,
10421      ae_int_t akind,
10422      ae_bool sparseaupper,
10423      /* Real    */ ae_vector* b,
10424      /* Real    */ ae_vector* bndl,
10425      /* Real    */ ae_vector* bndu,
10426      /* Real    */ ae_vector* s,
10427      /* Real    */ ae_vector* xorigin,
10428      ae_int_t nn,
10429      /* Real    */ ae_matrix* cleic,
10430      ae_int_t dnec,
10431      ae_int_t dnic,
10432      sparsematrix* scleic,
10433      ae_int_t snec,
10434      ae_int_t snic,
10435      ae_bool renormlc,
10436      qpdenseaulsettings* settings,
10437      qpdenseaulbuffers* state,
10438      /* Real    */ ae_vector* xs,
10439      ae_int_t* terminationtype,
10440      ae_state *_state);
10441 void _qpdenseaulsettings_init(void* _p, ae_state *_state, ae_bool make_automatic);
10442 void _qpdenseaulsettings_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10443 void _qpdenseaulsettings_clear(void* _p);
10444 void _qpdenseaulsettings_destroy(void* _p);
10445 void _qpdenseaulbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic);
10446 void _qpdenseaulbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10447 void _qpdenseaulbuffers_clear(void* _p);
10448 void _qpdenseaulbuffers_destroy(void* _p);
10449 #endif
10450 #if defined(AE_COMPILE_MINBLEIC) || !defined(AE_PARTIAL_BUILD)
10451 void minbleiccreate(ae_int_t n,
10452      /* Real    */ ae_vector* x,
10453      minbleicstate* state,
10454      ae_state *_state);
10455 void minbleiccreatef(ae_int_t n,
10456      /* Real    */ ae_vector* x,
10457      double diffstep,
10458      minbleicstate* state,
10459      ae_state *_state);
10460 void minbleicsetbc(minbleicstate* state,
10461      /* Real    */ ae_vector* bndl,
10462      /* Real    */ ae_vector* bndu,
10463      ae_state *_state);
10464 void minbleicsetlc(minbleicstate* state,
10465      /* Real    */ ae_matrix* c,
10466      /* Integer */ ae_vector* ct,
10467      ae_int_t k,
10468      ae_state *_state);
10469 void minbleicsetcond(minbleicstate* state,
10470      double epsg,
10471      double epsf,
10472      double epsx,
10473      ae_int_t maxits,
10474      ae_state *_state);
10475 void minbleicsetscale(minbleicstate* state,
10476      /* Real    */ ae_vector* s,
10477      ae_state *_state);
10478 void minbleicsetprecdefault(minbleicstate* state, ae_state *_state);
10479 void minbleicsetprecdiag(minbleicstate* state,
10480      /* Real    */ ae_vector* d,
10481      ae_state *_state);
10482 void minbleicsetprecscale(minbleicstate* state, ae_state *_state);
10483 void minbleicsetxrep(minbleicstate* state,
10484      ae_bool needxrep,
10485      ae_state *_state);
10486 void minbleicsetdrep(minbleicstate* state,
10487      ae_bool needdrep,
10488      ae_state *_state);
10489 void minbleicsetstpmax(minbleicstate* state,
10490      double stpmax,
10491      ae_state *_state);
10492 ae_bool minbleiciteration(minbleicstate* state, ae_state *_state);
10493 void minbleicoptguardgradient(minbleicstate* state,
10494      double teststep,
10495      ae_state *_state);
10496 void minbleicoptguardsmoothness(minbleicstate* state,
10497      ae_int_t level,
10498      ae_state *_state);
10499 void minbleicoptguardresults(minbleicstate* state,
10500      optguardreport* rep,
10501      ae_state *_state);
10502 void minbleicoptguardnonc1test0results(minbleicstate* state,
10503      optguardnonc1test0report* strrep,
10504      optguardnonc1test0report* lngrep,
10505      ae_state *_state);
10506 void minbleicoptguardnonc1test1results(minbleicstate* state,
10507      optguardnonc1test1report* strrep,
10508      optguardnonc1test1report* lngrep,
10509      ae_state *_state);
10510 void minbleicresults(minbleicstate* state,
10511      /* Real    */ ae_vector* x,
10512      minbleicreport* rep,
10513      ae_state *_state);
10514 void minbleicresultsbuf(minbleicstate* state,
10515      /* Real    */ ae_vector* x,
10516      minbleicreport* rep,
10517      ae_state *_state);
10518 void minbleicrestartfrom(minbleicstate* state,
10519      /* Real    */ ae_vector* x,
10520      ae_state *_state);
10521 void minbleicrequesttermination(minbleicstate* state, ae_state *_state);
10522 void minbleicemergencytermination(minbleicstate* state, ae_state *_state);
10523 void _minbleicstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
10524 void _minbleicstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10525 void _minbleicstate_clear(void* _p);
10526 void _minbleicstate_destroy(void* _p);
10527 void _minbleicreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
10528 void _minbleicreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10529 void _minbleicreport_clear(void* _p);
10530 void _minbleicreport_destroy(void* _p);
10531 #endif
10532 #if defined(AE_COMPILE_QPBLEICSOLVER) || !defined(AE_PARTIAL_BUILD)
10533 void qpbleicloaddefaults(ae_int_t nmain,
10534      qpbleicsettings* s,
10535      ae_state *_state);
10536 void qpbleiccopysettings(qpbleicsettings* src,
10537      qpbleicsettings* dst,
10538      ae_state *_state);
10539 void qpbleicoptimize(convexquadraticmodel* a,
10540      sparsematrix* sparsea,
10541      ae_int_t akind,
10542      ae_bool sparseaupper,
10543      double absasum,
10544      double absasum2,
10545      /* Real    */ ae_vector* b,
10546      /* Real    */ ae_vector* bndl,
10547      /* Real    */ ae_vector* bndu,
10548      /* Real    */ ae_vector* s,
10549      /* Real    */ ae_vector* xorigin,
10550      ae_int_t n,
10551      /* Real    */ ae_matrix* cleic,
10552      ae_int_t nec,
10553      ae_int_t nic,
10554      qpbleicsettings* settings,
10555      qpbleicbuffers* sstate,
10556      ae_bool* firstcall,
10557      /* Real    */ ae_vector* xs,
10558      ae_int_t* terminationtype,
10559      ae_state *_state);
10560 void _qpbleicsettings_init(void* _p, ae_state *_state, ae_bool make_automatic);
10561 void _qpbleicsettings_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10562 void _qpbleicsettings_clear(void* _p);
10563 void _qpbleicsettings_destroy(void* _p);
10564 void _qpbleicbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic);
10565 void _qpbleicbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10566 void _qpbleicbuffers_clear(void* _p);
10567 void _qpbleicbuffers_destroy(void* _p);
10568 #endif
10569 #if defined(AE_COMPILE_MINQP) || !defined(AE_PARTIAL_BUILD)
10570 void minqpcreate(ae_int_t n, minqpstate* state, ae_state *_state);
10571 void minqpsetlinearterm(minqpstate* state,
10572      /* Real    */ ae_vector* b,
10573      ae_state *_state);
10574 void minqpsetquadraticterm(minqpstate* state,
10575      /* Real    */ ae_matrix* a,
10576      ae_bool isupper,
10577      ae_state *_state);
10578 void minqpsetquadratictermsparse(minqpstate* state,
10579      sparsematrix* a,
10580      ae_bool isupper,
10581      ae_state *_state);
10582 void minqpsetstartingpoint(minqpstate* state,
10583      /* Real    */ ae_vector* x,
10584      ae_state *_state);
10585 void minqpsetorigin(minqpstate* state,
10586      /* Real    */ ae_vector* xorigin,
10587      ae_state *_state);
10588 void minqpsetscale(minqpstate* state,
10589      /* Real    */ ae_vector* s,
10590      ae_state *_state);
10591 void minqpsetscaleautodiag(minqpstate* state, ae_state *_state);
10592 void minqpsetalgobleic(minqpstate* state,
10593      double epsg,
10594      double epsf,
10595      double epsx,
10596      ae_int_t maxits,
10597      ae_state *_state);
10598 void minqpsetalgodenseaul(minqpstate* state,
10599      double epsx,
10600      double rho,
10601      ae_int_t itscnt,
10602      ae_state *_state);
10603 void minqpsetalgoquickqp(minqpstate* state,
10604      double epsg,
10605      double epsf,
10606      double epsx,
10607      ae_int_t maxouterits,
10608      ae_bool usenewton,
10609      ae_state *_state);
10610 void minqpsetbc(minqpstate* state,
10611      /* Real    */ ae_vector* bndl,
10612      /* Real    */ ae_vector* bndu,
10613      ae_state *_state);
10614 void minqpsetlc(minqpstate* state,
10615      /* Real    */ ae_matrix* c,
10616      /* Integer */ ae_vector* ct,
10617      ae_int_t k,
10618      ae_state *_state);
10619 void minqpsetlcsparse(minqpstate* state,
10620      sparsematrix* c,
10621      /* Integer */ ae_vector* ct,
10622      ae_int_t k,
10623      ae_state *_state);
10624 void minqpsetlcmixed(minqpstate* state,
10625      /* Real    */ ae_matrix* densec,
10626      /* Integer */ ae_vector* densect,
10627      ae_int_t densek,
10628      sparsematrix* sparsec,
10629      /* Integer */ ae_vector* sparsect,
10630      ae_int_t sparsek,
10631      ae_state *_state);
10632 void minqpoptimize(minqpstate* state, ae_state *_state);
10633 void minqpresults(minqpstate* state,
10634      /* Real    */ ae_vector* x,
10635      minqpreport* rep,
10636      ae_state *_state);
10637 void minqpresultsbuf(minqpstate* state,
10638      /* Real    */ ae_vector* x,
10639      minqpreport* rep,
10640      ae_state *_state);
10641 void minqpsetlineartermfast(minqpstate* state,
10642      /* Real    */ ae_vector* b,
10643      ae_state *_state);
10644 void minqpsetquadratictermfast(minqpstate* state,
10645      /* Real    */ ae_matrix* a,
10646      ae_bool isupper,
10647      double s,
10648      ae_state *_state);
10649 void minqprewritediagonal(minqpstate* state,
10650      /* Real    */ ae_vector* s,
10651      ae_state *_state);
10652 void minqpsetstartingpointfast(minqpstate* state,
10653      /* Real    */ ae_vector* x,
10654      ae_state *_state);
10655 void minqpsetoriginfast(minqpstate* state,
10656      /* Real    */ ae_vector* xorigin,
10657      ae_state *_state);
10658 void _minqpstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
10659 void _minqpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10660 void _minqpstate_clear(void* _p);
10661 void _minqpstate_destroy(void* _p);
10662 void _minqpreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
10663 void _minqpreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10664 void _minqpreport_clear(void* _p);
10665 void _minqpreport_destroy(void* _p);
10666 #endif
10667 #if defined(AE_COMPILE_REVISEDDUALSIMPLEX) || !defined(AE_PARTIAL_BUILD)
10668 void dsssettingsinit(dualsimplexsettings* settings, ae_state *_state);
10669 void dssinit(ae_int_t n, dualsimplexstate* s, ae_state *_state);
10670 void dsssetproblem(dualsimplexstate* state,
10671      /* Real    */ ae_vector* c,
10672      /* Real    */ ae_vector* bndl,
10673      /* Real    */ ae_vector* bndu,
10674      /* Real    */ ae_vector* sv,
10675      /* Real    */ ae_matrix* densea,
10676      sparsematrix* sparsea,
10677      ae_int_t akind,
10678      /* Real    */ ae_vector* al,
10679      /* Real    */ ae_vector* au,
10680      ae_int_t k,
10681      dualsimplexbasis* proposedbasis,
10682      ae_int_t basisinittype,
10683      dualsimplexsettings* settings,
10684      ae_state *_state);
10685 void dssexportbasis(dualsimplexstate* state,
10686      dualsimplexbasis* basis,
10687      ae_state *_state);
10688 void dssoptimize(dualsimplexstate* state,
10689      dualsimplexsettings* settings,
10690      ae_state *_state);
10691 void _dualsimplexsettings_init(void* _p, ae_state *_state, ae_bool make_automatic);
10692 void _dualsimplexsettings_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10693 void _dualsimplexsettings_clear(void* _p);
10694 void _dualsimplexsettings_destroy(void* _p);
10695 void _dualsimplexbasis_init(void* _p, ae_state *_state, ae_bool make_automatic);
10696 void _dualsimplexbasis_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10697 void _dualsimplexbasis_clear(void* _p);
10698 void _dualsimplexbasis_destroy(void* _p);
10699 void _dualsimplexsubproblem_init(void* _p, ae_state *_state, ae_bool make_automatic);
10700 void _dualsimplexsubproblem_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10701 void _dualsimplexsubproblem_clear(void* _p);
10702 void _dualsimplexsubproblem_destroy(void* _p);
10703 void _dualsimplexstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
10704 void _dualsimplexstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10705 void _dualsimplexstate_clear(void* _p);
10706 void _dualsimplexstate_destroy(void* _p);
10707 #endif
10708 #if defined(AE_COMPILE_MINLP) || !defined(AE_PARTIAL_BUILD)
10709 void minlpcreate(ae_int_t n, minlpstate* state, ae_state *_state);
10710 void minlpsetcost(minlpstate* state,
10711      /* Real    */ ae_vector* c,
10712      ae_state *_state);
10713 void minlpsetscale(minlpstate* state,
10714      /* Real    */ ae_vector* s,
10715      ae_state *_state);
10716 void minlpsetbc(minlpstate* state,
10717      /* Real    */ ae_vector* bndl,
10718      /* Real    */ ae_vector* bndu,
10719      ae_state *_state);
10720 void minlpsetbcall(minlpstate* state,
10721      double bndl,
10722      double bndu,
10723      ae_state *_state);
10724 void minlpsetbci(minlpstate* state,
10725      ae_int_t i,
10726      double bndl,
10727      double bndu,
10728      ae_state *_state);
10729 void minlpsetlc(minlpstate* state,
10730      /* Real    */ ae_matrix* a,
10731      /* Integer */ ae_vector* ct,
10732      ae_int_t k,
10733      ae_state *_state);
10734 void minlpsetlc2dense(minlpstate* state,
10735      /* Real    */ ae_matrix* a,
10736      /* Real    */ ae_vector* al,
10737      /* Real    */ ae_vector* au,
10738      ae_int_t k,
10739      ae_state *_state);
10740 void minlpsetlc2(minlpstate* state,
10741      sparsematrix* a,
10742      /* Real    */ ae_vector* al,
10743      /* Real    */ ae_vector* au,
10744      ae_int_t k,
10745      ae_state *_state);
10746 void minlpaddlc2dense(minlpstate* state,
10747      /* Real    */ ae_vector* a,
10748      double al,
10749      double au,
10750      ae_state *_state);
10751 void minlpaddlc2(minlpstate* state,
10752      /* Integer */ ae_vector* idxa,
10753      /* Real    */ ae_vector* vala,
10754      ae_int_t nnz,
10755      double al,
10756      double au,
10757      ae_state *_state);
10758 void minlpoptimize(minlpstate* state, ae_state *_state);
10759 void minlpresults(minlpstate* state,
10760      /* Real    */ ae_vector* x,
10761      minlpreport* rep,
10762      ae_state *_state);
10763 void minlpresultsbuf(minlpstate* state,
10764      /* Real    */ ae_vector* x,
10765      minlpreport* rep,
10766      ae_state *_state);
10767 void _minlpstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
10768 void _minlpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10769 void _minlpstate_clear(void* _p);
10770 void _minlpstate_destroy(void* _p);
10771 void _minlpreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
10772 void _minlpreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10773 void _minlpreport_clear(void* _p);
10774 void _minlpreport_destroy(void* _p);
10775 #endif
10776 #if defined(AE_COMPILE_NLCSLP) || !defined(AE_PARTIAL_BUILD)
10777 void minslpinitbuf(/* Real    */ ae_vector* bndl,
10778      /* Real    */ ae_vector* bndu,
10779      /* Real    */ ae_vector* s,
10780      /* Real    */ ae_vector* x0,
10781      ae_int_t n,
10782      /* Real    */ ae_matrix* cleic,
10783      /* Integer */ ae_vector* lcsrcidx,
10784      ae_int_t nec,
10785      ae_int_t nic,
10786      ae_int_t nlec,
10787      ae_int_t nlic,
10788      double epsx,
10789      ae_int_t maxits,
10790      minslpstate* state,
10791      ae_state *_state);
10792 ae_bool minslpiteration(minslpstate* state,
10793      smoothnessmonitor* smonitor,
10794      ae_bool userterminationneeded,
10795      ae_state *_state);
10796 void _minslpsubsolver_init(void* _p, ae_state *_state, ae_bool make_automatic);
10797 void _minslpsubsolver_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10798 void _minslpsubsolver_clear(void* _p);
10799 void _minslpsubsolver_destroy(void* _p);
10800 void _minslpstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
10801 void _minslpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10802 void _minslpstate_clear(void* _p);
10803 void _minslpstate_destroy(void* _p);
10804 #endif
10805 #if defined(AE_COMPILE_MINNLC) || !defined(AE_PARTIAL_BUILD)
10806 void minnlccreate(ae_int_t n,
10807      /* Real    */ ae_vector* x,
10808      minnlcstate* state,
10809      ae_state *_state);
10810 void minnlccreatef(ae_int_t n,
10811      /* Real    */ ae_vector* x,
10812      double diffstep,
10813      minnlcstate* state,
10814      ae_state *_state);
10815 void minnlcsetbc(minnlcstate* state,
10816      /* Real    */ ae_vector* bndl,
10817      /* Real    */ ae_vector* bndu,
10818      ae_state *_state);
10819 void minnlcsetlc(minnlcstate* state,
10820      /* Real    */ ae_matrix* c,
10821      /* Integer */ ae_vector* ct,
10822      ae_int_t k,
10823      ae_state *_state);
10824 void minnlcsetnlc(minnlcstate* state,
10825      ae_int_t nlec,
10826      ae_int_t nlic,
10827      ae_state *_state);
10828 void minnlcsetcond(minnlcstate* state,
10829      double epsx,
10830      ae_int_t maxits,
10831      ae_state *_state);
10832 void minnlcsetscale(minnlcstate* state,
10833      /* Real    */ ae_vector* s,
10834      ae_state *_state);
10835 void minnlcsetprecinexact(minnlcstate* state, ae_state *_state);
10836 void minnlcsetprecexactlowrank(minnlcstate* state,
10837      ae_int_t updatefreq,
10838      ae_state *_state);
10839 void minnlcsetprecexactrobust(minnlcstate* state,
10840      ae_int_t updatefreq,
10841      ae_state *_state);
10842 void minnlcsetprecnone(minnlcstate* state, ae_state *_state);
10843 void minnlcsetstpmax(minnlcstate* state, double stpmax, ae_state *_state);
10844 void minnlcsetalgoaul(minnlcstate* state,
10845      double rho,
10846      ae_int_t itscnt,
10847      ae_state *_state);
10848 void minnlcsetalgoslp(minnlcstate* state, ae_state *_state);
10849 void minnlcsetxrep(minnlcstate* state, ae_bool needxrep, ae_state *_state);
10850 ae_bool minnlciteration(minnlcstate* state, ae_state *_state);
10851 void minnlcoptguardgradient(minnlcstate* state,
10852      double teststep,
10853      ae_state *_state);
10854 void minnlcoptguardsmoothness(minnlcstate* state,
10855      ae_int_t level,
10856      ae_state *_state);
10857 void minnlcoptguardresults(minnlcstate* state,
10858      optguardreport* rep,
10859      ae_state *_state);
10860 void minnlcoptguardnonc1test0results(minnlcstate* state,
10861      optguardnonc1test0report* strrep,
10862      optguardnonc1test0report* lngrep,
10863      ae_state *_state);
10864 void minnlcoptguardnonc1test1results(minnlcstate* state,
10865      optguardnonc1test1report* strrep,
10866      optguardnonc1test1report* lngrep,
10867      ae_state *_state);
10868 void minnlcresults(minnlcstate* state,
10869      /* Real    */ ae_vector* x,
10870      minnlcreport* rep,
10871      ae_state *_state);
10872 void minnlcresultsbuf(minnlcstate* state,
10873      /* Real    */ ae_vector* x,
10874      minnlcreport* rep,
10875      ae_state *_state);
10876 void minnlcrequesttermination(minnlcstate* state, ae_state *_state);
10877 void minnlcrestartfrom(minnlcstate* state,
10878      /* Real    */ ae_vector* x,
10879      ae_state *_state);
10880 void minnlcequalitypenaltyfunction(double alpha,
10881      double* f,
10882      double* df,
10883      double* d2f,
10884      ae_state *_state);
10885 void minnlcinequalitypenaltyfunction(double alpha,
10886      double stabilizingpoint,
10887      double* f,
10888      double* df,
10889      double* d2f,
10890      ae_state *_state);
10891 void minnlcinequalityshiftfunction(double alpha,
10892      double* f,
10893      double* df,
10894      double* d2f,
10895      ae_state *_state);
10896 void _minnlcstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
10897 void _minnlcstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10898 void _minnlcstate_clear(void* _p);
10899 void _minnlcstate_destroy(void* _p);
10900 void _minnlcreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
10901 void _minnlcreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10902 void _minnlcreport_clear(void* _p);
10903 void _minnlcreport_destroy(void* _p);
10904 #endif
10905 #if defined(AE_COMPILE_MINBC) || !defined(AE_PARTIAL_BUILD)
10906 void minbccreate(ae_int_t n,
10907      /* Real    */ ae_vector* x,
10908      minbcstate* state,
10909      ae_state *_state);
10910 void minbccreatef(ae_int_t n,
10911      /* Real    */ ae_vector* x,
10912      double diffstep,
10913      minbcstate* state,
10914      ae_state *_state);
10915 void minbcsetbc(minbcstate* state,
10916      /* Real    */ ae_vector* bndl,
10917      /* Real    */ ae_vector* bndu,
10918      ae_state *_state);
10919 void minbcsetcond(minbcstate* state,
10920      double epsg,
10921      double epsf,
10922      double epsx,
10923      ae_int_t maxits,
10924      ae_state *_state);
10925 void minbcsetscale(minbcstate* state,
10926      /* Real    */ ae_vector* s,
10927      ae_state *_state);
10928 void minbcsetprecdefault(minbcstate* state, ae_state *_state);
10929 void minbcsetprecdiag(minbcstate* state,
10930      /* Real    */ ae_vector* d,
10931      ae_state *_state);
10932 void minbcsetprecscale(minbcstate* state, ae_state *_state);
10933 void minbcsetxrep(minbcstate* state, ae_bool needxrep, ae_state *_state);
10934 void minbcsetstpmax(minbcstate* state, double stpmax, ae_state *_state);
10935 ae_bool minbciteration(minbcstate* state, ae_state *_state);
10936 void minbcoptguardgradient(minbcstate* state,
10937      double teststep,
10938      ae_state *_state);
10939 void minbcoptguardsmoothness(minbcstate* state,
10940      ae_int_t level,
10941      ae_state *_state);
10942 void minbcoptguardresults(minbcstate* state,
10943      optguardreport* rep,
10944      ae_state *_state);
10945 void minbcoptguardnonc1test0results(minbcstate* state,
10946      optguardnonc1test0report* strrep,
10947      optguardnonc1test0report* lngrep,
10948      ae_state *_state);
10949 void minbcoptguardnonc1test1results(minbcstate* state,
10950      optguardnonc1test1report* strrep,
10951      optguardnonc1test1report* lngrep,
10952      ae_state *_state);
10953 void minbcresults(minbcstate* state,
10954      /* Real    */ ae_vector* x,
10955      minbcreport* rep,
10956      ae_state *_state);
10957 void minbcresultsbuf(minbcstate* state,
10958      /* Real    */ ae_vector* x,
10959      minbcreport* rep,
10960      ae_state *_state);
10961 void minbcrestartfrom(minbcstate* state,
10962      /* Real    */ ae_vector* x,
10963      ae_state *_state);
10964 void minbcrequesttermination(minbcstate* state, ae_state *_state);
10965 void _minbcstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
10966 void _minbcstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10967 void _minbcstate_clear(void* _p);
10968 void _minbcstate_destroy(void* _p);
10969 void _minbcreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
10970 void _minbcreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10971 void _minbcreport_clear(void* _p);
10972 void _minbcreport_destroy(void* _p);
10973 #endif
10974 #if defined(AE_COMPILE_MINNS) || !defined(AE_PARTIAL_BUILD)
10975 void minnscreate(ae_int_t n,
10976      /* Real    */ ae_vector* x,
10977      minnsstate* state,
10978      ae_state *_state);
10979 void minnscreatef(ae_int_t n,
10980      /* Real    */ ae_vector* x,
10981      double diffstep,
10982      minnsstate* state,
10983      ae_state *_state);
10984 void minnssetbc(minnsstate* state,
10985      /* Real    */ ae_vector* bndl,
10986      /* Real    */ ae_vector* bndu,
10987      ae_state *_state);
10988 void minnssetlc(minnsstate* state,
10989      /* Real    */ ae_matrix* c,
10990      /* Integer */ ae_vector* ct,
10991      ae_int_t k,
10992      ae_state *_state);
10993 void minnssetnlc(minnsstate* state,
10994      ae_int_t nlec,
10995      ae_int_t nlic,
10996      ae_state *_state);
10997 void minnssetcond(minnsstate* state,
10998      double epsx,
10999      ae_int_t maxits,
11000      ae_state *_state);
11001 void minnssetscale(minnsstate* state,
11002      /* Real    */ ae_vector* s,
11003      ae_state *_state);
11004 void minnssetalgoags(minnsstate* state,
11005      double radius,
11006      double penalty,
11007      ae_state *_state);
11008 void minnssetxrep(minnsstate* state, ae_bool needxrep, ae_state *_state);
11009 void minnsrequesttermination(minnsstate* state, ae_state *_state);
11010 ae_bool minnsiteration(minnsstate* state, ae_state *_state);
11011 void minnsresults(minnsstate* state,
11012      /* Real    */ ae_vector* x,
11013      minnsreport* rep,
11014      ae_state *_state);
11015 void minnsresultsbuf(minnsstate* state,
11016      /* Real    */ ae_vector* x,
11017      minnsreport* rep,
11018      ae_state *_state);
11019 void minnsrestartfrom(minnsstate* state,
11020      /* Real    */ ae_vector* x,
11021      ae_state *_state);
11022 void _minnsqp_init(void* _p, ae_state *_state, ae_bool make_automatic);
11023 void _minnsqp_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11024 void _minnsqp_clear(void* _p);
11025 void _minnsqp_destroy(void* _p);
11026 void _minnsstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
11027 void _minnsstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11028 void _minnsstate_clear(void* _p);
11029 void _minnsstate_destroy(void* _p);
11030 void _minnsreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
11031 void _minnsreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11032 void _minnsreport_clear(void* _p);
11033 void _minnsreport_destroy(void* _p);
11034 #endif
11035 #if defined(AE_COMPILE_MINCOMP) || !defined(AE_PARTIAL_BUILD)
11036 void minlbfgssetdefaultpreconditioner(minlbfgsstate* state,
11037      ae_state *_state);
11038 void minlbfgssetcholeskypreconditioner(minlbfgsstate* state,
11039      /* Real    */ ae_matrix* p,
11040      ae_bool isupper,
11041      ae_state *_state);
11042 void minbleicsetbarrierwidth(minbleicstate* state,
11043      double mu,
11044      ae_state *_state);
11045 void minbleicsetbarrierdecay(minbleicstate* state,
11046      double mudecay,
11047      ae_state *_state);
11048 void minasacreate(ae_int_t n,
11049      /* Real    */ ae_vector* x,
11050      /* Real    */ ae_vector* bndl,
11051      /* Real    */ ae_vector* bndu,
11052      minasastate* state,
11053      ae_state *_state);
11054 void minasasetcond(minasastate* state,
11055      double epsg,
11056      double epsf,
11057      double epsx,
11058      ae_int_t maxits,
11059      ae_state *_state);
11060 void minasasetxrep(minasastate* state, ae_bool needxrep, ae_state *_state);
11061 void minasasetalgorithm(minasastate* state,
11062      ae_int_t algotype,
11063      ae_state *_state);
11064 void minasasetstpmax(minasastate* state, double stpmax, ae_state *_state);
11065 ae_bool minasaiteration(minasastate* state, ae_state *_state);
11066 void minasaresults(minasastate* state,
11067      /* Real    */ ae_vector* x,
11068      minasareport* rep,
11069      ae_state *_state);
11070 void minasaresultsbuf(minasastate* state,
11071      /* Real    */ ae_vector* x,
11072      minasareport* rep,
11073      ae_state *_state);
11074 void minasarestartfrom(minasastate* state,
11075      /* Real    */ ae_vector* x,
11076      /* Real    */ ae_vector* bndl,
11077      /* Real    */ ae_vector* bndu,
11078      ae_state *_state);
11079 void _minasastate_init(void* _p, ae_state *_state, ae_bool make_automatic);
11080 void _minasastate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11081 void _minasastate_clear(void* _p);
11082 void _minasastate_destroy(void* _p);
11083 void _minasareport_init(void* _p, ae_state *_state, ae_bool make_automatic);
11084 void _minasareport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11085 void _minasareport_clear(void* _p);
11086 void _minasareport_destroy(void* _p);
11087 #endif
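/*************************************************************************
USAGE SKETCH: this MINCOMP block appears to collect backward-compatibility
entry points - the legacy MinASA bound-constrained optimizer and obsolete
MinLBFGS/MinBLEIC tuning calls; newer code typically uses the MinBLEIC or
MinBC units instead.  Below is a minimal, informal sketch of the legacy
MinASA loop, not part of the library; it assumes the ae_state/ae_vector
helpers from ap.h and the minasastate fields x, f, g, needfg and xupdated.

    // minimize f(x) = (x0-1)^2 + (x1-1)^2 subject to 0 <= x <= 2
    static void sketch_minasa()
    {
        ae_state _state;
        minasastate state;
        minasareport rep;
        ae_vector x, bndl, bndu;
        ae_int_t i;
        ae_state_init(&_state);
        _minasastate_init(&state, &_state, ae_false);
        _minasareport_init(&rep, &_state, ae_false);
        ae_vector_init(&x,    2, DT_REAL, &_state, ae_false);
        ae_vector_init(&bndl, 2, DT_REAL, &_state, ae_false);
        ae_vector_init(&bndu, 2, DT_REAL, &_state, ae_false);
        for(i=0; i<2; i++)
        {
            x.ptr.p_double[i]    = 0.5;
            bndl.ptr.p_double[i] = 0.0;
            bndu.ptr.p_double[i] = 2.0;
        }
        minasacreate(2, &x, &bndl, &bndu, &state, &_state);
        minasasetcond(&state, 0.0, 0.0, 1.0e-8, 0, &_state);
        while( minasaiteration(&state, &_state) )
        {
            if( state.needfg )
            {
                // report objective and gradient at state.x
                double x0 = state.x.ptr.p_double[0];
                double x1 = state.x.ptr.p_double[1];
                state.f = (x0-1.0)*(x0-1.0)+(x1-1.0)*(x1-1.0);
                state.g.ptr.p_double[0] = 2.0*(x0-1.0);
                state.g.ptr.p_double[1] = 2.0*(x1-1.0);
                continue;
            }
            if( state.xupdated )
                continue;
        }
        minasaresults(&state, &x, &rep, &_state);
        _minasastate_clear(&state);
        _minasareport_clear(&rep);
        ae_vector_clear(&x);
        ae_vector_clear(&bndl);
        ae_vector_clear(&bndu);
        ae_state_clear(&_state);
    }
*************************************************************************/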
11088 #if defined(AE_COMPILE_MINCG) || !defined(AE_PARTIAL_BUILD)
11089 void mincgcreate(ae_int_t n,
11090      /* Real    */ ae_vector* x,
11091      mincgstate* state,
11092      ae_state *_state);
11093 void mincgcreatef(ae_int_t n,
11094      /* Real    */ ae_vector* x,
11095      double diffstep,
11096      mincgstate* state,
11097      ae_state *_state);
11098 void mincgsetcond(mincgstate* state,
11099      double epsg,
11100      double epsf,
11101      double epsx,
11102      ae_int_t maxits,
11103      ae_state *_state);
11104 void mincgsetscale(mincgstate* state,
11105      /* Real    */ ae_vector* s,
11106      ae_state *_state);
11107 void mincgsetxrep(mincgstate* state, ae_bool needxrep, ae_state *_state);
11108 void mincgsetdrep(mincgstate* state, ae_bool needdrep, ae_state *_state);
11109 void mincgsetcgtype(mincgstate* state, ae_int_t cgtype, ae_state *_state);
11110 void mincgsetstpmax(mincgstate* state, double stpmax, ae_state *_state);
11111 void mincgsuggeststep(mincgstate* state, double stp, ae_state *_state);
11112 double mincglastgoodstep(mincgstate* state, ae_state *_state);
11113 void mincgsetprecdefault(mincgstate* state, ae_state *_state);
11114 void mincgsetprecdiag(mincgstate* state,
11115      /* Real    */ ae_vector* d,
11116      ae_state *_state);
11117 void mincgsetprecscale(mincgstate* state, ae_state *_state);
11118 ae_bool mincgiteration(mincgstate* state, ae_state *_state);
11119 void mincgoptguardgradient(mincgstate* state,
11120      double teststep,
11121      ae_state *_state);
11122 void mincgoptguardsmoothness(mincgstate* state,
11123      ae_int_t level,
11124      ae_state *_state);
11125 void mincgoptguardresults(mincgstate* state,
11126      optguardreport* rep,
11127      ae_state *_state);
11128 void mincgoptguardnonc1test0results(mincgstate* state,
11129      optguardnonc1test0report* strrep,
11130      optguardnonc1test0report* lngrep,
11131      ae_state *_state);
11132 void mincgoptguardnonc1test1results(mincgstate* state,
11133      optguardnonc1test1report* strrep,
11134      optguardnonc1test1report* lngrep,
11135      ae_state *_state);
11136 void mincgresults(mincgstate* state,
11137      /* Real    */ ae_vector* x,
11138      mincgreport* rep,
11139      ae_state *_state);
11140 void mincgresultsbuf(mincgstate* state,
11141      /* Real    */ ae_vector* x,
11142      mincgreport* rep,
11143      ae_state *_state);
11144 void mincgrestartfrom(mincgstate* state,
11145      /* Real    */ ae_vector* x,
11146      ae_state *_state);
11147 void mincgrequesttermination(mincgstate* state, ae_state *_state);
11148 void mincgsetprecdiagfast(mincgstate* state,
11149      /* Real    */ ae_vector* d,
11150      ae_state *_state);
11151 void mincgsetpreclowrankfast(mincgstate* state,
11152      /* Real    */ ae_vector* d1,
11153      /* Real    */ ae_vector* c,
11154      /* Real    */ ae_matrix* v,
11155      ae_int_t vcnt,
11156      ae_state *_state);
11157 void mincgsetprecvarpart(mincgstate* state,
11158      /* Real    */ ae_vector* d2,
11159      ae_state *_state);
11160 void _mincgstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
11161 void _mincgstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11162 void _mincgstate_clear(void* _p);
11163 void _mincgstate_destroy(void* _p);
11164 void _mincgreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
11165 void _mincgreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11166 void _mincgreport_clear(void* _p);
11167 void _mincgreport_destroy(void* _p);
11168 #endif
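/*************************************************************************
USAGE SKETCH: the declarations above form the reverse-communication
interface of the MINCG (nonlinear conjugate gradient) unit.  The fragment
below is a minimal, informal sketch of a driving loop, not part of the
library; it assumes the ae_state/ae_vector helpers from ap.h and the
mincgstate fields x, f, g, needfg and xupdated.  Error handling and frame
management performed by the official wrappers are omitted.

    // minimize f(x) = (x0-1)^2 + (x1+2)^2
    static void sketch_mincg()
    {
        ae_state _state;
        mincgstate state;
        mincgreport rep;
        ae_vector x;
        ae_state_init(&_state);
        _mincgstate_init(&state, &_state, ae_false);
        _mincgreport_init(&rep, &_state, ae_false);
        ae_vector_init(&x, 2, DT_REAL, &_state, ae_false);
        x.ptr.p_double[0] = 0.0;
        x.ptr.p_double[1] = 0.0;
        mincgcreate(2, &x, &state, &_state);
        mincgsetcond(&state, 0.0, 0.0, 1.0e-8, 0, &_state);
        // mincgoptguardsmoothness(&state, 1, &_state);   // optional OptGuard C0/C1 checks
        while( mincgiteration(&state, &_state) )
        {
            if( state.needfg )
            {
                // report objective and gradient at state.x
                double x0 = state.x.ptr.p_double[0];
                double x1 = state.x.ptr.p_double[1];
                state.f = (x0-1.0)*(x0-1.0)+(x1+2.0)*(x1+2.0);
                state.g.ptr.p_double[0] = 2.0*(x0-1.0);
                state.g.ptr.p_double[1] = 2.0*(x1+2.0);
                continue;
            }
            if( state.xupdated )
                continue;                               // progress report (see mincgsetxrep)
        }
        mincgresults(&state, &x, &rep, &_state);
        _mincgstate_clear(&state);
        _mincgreport_clear(&rep);
        ae_vector_clear(&x);
        ae_state_clear(&_state);
    }
*************************************************************************/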
11169 #if defined(AE_COMPILE_MINLM) || !defined(AE_PARTIAL_BUILD)
11170 void minlmcreatevj(ae_int_t n,
11171      ae_int_t m,
11172      /* Real    */ ae_vector* x,
11173      minlmstate* state,
11174      ae_state *_state);
11175 void minlmcreatev(ae_int_t n,
11176      ae_int_t m,
11177      /* Real    */ ae_vector* x,
11178      double diffstep,
11179      minlmstate* state,
11180      ae_state *_state);
11181 void minlmcreatefgh(ae_int_t n,
11182      /* Real    */ ae_vector* x,
11183      minlmstate* state,
11184      ae_state *_state);
11185 void minlmsetcond(minlmstate* state,
11186      double epsx,
11187      ae_int_t maxits,
11188      ae_state *_state);
11189 void minlmsetxrep(minlmstate* state, ae_bool needxrep, ae_state *_state);
11190 void minlmsetstpmax(minlmstate* state, double stpmax, ae_state *_state);
11191 void minlmsetscale(minlmstate* state,
11192      /* Real    */ ae_vector* s,
11193      ae_state *_state);
11194 void minlmsetbc(minlmstate* state,
11195      /* Real    */ ae_vector* bndl,
11196      /* Real    */ ae_vector* bndu,
11197      ae_state *_state);
11198 void minlmsetlc(minlmstate* state,
11199      /* Real    */ ae_matrix* c,
11200      /* Integer */ ae_vector* ct,
11201      ae_int_t k,
11202      ae_state *_state);
11203 void minlmsetacctype(minlmstate* state,
11204      ae_int_t acctype,
11205      ae_state *_state);
11206 ae_bool minlmiteration(minlmstate* state, ae_state *_state);
11207 void minlmoptguardgradient(minlmstate* state,
11208      double teststep,
11209      ae_state *_state);
11210 void minlmoptguardresults(minlmstate* state,
11211      optguardreport* rep,
11212      ae_state *_state);
11213 void minlmresults(minlmstate* state,
11214      /* Real    */ ae_vector* x,
11215      minlmreport* rep,
11216      ae_state *_state);
11217 void minlmresultsbuf(minlmstate* state,
11218      /* Real    */ ae_vector* x,
11219      minlmreport* rep,
11220      ae_state *_state);
11221 void minlmrestartfrom(minlmstate* state,
11222      /* Real    */ ae_vector* x,
11223      ae_state *_state);
11224 void minlmrequesttermination(minlmstate* state, ae_state *_state);
11225 void minlmcreatevgj(ae_int_t n,
11226      ae_int_t m,
11227      /* Real    */ ae_vector* x,
11228      minlmstate* state,
11229      ae_state *_state);
11230 void minlmcreatefgj(ae_int_t n,
11231      ae_int_t m,
11232      /* Real    */ ae_vector* x,
11233      minlmstate* state,
11234      ae_state *_state);
11235 void minlmcreatefj(ae_int_t n,
11236      ae_int_t m,
11237      /* Real    */ ae_vector* x,
11238      minlmstate* state,
11239      ae_state *_state);
11240 void _minlmstepfinder_init(void* _p, ae_state *_state, ae_bool make_automatic);
11241 void _minlmstepfinder_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11242 void _minlmstepfinder_clear(void* _p);
11243 void _minlmstepfinder_destroy(void* _p);
11244 void _minlmstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
11245 void _minlmstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11246 void _minlmstate_clear(void* _p);
11247 void _minlmstate_destroy(void* _p);
11248 void _minlmreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
11249 void _minlmreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11250 void _minlmreport_clear(void* _p);
11251 void _minlmreport_destroy(void* _p);
11252 #endif
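/*************************************************************************
USAGE SKETCH: the declarations above form the reverse-communication
interface of the MINLM (Levenberg-Marquardt) unit.  Below is a minimal,
informal sketch of the "V" protocol (function vector only, Jacobian
approximated numerically from diffstep), not part of the library; it
assumes the ae_state/ae_vector helpers from ap.h and the minlmstate
fields x, fi, needfi and xupdated.  Error handling and frame management
performed by the official wrappers are omitted.

    // solve the least-squares problem with residuals fi(x) = { x0-2, x1+1 }
    static void sketch_minlm_v()
    {
        ae_state _state;
        minlmstate state;
        minlmreport rep;
        ae_vector x;
        ae_state_init(&_state);
        _minlmstate_init(&state, &_state, ae_false);
        _minlmreport_init(&rep, &_state, ae_false);
        ae_vector_init(&x, 2, DT_REAL, &_state, ae_false);
        x.ptr.p_double[0] = 0.0;
        x.ptr.p_double[1] = 0.0;
        minlmcreatev(2, 2, &x, 0.0001, &state, &_state);   // n=2, m=2, diffstep=1e-4
        minlmsetcond(&state, 1.0e-9, 0, &_state);          // epsx, unlimited iterations
        while( minlmiteration(&state, &_state) )
        {
            if( state.needfi )
            {
                // report residual vector fi at state.x
                state.fi.ptr.p_double[0] = state.x.ptr.p_double[0]-2.0;
                state.fi.ptr.p_double[1] = state.x.ptr.p_double[1]+1.0;
                continue;
            }
            if( state.xupdated )
                continue;                               // progress report (see minlmsetxrep)
        }
        minlmresults(&state, &x, &rep, &_state);
        _minlmstate_clear(&state);
        _minlmreport_clear(&rep);
        ae_vector_clear(&x);
        ae_state_clear(&_state);
    }
*************************************************************************/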
11253 
11254 }
11255 #endif
11256 
11257