1 /*************************************************************************
2 ALGLIB 3.18.0 (source code generated 2021-10-25)
3 Copyright (c) Sergey Bochkanov (ALGLIB project).
4 
5 >>> SOURCE LICENSE >>>
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation (www.fsf.org); either version 2 of the
9 License, or (at your option) any later version.
10 
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 A copy of the GNU General Public License is available at
17 http://www.fsf.org/licensing/licenses
18 >>> END OF LICENSE >>>
19 *************************************************************************/
20 #ifndef _optimization_pkg_h
21 #define _optimization_pkg_h
22 #include "ap.h"
23 #include "alglibinternal.h"
24 #include "linalg.h"
25 #include "alglibmisc.h"
26 #include "solvers.h"
27 
28 /////////////////////////////////////////////////////////////////////////
29 //
30 // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES)
31 //
32 /////////////////////////////////////////////////////////////////////////
33 namespace alglib_impl
34 {
35 #if defined(AE_COMPILE_OPTGUARDAPI) || !defined(AE_PARTIAL_BUILD)
36 typedef struct
37 {
38     ae_bool nonc0suspected;
39     ae_bool nonc0test0positive;
40     ae_int_t nonc0fidx;
41     double nonc0lipschitzc;
42     ae_bool nonc1suspected;
43     ae_bool nonc1test0positive;
44     ae_bool nonc1test1positive;
45     ae_int_t nonc1fidx;
46     double nonc1lipschitzc;
47     ae_bool badgradsuspected;
48     ae_int_t badgradfidx;
49     ae_int_t badgradvidx;
50     ae_vector badgradxbase;
51     ae_matrix badgraduser;
52     ae_matrix badgradnum;
53 } optguardreport;
54 typedef struct
55 {
56     ae_bool positive;
57     ae_int_t fidx;
58     ae_vector x0;
59     ae_vector d;
60     ae_int_t n;
61     ae_vector stp;
62     ae_vector f;
63     ae_int_t cnt;
64     ae_int_t stpidxa;
65     ae_int_t stpidxb;
66 } optguardnonc0report;
67 typedef struct
68 {
69     ae_bool positive;
70     ae_int_t fidx;
71     ae_vector x0;
72     ae_vector d;
73     ae_int_t n;
74     ae_vector stp;
75     ae_vector f;
76     ae_int_t cnt;
77     ae_int_t stpidxa;
78     ae_int_t stpidxb;
79 } optguardnonc1test0report;
80 typedef struct
81 {
82     ae_bool positive;
83     ae_int_t fidx;
84     ae_int_t vidx;
85     ae_vector x0;
86     ae_vector d;
87     ae_int_t n;
88     ae_vector stp;
89     ae_vector g;
90     ae_int_t cnt;
91     ae_int_t stpidxa;
92     ae_int_t stpidxb;
93 } optguardnonc1test1report;
94 #endif
95 #if defined(AE_COMPILE_OPTSERV) || !defined(AE_PARTIAL_BUILD)
96 typedef struct
97 {
98     ae_vector norms;
99     ae_vector alpha;
100     ae_vector rho;
101     ae_matrix yk;
102     ae_vector idx;
103     ae_vector bufa;
104     ae_vector bufb;
105 } precbuflbfgs;
106 typedef struct
107 {
108     ae_int_t n;
109     ae_int_t k;
110     ae_vector d;
111     ae_matrix v;
112     ae_vector bufc;
113     ae_matrix bufz;
114     ae_matrix bufw;
115     ae_vector tmp;
116 } precbuflowrank;
117 typedef struct
118 {
119     ae_int_t n;
120     ae_int_t k;
121     ae_bool checksmoothness;
122     ae_vector s;
123     ae_vector dcur;
124     ae_int_t enqueuedcnt;
125     ae_vector enqueuedstp;
126     ae_vector enqueuedx;
127     ae_vector enqueuedfunc;
128     ae_matrix enqueuedjac;
129     ae_vector sortedstp;
130     ae_vector sortedidx;
131     ae_int_t sortedcnt;
132     double probingstp;
133     ae_vector probingf;
134     ae_int_t probingnvalues;
135     double probingstepmax;
136     double probingstepscale;
137     ae_int_t probingnstepsstored;
138     ae_vector probingsteps;
139     ae_matrix probingvalues;
140     ae_matrix probingslopes;
141     rcommstate probingrcomm;
142     ae_bool linesearchspoiled;
143     ae_bool linesearchstarted;
144     double nonc0currentrating;
145     double nonc1currentrating;
146     ae_bool badgradhasxj;
147     optguardreport rep;
148     double nonc0strrating;
149     double nonc0lngrating;
150     optguardnonc0report nonc0strrep;
151     optguardnonc0report nonc0lngrep;
152     double nonc1test0strrating;
153     double nonc1test0lngrating;
154     optguardnonc1test0report nonc1test0strrep;
155     optguardnonc1test0report nonc1test0lngrep;
156     double nonc1test1strrating;
157     double nonc1test1lngrating;
158     optguardnonc1test1report nonc1test1strrep;
159     optguardnonc1test1report nonc1test1lngrep;
160     ae_bool needfij;
161     ae_vector x;
162     ae_vector fi;
163     ae_matrix j;
164     rcommstate rstateg0;
165     ae_vector xbase;
166     ae_vector fbase;
167     ae_vector fm;
168     ae_vector fc;
169     ae_vector fp;
170     ae_vector jm;
171     ae_vector jc;
172     ae_vector jp;
173     ae_matrix jbaseusr;
174     ae_matrix jbasenum;
175     ae_vector stp;
176     ae_vector bufr;
177     ae_vector f;
178     ae_vector g;
179     ae_vector deltax;
180     ae_vector tmpidx;
181     ae_vector bufi;
182     ae_vector xu;
183     ae_vector du;
184     ae_vector f0;
185     ae_matrix j0;
186 } smoothnessmonitor;
187 #endif
188 #if defined(AE_COMPILE_MINLBFGS) || !defined(AE_PARTIAL_BUILD)
189 typedef struct
190 {
191     ae_int_t n;
192     ae_int_t m;
193     double epsg;
194     double epsf;
195     double epsx;
196     ae_int_t maxits;
197     ae_bool xrep;
198     double stpmax;
199     ae_vector s;
200     double diffstep;
201     ae_int_t nfev;
202     ae_int_t mcstage;
203     ae_int_t k;
204     ae_int_t q;
205     ae_int_t p;
206     ae_vector rho;
207     ae_matrix yk;
208     ae_matrix sk;
209     ae_vector xp;
210     ae_vector theta;
211     ae_vector d;
212     double stp;
213     ae_vector work;
214     double fold;
215     double trimthreshold;
216     ae_vector xbase;
217     ae_int_t prectype;
218     double gammak;
219     ae_matrix denseh;
220     ae_vector diagh;
221     ae_vector precc;
222     ae_vector precd;
223     ae_matrix precw;
224     ae_int_t preck;
225     precbuflbfgs precbuf;
226     precbuflowrank lowrankbuf;
227     double fbase;
228     double fm2;
229     double fm1;
230     double fp1;
231     double fp2;
232     ae_vector autobuf;
233     ae_vector invs;
234     ae_vector x;
235     double f;
236     ae_vector g;
237     ae_bool needf;
238     ae_bool needfg;
239     ae_bool xupdated;
240     ae_bool userterminationneeded;
241     double teststep;
242     rcommstate rstate;
243     ae_int_t repiterationscount;
244     ae_int_t repnfev;
245     ae_int_t repterminationtype;
246     linminstate lstate;
247     ae_int_t smoothnessguardlevel;
248     smoothnessmonitor smonitor;
249     ae_vector lastscaleused;
250 } minlbfgsstate;
251 typedef struct
252 {
253     ae_int_t iterationscount;
254     ae_int_t nfev;
255     ae_int_t terminationtype;
256 } minlbfgsreport;
257 #endif
258 #if defined(AE_COMPILE_CQMODELS) || !defined(AE_PARTIAL_BUILD)
259 typedef struct
260 {
261     ae_int_t n;
262     ae_int_t k;
263     double alpha;
264     double tau;
265     double theta;
266     ae_matrix a;
267     ae_matrix q;
268     ae_vector b;
269     ae_vector r;
270     ae_vector xc;
271     ae_vector d;
272     ae_vector activeset;
273     ae_matrix tq2dense;
274     ae_matrix tk2;
275     ae_vector tq2diag;
276     ae_vector tq1;
277     ae_vector tk1;
278     double tq0;
279     double tk0;
280     ae_vector txc;
281     ae_vector tb;
282     ae_int_t nfree;
283     ae_int_t ecakind;
284     ae_matrix ecadense;
285     ae_matrix eq;
286     ae_matrix eccm;
287     ae_vector ecadiag;
288     ae_vector eb;
289     double ec;
290     ae_vector tmp0;
291     ae_vector tmp1;
292     ae_vector tmpg;
293     ae_matrix tmp2;
294     ae_bool ismaintermchanged;
295     ae_bool issecondarytermchanged;
296     ae_bool islineartermchanged;
297     ae_bool isactivesetchanged;
298 } convexquadraticmodel;
299 #endif
300 #if defined(AE_COMPILE_LPQPSERV) || !defined(AE_PARTIAL_BUILD)
301 #endif
302 #if defined(AE_COMPILE_SNNLS) || !defined(AE_PARTIAL_BUILD)
303 typedef struct
304 {
305     ae_int_t ns;
306     ae_int_t nd;
307     ae_int_t nr;
308     ae_matrix densea;
309     ae_vector b;
310     ae_vector nnc;
311     double debugflops;
312     ae_int_t debugmaxinnerits;
313     ae_vector xn;
314     ae_vector xp;
315     ae_matrix tmpca;
316     ae_matrix tmplq;
317     ae_matrix trda;
318     ae_vector trdd;
319     ae_vector crb;
320     ae_vector g;
321     ae_vector d;
322     ae_vector dx;
323     ae_vector diagaa;
324     ae_vector cb;
325     ae_vector cx;
326     ae_vector cborg;
327     ae_vector tmpcholesky;
328     ae_vector r;
329     ae_vector regdiag;
330     ae_vector tmp0;
331     ae_vector tmp1;
332     ae_vector tmp2;
333     ae_vector rdtmprowmap;
334 } snnlssolver;
335 #endif
336 #if defined(AE_COMPILE_SACTIVESETS) || !defined(AE_PARTIAL_BUILD)
337 typedef struct
338 {
339     ae_int_t n;
340     ae_int_t algostate;
341     ae_vector xc;
342     ae_bool hasxc;
343     ae_vector s;
344     ae_vector h;
345     ae_vector cstatus;
346     ae_bool basisisready;
347     ae_matrix sdensebatch;
348     ae_matrix pdensebatch;
349     ae_matrix idensebatch;
350     ae_int_t densebatchsize;
351     ae_vector sparsebatch;
352     ae_int_t sparsebatchsize;
353     ae_int_t basisage;
354     ae_bool feasinitpt;
355     ae_bool constraintschanged;
356     ae_vector hasbndl;
357     ae_vector hasbndu;
358     ae_vector bndl;
359     ae_vector bndu;
360     ae_matrix cleic;
361     ae_int_t nec;
362     ae_int_t nic;
363     ae_vector mtnew;
364     ae_vector mtx;
365     ae_vector mtas;
366     ae_vector cdtmp;
367     ae_vector corrtmp;
368     ae_vector unitdiagonal;
369     snnlssolver solver;
370     ae_vector scntmp;
371     ae_vector tmp0;
372     ae_vector tmpfeas;
373     ae_matrix tmpm0;
374     ae_vector rctmps;
375     ae_vector rctmpg;
376     ae_vector rctmprightpart;
377     ae_matrix rctmpdense0;
378     ae_matrix rctmpdense1;
379     ae_vector rctmpisequality;
380     ae_vector rctmpconstraintidx;
381     ae_vector rctmplambdas;
382     ae_matrix tmpbasis;
383     ae_vector tmpnormestimates;
384     ae_vector tmpreciph;
385     ae_vector tmpprodp;
386     ae_vector tmpprods;
387     ae_vector tmpcp;
388     ae_vector tmpcs;
389     ae_vector tmpci;
390 } sactiveset;
391 #endif
392 #if defined(AE_COMPILE_QQPSOLVER) || !defined(AE_PARTIAL_BUILD)
393 typedef struct
394 {
395     double epsg;
396     double epsf;
397     double epsx;
398     ae_int_t maxouterits;
399     ae_bool cgphase;
400     ae_bool cnphase;
401     ae_int_t cgminits;
402     ae_int_t cgmaxits;
403     ae_int_t cnmaxupdates;
404     ae_int_t sparsesolver;
405 } qqpsettings;
406 typedef struct
407 {
408     ae_int_t n;
409     ae_int_t akind;
410     ae_matrix densea;
411     sparsematrix sparsea;
412     ae_bool sparseupper;
413     double absamax;
414     double absasum;
415     double absasum2;
416     ae_vector b;
417     ae_vector bndl;
418     ae_vector bndu;
419     ae_vector havebndl;
420     ae_vector havebndu;
421     ae_vector xs;
422     ae_vector xf;
423     ae_vector gc;
424     ae_vector xp;
425     ae_vector dc;
426     ae_vector dp;
427     ae_vector cgc;
428     ae_vector cgp;
429     sactiveset sas;
430     ae_vector activated;
431     ae_int_t nfree;
432     ae_int_t cnmodelage;
433     ae_matrix densez;
434     sparsematrix sparsecca;
435     ae_vector yidx;
436     ae_vector regdiag;
437     ae_vector regx0;
438     ae_vector tmpcn;
439     ae_vector tmpcni;
440     ae_vector tmpcnb;
441     ae_vector tmp0;
442     ae_vector tmp1;
443     ae_vector stpbuf;
444     sparsebuffers sbuf;
445     ae_int_t repinneriterationscount;
446     ae_int_t repouteriterationscount;
447     ae_int_t repncholesky;
448     ae_int_t repncupdates;
449 } qqpbuffers;
450 #endif
451 #if defined(AE_COMPILE_QPDENSEAULSOLVER) || !defined(AE_PARTIAL_BUILD)
452 typedef struct
453 {
454     double epsx;
455     ae_int_t outerits;
456     double rho;
457 } qpdenseaulsettings;
458 typedef struct
459 {
460     ae_vector nulc;
461     ae_matrix sclsfta;
462     ae_vector sclsftb;
463     ae_vector sclsfthasbndl;
464     ae_vector sclsfthasbndu;
465     ae_vector sclsftbndl;
466     ae_vector sclsftbndu;
467     ae_vector sclsftxc;
468     ae_matrix sclsftcleic;
469     ae_vector cidx;
470     ae_vector cscales;
471     ae_matrix exa;
472     ae_vector exb;
473     ae_vector exxc;
474     ae_vector exbndl;
475     ae_vector exbndu;
476     ae_vector exscale;
477     ae_vector exxorigin;
478     qqpsettings qqpsettingsuser;
479     qqpbuffers qqpbuf;
480     ae_vector nulcest;
481     ae_vector tmpg;
482     ae_vector tmp0;
483     ae_matrix tmp2;
484     ae_vector modelg;
485     ae_vector d;
486     ae_vector deltax;
487     convexquadraticmodel dummycqm;
488     sparsematrix dummysparse;
489     ae_matrix qrkkt;
490     ae_vector qrrightpart;
491     ae_vector qrtau;
492     ae_vector qrsv0;
493     ae_vector qrsvx1;
494     ae_vector nicerr;
495     ae_vector nicnact;
496     ae_int_t repinneriterationscount;
497     ae_int_t repouteriterationscount;
498     ae_int_t repncholesky;
499     ae_int_t repnwrkchanges;
500     ae_int_t repnwrk0;
501     ae_int_t repnwrk1;
502     ae_int_t repnwrkf;
503     ae_int_t repnmv;
504 } qpdenseaulbuffers;
505 #endif
506 #if defined(AE_COMPILE_MINBLEIC) || !defined(AE_PARTIAL_BUILD)
507 typedef struct
508 {
509     ae_int_t nmain;
510     ae_int_t nslack;
511     double epsg;
512     double epsf;
513     double epsx;
514     ae_int_t maxits;
515     ae_bool xrep;
516     ae_bool drep;
517     double stpmax;
518     double diffstep;
519     sactiveset sas;
520     ae_vector s;
521     ae_int_t prectype;
522     ae_vector diagh;
523     ae_vector x;
524     double f;
525     ae_vector g;
526     ae_bool needf;
527     ae_bool needfg;
528     ae_bool xupdated;
529     ae_bool lsstart;
530     ae_bool steepestdescentstep;
531     ae_bool boundedstep;
532     ae_bool userterminationneeded;
533     rcommstate rstate;
534     ae_vector ugc;
535     ae_vector cgc;
536     ae_vector xn;
537     ae_vector ugn;
538     ae_vector cgn;
539     ae_vector xp;
540     double fc;
541     double fn;
542     double fp;
543     ae_vector d;
544     ae_matrix cleic;
545     ae_int_t nec;
546     ae_int_t nic;
547     double lastgoodstep;
548     double lastscaledgoodstep;
549     double maxscaledgrad;
550     ae_vector hasbndl;
551     ae_vector hasbndu;
552     ae_vector bndl;
553     ae_vector bndu;
554     ae_int_t repinneriterationscount;
555     ae_int_t repouteriterationscount;
556     ae_int_t repnfev;
557     ae_int_t repvaridx;
558     ae_int_t repterminationtype;
559     double repdebugeqerr;
560     double repdebugfs;
561     double repdebugff;
562     double repdebugdx;
563     ae_int_t repdebugfeasqpits;
564     ae_int_t repdebugfeasgpaits;
565     ae_vector xstart;
566     snnlssolver solver;
567     double fbase;
568     double fm2;
569     double fm1;
570     double fp1;
571     double fp2;
572     double xm1;
573     double xp1;
574     double gm1;
575     double gp1;
576     ae_int_t cidx;
577     double cval;
578     ae_vector tmpprec;
579     ae_vector tmp0;
580     ae_int_t nfev;
581     ae_int_t mcstage;
582     double stp;
583     double curstpmax;
584     double activationstep;
585     ae_vector work;
586     linminstate lstate;
587     double trimthreshold;
588     ae_int_t nonmonotoniccnt;
589     ae_matrix bufyk;
590     ae_matrix bufsk;
591     ae_vector bufrho;
592     ae_vector buftheta;
593     ae_int_t bufsize;
594     double teststep;
595     ae_int_t smoothnessguardlevel;
596     smoothnessmonitor smonitor;
597     ae_vector lastscaleused;
598     ae_vector invs;
599 } minbleicstate;
600 typedef struct
601 {
602     ae_int_t iterationscount;
603     ae_int_t nfev;
604     ae_int_t varidx;
605     ae_int_t terminationtype;
606     double debugeqerr;
607     double debugfs;
608     double debugff;
609     double debugdx;
610     ae_int_t debugfeasqpits;
611     ae_int_t debugfeasgpaits;
612     ae_int_t inneriterationscount;
613     ae_int_t outeriterationscount;
614 } minbleicreport;
615 #endif
616 #if defined(AE_COMPILE_QPBLEICSOLVER) || !defined(AE_PARTIAL_BUILD)
617 typedef struct
618 {
619     double epsg;
620     double epsf;
621     double epsx;
622     ae_int_t maxits;
623 } qpbleicsettings;
624 typedef struct
625 {
626     minbleicstate solver;
627     minbleicreport solverrep;
628     ae_vector tmp0;
629     ae_vector tmp1;
630     ae_vector tmpi;
631     ae_int_t repinneriterationscount;
632     ae_int_t repouteriterationscount;
633 } qpbleicbuffers;
634 #endif
635 #if defined(AE_COMPILE_VIPMSOLVER) || !defined(AE_PARTIAL_BUILD)
636 typedef struct
637 {
638     ae_int_t n;
639     ae_int_t m;
640     ae_vector x;
641     ae_vector g;
642     ae_vector w;
643     ae_vector t;
644     ae_vector p;
645     ae_vector y;
646     ae_vector z;
647     ae_vector v;
648     ae_vector s;
649     ae_vector q;
650 } vipmvars;
651 typedef struct
652 {
653     ae_vector sigma;
654     ae_vector beta;
655     ae_vector rho;
656     ae_vector nu;
657     ae_vector tau;
658     ae_vector alpha;
659     ae_vector gammaz;
660     ae_vector gammas;
661     ae_vector gammaw;
662     ae_vector gammaq;
663 } vipmrighthandside;
664 typedef struct
665 {
666     ae_bool slacksforequalityconstraints;
667     ae_int_t n;
668     ae_int_t nmain;
669     double epsp;
670     double epsd;
671     double epsgap;
672     ae_bool islinear;
673     ae_vector scl;
674     ae_vector invscl;
675     ae_vector xorigin;
676     double targetscale;
677     ae_vector c;
678     ae_matrix denseh;
679     sparsematrix sparseh;
680     ae_vector diagr;
681     ae_int_t hkind;
682     ae_vector bndl;
683     ae_vector bndu;
684     ae_vector rawbndl;
685     ae_vector rawbndu;
686     ae_vector hasbndl;
687     ae_vector hasbndu;
688     ae_matrix denseafull;
689     ae_matrix denseamain;
690     sparsematrix sparseafull;
691     sparsematrix sparseamain;
692     sparsematrix combinedaslack;
693     ae_vector ascales;
694     ae_vector aflips;
695     ae_vector b;
696     ae_vector r;
697     ae_vector hasr;
698     ae_int_t mdense;
699     ae_int_t msparse;
700     vipmvars x0;
701     vipmvars current;
702     vipmvars best;
703     vipmvars trial;
704     vipmvars deltaaff;
705     vipmvars deltacorr;
706     ae_vector isfrozen;
707     ae_vector hasgz;
708     ae_vector hasts;
709     ae_vector haswv;
710     ae_vector haspq;
711     ae_int_t repiterationscount;
712     ae_int_t repncholesky;
713     ae_bool dotrace;
714     ae_bool dodetailedtrace;
715     ae_int_t factorizationtype;
716     ae_bool factorizationpoweredup;
717     ae_bool factorizationpresent;
718     ae_vector diagdz;
719     ae_vector diagdzi;
720     ae_vector diagdziri;
721     ae_vector diagds;
722     ae_vector diagdsi;
723     ae_vector diagdsiri;
724     ae_vector diagdw;
725     ae_vector diagdwi;
726     ae_vector diagdwir;
727     ae_vector diagdq;
728     ae_vector diagdqi;
729     ae_vector diagdqiri;
730     ae_vector diagddr;
731     ae_vector diagde;
732     ae_vector diagder;
733     ae_matrix factdensehaug;
734     ae_vector factregdhrh;
735     ae_vector factinvregdzrz;
736     ae_vector factregewave;
737     sparsematrix factsparsekkttmpl;
738     sparsematrix factsparsekkt;
739     ae_vector factsparsekktpivp;
740     ae_vector facttmpdiag;
741     spcholanalysis ldltanalysis;
742     ae_vector factsparsediagd;
743     vipmrighthandside rhs;
744     ae_vector rhsalphacap;
745     ae_vector rhsbetacap;
746     ae_vector rhsnucap;
747     ae_vector rhstaucap;
748     ae_vector deltaxy;
749     ae_vector tmphx;
750     ae_vector tmpax;
751     ae_vector tmpaty;
752     vipmvars zerovars;
753     ae_vector dummyr;
754     ae_vector tmpy;
755     ae_vector tmp0;
756     ae_vector tmp1;
757     ae_vector tmp2;
758     ae_matrix tmpr2;
759     ae_vector tmplaggrad;
760     ae_vector tmpi;
761     sparsematrix tmpsparse0;
762 } vipmstate;
763 #endif
764 #if defined(AE_COMPILE_MINQP) || !defined(AE_PARTIAL_BUILD)
765 typedef struct
766 {
767     ae_int_t n;
768     qqpsettings qqpsettingsuser;
769     qpbleicsettings qpbleicsettingsuser;
770     qpdenseaulsettings qpdenseaulsettingsuser;
771     double veps;
772     ae_bool dbgskipconstraintnormalization;
773     ae_int_t algokind;
774     ae_int_t akind;
775     convexquadraticmodel a;
776     sparsematrix sparsea;
777     ae_bool sparseaupper;
778     double absamax;
779     double absasum;
780     double absasum2;
781     ae_vector b;
782     ae_vector bndl;
783     ae_vector bndu;
784     ae_int_t stype;
785     ae_vector s;
786     ae_vector havebndl;
787     ae_vector havebndu;
788     ae_vector xorigin;
789     ae_vector startx;
790     ae_bool havex;
791     ae_matrix densec;
792     sparsematrix sparsec;
793     ae_vector cl;
794     ae_vector cu;
795     ae_int_t mdense;
796     ae_int_t msparse;
797     ae_vector xs;
798     ae_int_t repinneriterationscount;
799     ae_int_t repouteriterationscount;
800     ae_int_t repncholesky;
801     ae_int_t repnmv;
802     ae_int_t repterminationtype;
803     ae_vector replagbc;
804     ae_vector replaglc;
805     ae_vector effectives;
806     ae_vector tmp0;
807     ae_matrix ecleic;
808     ae_vector elaglc;
809     ae_vector elagmlt;
810     ae_vector elagidx;
811     ae_matrix dummyr2;
812     sparsematrix dummysparse;
813     ae_matrix tmpr2;
814     ae_vector wrkbndl;
815     ae_vector wrkbndu;
816     ae_vector wrkcl;
817     ae_vector wrkcu;
818     ae_matrix wrkdensec;
819     sparsematrix wrksparsec;
820     ae_bool qpbleicfirstcall;
821     qpbleicbuffers qpbleicbuf;
822     qqpbuffers qqpbuf;
823     qpdenseaulbuffers qpdenseaulbuf;
824     vipmstate vsolver;
825 } minqpstate;
826 typedef struct
827 {
828     ae_int_t inneriterationscount;
829     ae_int_t outeriterationscount;
830     ae_int_t nmv;
831     ae_int_t ncholesky;
832     ae_int_t terminationtype;
833     ae_vector lagbc;
834     ae_vector laglc;
835 } minqpreport;
836 #endif
837 #if defined(AE_COMPILE_MINLM) || !defined(AE_PARTIAL_BUILD)
838 typedef struct
839 {
840     ae_int_t n;
841     ae_int_t m;
842     double stpmax;
843     ae_int_t modelage;
844     ae_int_t maxmodelage;
845     ae_bool hasfi;
846     double epsx;
847     ae_vector x;
848     double f;
849     ae_vector fi;
850     ae_bool needf;
851     ae_bool needfi;
852     double fbase;
853     ae_vector modeldiag;
854     ae_vector xbase;
855     ae_vector fibase;
856     ae_vector bndl;
857     ae_vector bndu;
858     ae_vector havebndl;
859     ae_vector havebndu;
860     ae_vector s;
861     rcommstate rstate;
862     ae_vector xdir;
863     ae_vector choleskybuf;
864     ae_vector tmp0;
865     ae_vector tmpct;
866     double actualdecrease;
867     double predicteddecrease;
868     minqpstate qpstate;
869     minqpreport qprep;
870     sparsematrix tmpsp;
871 } minlmstepfinder;
872 typedef struct
873 {
874     ae_int_t n;
875     ae_int_t m;
876     double diffstep;
877     double epsx;
878     ae_int_t maxits;
879     ae_bool xrep;
880     double stpmax;
881     ae_int_t maxmodelage;
882     ae_bool makeadditers;
883     ae_vector x;
884     double f;
885     ae_vector fi;
886     ae_matrix j;
887     ae_matrix h;
888     ae_vector g;
889     ae_bool needf;
890     ae_bool needfg;
891     ae_bool needfgh;
892     ae_bool needfij;
893     ae_bool needfi;
894     ae_bool xupdated;
895     ae_bool userterminationneeded;
896     ae_int_t algomode;
897     ae_bool hasf;
898     ae_bool hasfi;
899     ae_bool hasg;
900     ae_vector xbase;
901     double fbase;
902     ae_vector fibase;
903     ae_vector gbase;
904     ae_matrix quadraticmodel;
905     ae_vector bndl;
906     ae_vector bndu;
907     ae_vector havebndl;
908     ae_vector havebndu;
909     ae_vector s;
910     ae_matrix cleic;
911     ae_int_t nec;
912     ae_int_t nic;
913     double lambdav;
914     double nu;
915     ae_int_t modelage;
916     ae_vector xnew;
917     ae_vector xdir;
918     ae_vector deltax;
919     ae_vector deltaf;
920     ae_bool deltaxready;
921     ae_bool deltafready;
922     smoothnessmonitor smonitor;
923     double teststep;
924     ae_vector lastscaleused;
925     ae_int_t repiterationscount;
926     ae_int_t repterminationtype;
927     ae_int_t repnfunc;
928     ae_int_t repnjac;
929     ae_int_t repngrad;
930     ae_int_t repnhess;
931     ae_int_t repncholesky;
932     rcommstate rstate;
933     ae_vector choleskybuf;
934     ae_vector tmp0;
935     double actualdecrease;
936     double predicteddecrease;
937     double xm1;
938     double xp1;
939     ae_vector fm1;
940     ae_vector fp1;
941     ae_vector fc1;
942     ae_vector gm1;
943     ae_vector gp1;
944     ae_vector gc1;
945     minlbfgsstate internalstate;
946     minlbfgsreport internalrep;
947     minqpstate qpstate;
948     minqpreport qprep;
949     minlmstepfinder finderstate;
950 } minlmstate;
951 typedef struct
952 {
953     ae_int_t iterationscount;
954     ae_int_t terminationtype;
955     ae_int_t nfunc;
956     ae_int_t njac;
957     ae_int_t ngrad;
958     ae_int_t nhess;
959     ae_int_t ncholesky;
960 } minlmreport;
961 #endif
962 #if defined(AE_COMPILE_MINCG) || !defined(AE_PARTIAL_BUILD)
963 typedef struct
964 {
965     ae_int_t n;
966     double epsg;
967     double epsf;
968     double epsx;
969     ae_int_t maxits;
970     double stpmax;
971     double suggestedstep;
972     ae_bool xrep;
973     ae_bool drep;
974     ae_int_t cgtype;
975     ae_int_t prectype;
976     ae_vector diagh;
977     ae_vector diaghl2;
978     ae_matrix vcorr;
979     ae_int_t vcnt;
980     ae_vector s;
981     double diffstep;
982     ae_int_t nfev;
983     ae_int_t mcstage;
984     ae_int_t k;
985     ae_vector xk;
986     ae_vector dk;
987     ae_vector xn;
988     ae_vector dn;
989     ae_vector d;
990     double fold;
991     double stp;
992     double curstpmax;
993     ae_vector yk;
994     double lastgoodstep;
995     double lastscaledstep;
996     ae_int_t mcinfo;
997     ae_bool innerresetneeded;
998     ae_bool terminationneeded;
999     double trimthreshold;
1000     ae_vector xbase;
1001     ae_int_t rstimer;
1002     ae_vector x;
1003     double f;
1004     ae_vector g;
1005     ae_bool needf;
1006     ae_bool needfg;
1007     ae_bool xupdated;
1008     ae_bool algpowerup;
1009     ae_bool lsstart;
1010     ae_bool lsend;
1011     ae_bool userterminationneeded;
1012     rcommstate rstate;
1013     ae_int_t repiterationscount;
1014     ae_int_t repnfev;
1015     ae_int_t repterminationtype;
1016     ae_int_t debugrestartscount;
1017     linminstate lstate;
1018     double fbase;
1019     double fm2;
1020     double fm1;
1021     double fp1;
1022     double fp2;
1023     double betahs;
1024     double betady;
1025     ae_vector work0;
1026     ae_vector work1;
1027     ae_vector invs;
1028     double teststep;
1029     ae_int_t smoothnessguardlevel;
1030     smoothnessmonitor smonitor;
1031     ae_vector lastscaleused;
1032 } mincgstate;
1033 typedef struct
1034 {
1035     ae_int_t iterationscount;
1036     ae_int_t nfev;
1037     ae_int_t terminationtype;
1038 } mincgreport;
1039 #endif
1040 #if defined(AE_COMPILE_NLCSQP) || !defined(AE_PARTIAL_BUILD)
1041 typedef struct
1042 {
1043     ae_int_t algokind;
1044     vipmstate ipmsolver;
1045     ae_vector curb;
1046     ae_vector curbndl;
1047     ae_vector curbndu;
1048     ae_vector cural;
1049     ae_vector curau;
1050     sparsematrix sparserawlc;
1051     sparsematrix sparseefflc;
1052     ae_vector d0;
1053     ae_matrix h;
1054     ae_matrix densedummy;
1055     sparsematrix sparsedummy;
1056     ae_vector tmp0;
1057     ae_vector tmp1;
1058     ae_vector tmp2;
1059     ae_vector sk;
1060     ae_vector yk;
1061     ae_vector hasbndl;
1062     ae_vector hasbndu;
1063     ae_vector hasal;
1064     ae_vector hasau;
1065     ae_matrix activea;
1066     ae_vector activerhs;
1067     ae_vector activeidx;
1068     ae_int_t activesetsize;
1069 } minsqpsubsolver;
1070 typedef struct
1071 {
1072     ae_vector sclagtmp0;
1073     ae_vector sclagtmp1;
1074 } minsqptmplagrangian;
1075 typedef struct
1076 {
1077     ae_vector mftmp0;
1078 } minsqptmpmerit;
1079 typedef struct
1080 {
1081     ae_int_t n;
1082     ae_int_t nec;
1083     ae_int_t nic;
1084     ae_int_t nlec;
1085     ae_int_t nlic;
1086     ae_vector d;
1087     ae_vector dx;
1088     ae_vector stepkx;
1089     ae_vector stepkxc;
1090     ae_vector stepkxn;
1091     ae_vector stepkfi;
1092     ae_vector stepkfic;
1093     ae_vector stepkfin;
1094     ae_matrix stepkj;
1095     ae_matrix stepkjc;
1096     ae_matrix stepkjn;
1097     ae_vector lagmult;
1098     ae_vector dummylagmult;
1099     ae_vector penalties;
1100     minsqptmpmerit tmpmerit;
1101     minsqptmplagrangian tmplagrangianfg;
1102     ae_vector stepklaggrad;
1103     ae_vector stepknlaggrad;
1104     ae_int_t status;
1105     ae_bool increasebigc;
1106     rcommstate rmeritphasestate;
1107 } minsqpmeritphasestate;
1108 typedef struct
1109 {
1110     ae_int_t n;
1111     ae_int_t nec;
1112     ae_int_t nic;
1113     ae_int_t nlec;
1114     ae_int_t nlic;
1115     ae_vector s;
1116     ae_matrix scaledcleic;
1117     ae_vector lcsrcidx;
1118     ae_vector hasbndl;
1119     ae_vector hasbndu;
1120     ae_vector scaledbndl;
1121     ae_vector scaledbndu;
1122     double epsx;
1123     ae_int_t maxits;
1124     ae_vector x;
1125     ae_vector fi;
1126     ae_matrix j;
1127     double f;
1128     ae_bool needfij;
1129     ae_bool xupdated;
1130     minsqpmeritphasestate meritstate;
1131     double bigc;
1132     double trustrad;
1133     ae_int_t trustradstagnationcnt;
1134     ae_int_t fstagnationcnt;
1135     ae_vector step0x;
1136     ae_vector stepkx;
1137     ae_vector backupx;
1138     ae_vector step0fi;
1139     ae_vector stepkfi;
1140     ae_vector backupfi;
1141     ae_matrix step0j;
1142     ae_matrix stepkj;
1143     ae_bool haslagmult;
1144     ae_vector meritlagmult;
1145     ae_vector dummylagmult;
1146     ae_matrix abslagmemory;
1147     ae_vector fscales;
1148     ae_vector tracegamma;
1149     minsqpsubsolver subsolver;
1150     minsqptmpmerit tmpmerit;
1151     ae_int_t repsimplexiterations;
1152     ae_int_t repsimplexiterations1;
1153     ae_int_t repsimplexiterations2;
1154     ae_int_t repsimplexiterations3;
1155     ae_int_t repiterationscount;
1156     ae_int_t repterminationtype;
1157     double repbcerr;
1158     ae_int_t repbcidx;
1159     double replcerr;
1160     ae_int_t replcidx;
1161     double repnlcerr;
1162     ae_int_t repnlcidx;
1163     rcommstate rstate;
1164 } minsqpstate;
1165 #endif
1166 #if defined(AE_COMPILE_LPQPPRESOLVE) || !defined(AE_PARTIAL_BUILD)
1167 typedef struct
1168 {
1169     ae_int_t newn;
1170     ae_int_t oldn;
1171     ae_int_t newm;
1172     ae_int_t oldm;
1173     ae_vector rawbndl;
1174     ae_vector rawbndu;
1175     ae_vector colscales;
1176     ae_vector rowscales;
1177     double costscale;
1178     ae_vector c;
1179     ae_vector bndl;
1180     ae_vector bndu;
1181     sparsematrix sparsea;
1182     ae_vector al;
1183     ae_vector au;
1184 } presolveinfo;
1185 #endif
1186 #if defined(AE_COMPILE_REVISEDDUALSIMPLEX) || !defined(AE_PARTIAL_BUILD)
1187 typedef struct
1188 {
1189     double pivottol;
1190     double perturbmag;
1191     ae_int_t maxtrfage;
1192     ae_int_t trftype;
1193     ae_int_t ratiotest;
1194     ae_int_t pricing;
1195     ae_int_t shifting;
1196     double xtolabs;
1197     double xtolrelabs;
1198     double dtolabs;
1199 } dualsimplexsettings;
1200 typedef struct
1201 {
1202     ae_int_t n;
1203     ae_int_t k;
1204     ae_vector idx;
1205     ae_vector vals;
1206     ae_vector dense;
1207 } dssvector;
1208 typedef struct
1209 {
1210     ae_int_t ns;
1211     ae_int_t m;
1212     ae_vector idx;
1213     ae_vector nidx;
1214     ae_vector isbasic;
1215     ae_int_t trftype;
1216     ae_bool isvalidtrf;
1217     ae_int_t trfage;
1218     ae_matrix denselu;
1219     sparsematrix sparsel;
1220     sparsematrix sparseu;
1221     sparsematrix sparseut;
1222     ae_vector rowpermbwd;
1223     ae_vector colpermbwd;
1224     ae_vector densepfieta;
1225     ae_vector densemu;
1226     ae_vector rk;
1227     ae_vector dk;
1228     ae_vector dseweights;
1229     ae_bool dsevalid;
1230     double eminu;
1231     ae_int_t statfact;
1232     ae_int_t statupdt;
1233     double statoffdiag;
1234     ae_vector wtmp0;
1235     ae_vector wtmp1;
1236     ae_vector wtmp2;
1237     ae_vector nrs;
1238     ae_vector tcinvidx;
1239     ae_matrix denselu2;
1240     ae_vector densep2;
1241     ae_vector densep2c;
1242     sparsematrix sparselu1;
1243     sparsematrix sparselu2;
1244     sluv2buffer lubuf2;
1245     ae_vector tmpi;
1246     ae_vector utmp0;
1247     ae_vector utmpi;
1248     sparsematrix sparseludbg;
1249 } dualsimplexbasis;
1250 typedef struct
1251 {
1252     ae_int_t ns;
1253     ae_int_t m;
1254     ae_vector rawc;
1255     ae_vector bndl;
1256     ae_vector bndu;
1257     ae_vector bndt;
1258     ae_vector xa;
1259     ae_vector d;
1260     ae_int_t state;
1261     ae_vector xb;
1262     ae_vector bndlb;
1263     ae_vector bndub;
1264     ae_vector bndtb;
1265     ae_vector bndtollb;
1266     ae_vector bndtolub;
1267     ae_vector effc;
1268 } dualsimplexsubproblem;
1269 typedef struct
1270 {
1271     ae_vector rowscales;
1272     ae_vector rawbndl;
1273     ae_vector rawbndu;
1274     ae_int_t ns;
1275     ae_int_t m;
1276     sparsematrix a;
1277     sparsematrix at;
1278     dualsimplexbasis basis;
1279     dualsimplexsubproblem primary;
1280     dualsimplexsubproblem phase1;
1281     dualsimplexsubproblem phase3;
1282     ae_vector repx;
1283     ae_vector replagbc;
1284     ae_vector replaglc;
1285     ae_vector repstats;
1286     ae_int_t repterminationtype;
1287     ae_int_t repiterationscount;
1288     ae_int_t repiterationscount1;
1289     ae_int_t repiterationscount2;
1290     ae_int_t repiterationscount3;
1291     ae_int_t repphase1time;
1292     ae_int_t repphase2time;
1293     ae_int_t repphase3time;
1294     ae_int_t repdualpricingtime;
1295     ae_int_t repdualbtrantime;
1296     ae_int_t repdualpivotrowtime;
1297     ae_int_t repdualratiotesttime;
1298     ae_int_t repdualftrantime;
1299     ae_int_t repdualupdatesteptime;
1300     double repfillpivotrow;
1301     ae_int_t repfillpivotrowcnt;
1302     double repfillrhor;
1303     ae_int_t repfillrhorcnt;
1304     double repfilldensemu;
1305     ae_int_t repfilldensemucnt;
1306     ae_bool dotrace;
1307     ae_bool dodetailedtrace;
1308     ae_bool dotimers;
1309     ae_vector btrantmp0;
1310     ae_vector btrantmp1;
1311     ae_vector btrantmp2;
1312     ae_vector ftrantmp0;
1313     ae_vector ftrantmp1;
1314     ae_vector possibleflips;
1315     ae_int_t possibleflipscnt;
1316     ae_vector dfctmp0;
1317     ae_vector dfctmp1;
1318     ae_vector dfctmp2;
1319     ae_vector ustmpi;
1320     apbuffers xydsbuf;
1321     ae_vector tmp0;
1322     ae_vector tmp1;
1323     ae_vector tmp2;
1324     dssvector alphar;
1325     dssvector rhor;
1326     ae_vector tau;
1327     ae_vector alphaq;
1328     ae_vector alphaqim;
1329     ae_vector eligiblealphar;
1330     ae_vector harrisset;
1331 } dualsimplexstate;
1332 #endif
1333 #if defined(AE_COMPILE_MINLP) || !defined(AE_PARTIAL_BUILD)
1334 typedef struct
1335 {
1336     ae_int_t n;
1337     ae_int_t algokind;
1338     double ipmlambda;
1339     ae_vector s;
1340     ae_vector c;
1341     ae_vector bndl;
1342     ae_vector bndu;
1343     ae_int_t m;
1344     sparsematrix a;
1345     ae_vector al;
1346     ae_vector au;
1347     ae_vector xs;
1348     ae_vector lagbc;
1349     ae_vector laglc;
1350     ae_vector cs;
1351     double repf;
1352     double repprimalerror;
1353     double repdualerror;
1354     double repslackerror;
1355     ae_int_t repiterationscount;
1356     ae_int_t repterminationtype;
1357     ae_int_t repn;
1358     ae_int_t repm;
1359     double dsseps;
1360     double ipmeps;
1361     dualsimplexstate dss;
1362     vipmstate ipm;
1363     ae_vector adddtmpi;
1364     ae_vector adddtmpr;
1365     ae_vector tmpax;
1366     ae_vector tmpg;
1367     presolveinfo presolver;
1368     ae_vector zeroorigin;
1369     ae_vector units;
1370     sparsematrix ipmquadratic;
1371 } minlpstate;
1372 typedef struct
1373 {
1374     double f;
1375     ae_vector lagbc;
1376     ae_vector laglc;
1377     ae_vector y;
1378     ae_vector stats;
1379     double primalerror;
1380     double dualerror;
1381     double slackerror;
1382     ae_int_t iterationscount;
1383     ae_int_t terminationtype;
1384 } minlpreport;
1385 #endif
1386 #if defined(AE_COMPILE_NLCSLP) || !defined(AE_PARTIAL_BUILD)
1387 typedef struct
1388 {
1389     presolveinfo presolver;
1390     dualsimplexstate dss;
1391     dualsimplexsettings dsssettings;
1392     dualsimplexbasis lastbasis;
1393     ae_bool basispresent;
1394     ae_matrix curd;
1395     ae_int_t curdcnt;
1396     ae_vector curb;
1397     ae_vector curbndl;
1398     ae_vector curbndu;
1399     ae_vector cural;
1400     ae_vector curau;
1401     sparsematrix sparserawlc;
1402     sparsematrix sparseefflc;
1403     ae_int_t hessiantype;
1404     ae_matrix h;
1405     ae_matrix curhd;
1406     ae_matrix densedummy;
1407     sparsematrix sparsedummy;
1408     ae_vector tmp0;
1409     ae_vector tmp1;
1410     ae_vector sk;
1411     ae_vector yk;
1412     ae_vector xs;
1413     ae_vector laglc;
1414     ae_vector lagbc;
1415     ae_vector cs;
1416 } minslpsubsolver;
1417 typedef struct
1418 {
1419     ae_vector sclagtmp0;
1420     ae_vector sclagtmp1;
1421 } minslptmplagrangian;
1422 typedef struct
1423 {
1424     ae_vector mftmp0;
1425 } minslptmpmerit;
1426 typedef struct
1427 {
1428     ae_bool usecorrection;
1429     ae_vector d;
1430     ae_vector dx;
1431     ae_vector stepkxc;
1432     ae_vector stepkxn;
1433     ae_vector stepkfic;
1434     ae_vector stepkfin;
1435     ae_matrix stepkjc;
1436     ae_matrix stepkjn;
1437     ae_vector dummylagmult;
1438     minslptmpmerit tmpmerit;
1439     rcommstate rphase13state;
1440 } minslpphase13state;
1441 typedef struct
1442 {
1443     ae_vector stepkxn;
1444     ae_vector stepkxc;
1445     ae_vector stepkfin;
1446     ae_vector stepkfic;
1447     ae_matrix stepkjn;
1448     ae_matrix stepkjc;
1449     ae_vector stepklaggrad;
1450     ae_vector stepknlaggrad;
1451     ae_vector stepknlagmult;
1452     ae_vector meritlagmult;
1453     minslptmplagrangian tmplagrangianfg;
1454     double lastlcerr;
1455     ae_int_t lastlcidx;
1456     double lastnlcerr;
1457     ae_int_t lastnlcidx;
1458     ae_vector tmp0;
1459     ae_vector d;
1460     linminstate mcstate;
1461     minslptmpmerit tmpmerit;
1462     rcommstate rphase2state;
1463 } minslpphase2state;
1464 typedef struct
1465 {
1466     ae_int_t n;
1467     ae_int_t nec;
1468     ae_int_t nic;
1469     ae_int_t nlec;
1470     ae_int_t nlic;
1471     ae_vector s;
1472     ae_matrix scaledcleic;
1473     ae_vector lcsrcidx;
1474     ae_vector hasbndl;
1475     ae_vector hasbndu;
1476     ae_vector scaledbndl;
1477     ae_vector scaledbndu;
1478     double epsx;
1479     ae_int_t maxits;
1480     ae_int_t hessiantype;
1481     ae_vector x;
1482     ae_vector fi;
1483     ae_matrix j;
1484     double f;
1485     ae_bool needfij;
1486     ae_bool xupdated;
1487     minslpphase13state state13;
1488     minslpphase2state state2;
1489     double trustrad;
1490     ae_int_t lpfailurecnt;
1491     ae_int_t fstagnationcnt;
1492     ae_vector step0x;
1493     ae_vector stepkx;
1494     ae_vector backupx;
1495     ae_vector step0fi;
1496     ae_vector stepkfi;
1497     ae_vector backupfi;
1498     ae_matrix step0j;
1499     ae_matrix stepkj;
1500     ae_matrix backupj;
1501     ae_vector meritlagmult;
1502     ae_vector dummylagmult;
1503     ae_vector fscales;
1504     ae_vector meritfunctionhistory;
1505     ae_vector maxlaghistory;
1506     ae_int_t historylen;
1507     minslpsubsolver subsolver;
1508     minslptmpmerit tmpmerit;
1509     ae_int_t repsimplexiterations;
1510     ae_int_t repsimplexiterations1;
1511     ae_int_t repsimplexiterations2;
1512     ae_int_t repsimplexiterations3;
1513     ae_int_t repinneriterationscount;
1514     ae_int_t repouteriterationscount;
1515     ae_int_t repterminationtype;
1516     double repbcerr;
1517     ae_int_t repbcidx;
1518     double replcerr;
1519     ae_int_t replcidx;
1520     double repnlcerr;
1521     ae_int_t repnlcidx;
1522     rcommstate rstate;
1523 } minslpstate;
1524 #endif
1525 #if defined(AE_COMPILE_MINNLC) || !defined(AE_PARTIAL_BUILD)
1526 typedef struct
1527 {
1528     double stabilizingpoint;
1529     double initialinequalitymultiplier;
1530     ae_int_t solvertype;
1531     ae_int_t prectype;
1532     ae_int_t updatefreq;
1533     double rho;
1534     ae_int_t n;
1535     double epsx;
1536     ae_int_t maxits;
1537     ae_int_t aulitscnt;
1538     ae_bool xrep;
1539     double stpmax;
1540     double diffstep;
1541     double teststep;
1542     ae_vector s;
1543     ae_vector bndl;
1544     ae_vector bndu;
1545     ae_vector hasbndl;
1546     ae_vector hasbndu;
1547     ae_int_t nec;
1548     ae_int_t nic;
1549     ae_matrix cleic;
1550     ae_vector lcsrcidx;
1551     ae_int_t ng;
1552     ae_int_t nh;
1553     ae_vector x;
1554     double f;
1555     ae_vector fi;
1556     ae_matrix j;
1557     ae_bool needfij;
1558     ae_bool needfi;
1559     ae_bool xupdated;
1560     rcommstate rstate;
1561     rcommstate rstateaul;
1562     rcommstate rstateslp;
1563     ae_vector scaledbndl;
1564     ae_vector scaledbndu;
1565     ae_matrix scaledcleic;
1566     ae_vector xc;
1567     ae_vector xstart;
1568     ae_vector xbase;
1569     ae_vector fbase;
1570     ae_vector dfbase;
1571     ae_vector fm2;
1572     ae_vector fm1;
1573     ae_vector fp1;
1574     ae_vector fp2;
1575     ae_vector dfm1;
1576     ae_vector dfp1;
1577     ae_vector bufd;
1578     ae_vector bufc;
1579     ae_vector tmp0;
1580     ae_matrix bufw;
1581     ae_matrix bufz;
1582     ae_vector xk;
1583     ae_vector xk1;
1584     ae_vector gk;
1585     ae_vector gk1;
1586     double gammak;
1587     ae_bool xkpresent;
1588     minlbfgsstate auloptimizer;
1589     minlbfgsreport aulreport;
1590     ae_vector nubc;
1591     ae_vector nulc;
1592     ae_vector nunlc;
1593     ae_bool userterminationneeded;
1594     minslpstate slpsolverstate;
1595     minsqpstate sqpsolverstate;
1596     ae_int_t smoothnessguardlevel;
1597     smoothnessmonitor smonitor;
1598     ae_vector lastscaleused;
1599     ae_int_t repinneriterationscount;
1600     ae_int_t repouteriterationscount;
1601     ae_int_t repnfev;
1602     ae_int_t repterminationtype;
1603     double repbcerr;
1604     ae_int_t repbcidx;
1605     double replcerr;
1606     ae_int_t replcidx;
1607     double repnlcerr;
1608     ae_int_t repnlcidx;
1609     ae_int_t repdbgphase0its;
1610 } minnlcstate;
1611 typedef struct
1612 {
1613     ae_int_t iterationscount;
1614     ae_int_t nfev;
1615     ae_int_t terminationtype;
1616     double bcerr;
1617     ae_int_t bcidx;
1618     double lcerr;
1619     ae_int_t lcidx;
1620     double nlcerr;
1621     ae_int_t nlcidx;
1622     ae_int_t dbgphase0its;
1623 } minnlcreport;
1624 #endif
1625 #if defined(AE_COMPILE_MINNS) || !defined(AE_PARTIAL_BUILD)
1626 typedef struct
1627 {
1628     double fc;
1629     double fn;
1630     ae_vector xc;
1631     ae_vector xn;
1632     ae_vector x0;
1633     ae_vector gc;
1634     ae_vector d;
1635     ae_matrix uh;
1636     ae_matrix ch;
1637     ae_matrix rk;
1638     ae_vector invutc;
1639     ae_vector tmp0;
1640     ae_vector tmpidx;
1641     ae_vector tmpd;
1642     ae_vector tmpc;
1643     ae_vector tmplambdas;
1644     ae_matrix tmpc2;
1645     ae_vector tmpb;
1646     snnlssolver nnls;
1647 } minnsqp;
1648 typedef struct
1649 {
1650     ae_int_t solvertype;
1651     ae_int_t n;
1652     double epsx;
1653     ae_int_t maxits;
1654     ae_bool xrep;
1655     double diffstep;
1656     ae_vector s;
1657     ae_vector bndl;
1658     ae_vector bndu;
1659     ae_vector hasbndl;
1660     ae_vector hasbndu;
1661     ae_int_t nec;
1662     ae_int_t nic;
1663     ae_matrix cleic;
1664     ae_int_t ng;
1665     ae_int_t nh;
1666     ae_vector x;
1667     double f;
1668     ae_vector fi;
1669     ae_matrix j;
1670     ae_bool needfij;
1671     ae_bool needfi;
1672     ae_bool xupdated;
1673     rcommstate rstate;
1674     rcommstate rstateags;
1675     hqrndstate agsrs;
1676     double agsradius;
1677     ae_int_t agssamplesize;
1678     double agsraddecay;
1679     double agsalphadecay;
1680     double agsdecrease;
1681     double agsinitstp;
1682     double agsstattold;
1683     double agsshortstpabs;
1684     double agsshortstprel;
1685     double agsshortf;
1686     ae_int_t agsshortlimit;
1687     double agsrhononlinear;
1688     ae_int_t agsminupdate;
1689     ae_int_t agsmaxraddecays;
1690     ae_int_t agsmaxbacktrack;
1691     ae_int_t agsmaxbacktracknonfull;
1692     double agspenaltylevel;
1693     double agspenaltyincrease;
1694     ae_vector xstart;
1695     ae_vector xc;
1696     ae_vector xn;
1697     ae_vector rawg;
1698     ae_vector meritg;
1699     double rawf;
1700     double meritf;
1701     ae_vector d;
1702     ae_vector colmax;
1703     ae_vector diagh;
1704     ae_vector signmin;
1705     ae_vector signmax;
1706     ae_bool userterminationneeded;
1707     ae_vector scaledbndl;
1708     ae_vector scaledbndu;
1709     ae_matrix scaledcleic;
1710     double rholinear;
1711     ae_matrix samplex;
1712     ae_matrix samplegm;
1713     ae_matrix samplegmbc;
1714     ae_vector samplef;
1715     minnsqp nsqp;
1716     ae_vector tmp0;
1717     ae_vector tmp1;
1718     ae_matrix tmp2;
1719     ae_vector tmp3;
1720     ae_vector xbase;
1721     ae_vector fbase;
1722     ae_vector fp;
1723     ae_vector fm;
1724     ae_vector xscaled;
1725     ae_int_t repinneriterationscount;
1726     ae_int_t repouteriterationscount;
1727     ae_int_t repnfev;
1728     ae_int_t repvaridx;
1729     ae_int_t repfuncidx;
1730     ae_int_t repterminationtype;
1731     double replcerr;
1732     double repnlcerr;
1733     ae_int_t dbgncholesky;
1734 } minnsstate;
1735 typedef struct
1736 {
1737     ae_int_t iterationscount;
1738     ae_int_t nfev;
1739     double cerr;
1740     double lcerr;
1741     double nlcerr;
1742     ae_int_t terminationtype;
1743     ae_int_t varidx;
1744     ae_int_t funcidx;
1745 } minnsreport;
1746 #endif
1747 #if defined(AE_COMPILE_MINCOMP) || !defined(AE_PARTIAL_BUILD)
1748 typedef struct
1749 {
1750     ae_int_t n;
1751     double epsg;
1752     double epsf;
1753     double epsx;
1754     ae_int_t maxits;
1755     ae_bool xrep;
1756     double stpmax;
1757     ae_int_t cgtype;
1758     ae_int_t k;
1759     ae_int_t nfev;
1760     ae_int_t mcstage;
1761     ae_vector bndl;
1762     ae_vector bndu;
1763     ae_int_t curalgo;
1764     ae_int_t acount;
1765     double mu;
1766     double finit;
1767     double dginit;
1768     ae_vector ak;
1769     ae_vector xk;
1770     ae_vector dk;
1771     ae_vector an;
1772     ae_vector xn;
1773     ae_vector dn;
1774     ae_vector d;
1775     double fold;
1776     double stp;
1777     ae_vector work;
1778     ae_vector yk;
1779     ae_vector gc;
1780     double laststep;
1781     ae_vector x;
1782     double f;
1783     ae_vector g;
1784     ae_bool needfg;
1785     ae_bool xupdated;
1786     rcommstate rstate;
1787     ae_int_t repiterationscount;
1788     ae_int_t repnfev;
1789     ae_int_t repterminationtype;
1790     ae_int_t debugrestartscount;
1791     linminstate lstate;
1792     double betahs;
1793     double betady;
1794 } minasastate;
1795 typedef struct
1796 {
1797     ae_int_t iterationscount;
1798     ae_int_t nfev;
1799     ae_int_t terminationtype;
1800     ae_int_t activeconstraints;
1801 } minasareport;
1802 #endif
1803 #if defined(AE_COMPILE_MINBC) || !defined(AE_PARTIAL_BUILD)
1804 typedef struct
1805 {
1806     ae_int_t nmain;
1807     double epsg;
1808     double epsf;
1809     double epsx;
1810     ae_int_t maxits;
1811     ae_bool xrep;
1812     double stpmax;
1813     double diffstep;
1814     ae_vector s;
1815     ae_int_t prectype;
1816     ae_vector diagh;
1817     ae_vector x;
1818     double f;
1819     ae_vector g;
1820     ae_bool needf;
1821     ae_bool needfg;
1822     ae_bool xupdated;
1823     ae_bool userterminationneeded;
1824     rcommstate rstate;
1825     ae_vector xc;
1826     ae_vector ugc;
1827     ae_vector cgc;
1828     ae_vector xn;
1829     ae_vector ugn;
1830     ae_vector cgn;
1831     ae_vector xp;
1832     double fc;
1833     double fn;
1834     double fp;
1835     ae_vector d;
1836     double lastscaledgoodstep;
1837     ae_vector hasbndl;
1838     ae_vector hasbndu;
1839     ae_vector bndl;
1840     ae_vector bndu;
1841     ae_int_t repiterationscount;
1842     ae_int_t repnfev;
1843     ae_int_t repvaridx;
1844     ae_int_t repterminationtype;
1845     ae_vector xstart;
1846     double fbase;
1847     double fm2;
1848     double fm1;
1849     double fp1;
1850     double fp2;
1851     double xm1;
1852     double xp1;
1853     double gm1;
1854     double gp1;
1855     ae_vector tmpprec;
1856     ae_vector tmp0;
1857     ae_int_t nfev;
1858     ae_int_t mcstage;
1859     double stp;
1860     double curstpmax;
1861     ae_vector work;
1862     linminstate lstate;
1863     double trimthreshold;
1864     ae_int_t nonmonotoniccnt;
1865     ae_matrix bufyk;
1866     ae_matrix bufsk;
1867     ae_vector bufrho;
1868     ae_vector buftheta;
1869     ae_int_t bufsize;
1870     double teststep;
1871     ae_int_t smoothnessguardlevel;
1872     smoothnessmonitor smonitor;
1873     ae_vector lastscaleused;
1874     ae_vector invs;
1875 } minbcstate;
1876 typedef struct
1877 {
1878     ae_int_t iterationscount;
1879     ae_int_t nfev;
1880     ae_int_t varidx;
1881     ae_int_t terminationtype;
1882 } minbcreport;
1883 #endif
1884 #if defined(AE_COMPILE_OPTS) || !defined(AE_PARTIAL_BUILD)
1885 typedef struct
1886 {
1887     ae_int_t n;
1888     ae_bool hasknowntarget;
1889     double targetf;
1890     ae_vector s;
1891     ae_vector c;
1892     ae_vector bndl;
1893     ae_vector bndu;
1894     ae_int_t m;
1895     sparsematrix a;
1896     ae_vector al;
1897     ae_vector au;
1898 } lptestproblem;
1899 #endif
1900 
1901 }
1902 
1903 /////////////////////////////////////////////////////////////////////////
1904 //
1905 // THIS SECTION CONTAINS C++ INTERFACE
1906 //
1907 /////////////////////////////////////////////////////////////////////////
1908 namespace alglib
1909 {
1910 
1911 #if defined(AE_COMPILE_OPTGUARDAPI) || !defined(AE_PARTIAL_BUILD)
1912 /*************************************************************************
1913 This structure is used to store  OptGuard  report,  i.e.  report  on   the
1914 properties of the nonlinear function being optimized with ALGLIB.
1915 
After you tell your optimizer to activate OptGuard, this technology starts
to silently monitor function values and gradients/Jacobians being passed
around during your optimization session. Depending on the specific set of
checks enabled, OptGuard may perform additional function evaluations (say,
about 3*N evaluations if you want to check the analytic gradient for errors).

Upon discovering that something strange happens (function values and/or
gradient components change too sharply and/or unexpectedly), OptGuard sets
one of the "suspicion flags" (without interrupting the optimization session).
After optimization is done, you can examine the OptGuard report.
1926 
1927 Following report fields can be set:
1928 * nonc0suspected
1929 * nonc1suspected
1930 * badgradsuspected
1931 
1932 
1933 === WHAT CAN BE DETECTED WITH OptGuard INTEGRITY CHECKER =================
1934 
1935 Following  types  of  errors  in your target function (constraints) can be
1936 caught:
1937 a) discontinuous functions ("non-C0" part of the report)
1938 b) functions with discontinuous derivative ("non-C1" part of the report)
1939 c) errors in the analytic gradient provided by user
1940 
These types of errors result in the optimizer stopping well before reaching
the solution (most often right after encountering the discontinuity).

Type A errors are usually coding errors made during implementation of the
target function. Most "normal" problems involve continuous functions, and
in any case you can not reliably optimize a discontinuous function.
1947 
Type B errors are either coding errors or (in case the code itself is correct)
evidence that your problem is an "incorrect" one. Most optimizers (except for
the ones provided by the MINNS subpackage) do not support nonsmooth problems.
1952 
Type C errors are coding errors which often prevent the optimizer from making
even one step, or result in the optimizer stopping too early, as soon as the
actual descent direction becomes too different from the one suggested by the
user-supplied gradient.
1957 
1958 
1959 === WHAT IS REPORTED =====================================================
1960 
1961 Following set of report fields deals with discontinuous  target functions,
1962 ones not belonging to C0 continuity class:
1963 
1964 * nonc0suspected - is a flag which is set upon discovering some indication
1965   of the discontinuity. If this flag is false, the rest of "non-C0" fields
1966   should be ignored
1967 * nonc0fidx - is an index of the function (0 for  target  function,  1  or
1968   higher for nonlinear constraints) which is suspected of being "non-C0"
* nonc0lipschitzc - a Lipschitz constant for the function which was suspected
  of being discontinuous.
1971 * nonc0test0positive -  set  to  indicate  specific  test  which  detected
1972   continuity violation (test #0)
1973 
1974 Following set of report fields deals with discontinuous gradient/Jacobian,
1975 i.e. with functions violating C1 continuity:
1976 
1977 * nonc1suspected - is a flag which is set upon discovering some indication
1978   of the discontinuity. If this flag is false, the rest of "non-C1" fields
1979   should be ignored
1980 * nonc1fidx - is an index of the function (0 for  target  function,  1  or
1981   higher for nonlinear constraints) which is suspected of being "non-C1"
* nonc1lipschitzc - a Lipschitz constant for the function gradient which was
  suspected of being non-smooth.
1984 * nonc1test0positive -  set  to  indicate  specific  test  which  detected
1985   continuity violation (test #0)
1986 * nonc1test1positive -  set  to  indicate  specific  test  which  detected
1987   continuity violation (test #1)
1988 
1989 Following set of report fields deals with errors in the gradient:
* badgradsuspected - is a flag which is set upon discovering an error in
  the analytic gradient supplied by the user
1992 * badgradfidx - index  of   the  function  with bad gradient (0 for target
1993   function, 1 or higher for nonlinear constraints)
1994 * badgradvidx - index of the variable
1995 * badgradxbase - location where Jacobian is tested
1996 * following  matrices  store  user-supplied  Jacobian  and  its  numerical
1997   differentiation version (which is assumed to be  free  from  the  coding
1998   errors), both of them computed near the initial point:
1999   * badgraduser, an array[K,N], analytic Jacobian supplied by user
2000   * badgradnum,  an array[K,N], numeric  Jacobian computed by ALGLIB
  Here K is the total number of nonlinear functions (target + nonlinear
  constraints) and N is the number of variables.
2003   The  element  of  badgraduser[] with index [badgradfidx,badgradvidx]  is
2004   assumed to be wrong.
2005 
2006 More detailed error log can  be  obtained  from  optimizer  by  explicitly
2007 requesting reports for tests C0.0, C1.0, C1.1.
2008 
2009   -- ALGLIB --
2010      Copyright 19.11.2018 by Bochkanov Sergey
2011 *************************************************************************/
2012 class _optguardreport_owner
2013 {
2014 public:
2015     _optguardreport_owner();
2016     _optguardreport_owner(const _optguardreport_owner &rhs);
2017     _optguardreport_owner& operator=(const _optguardreport_owner &rhs);
2018     virtual ~_optguardreport_owner();
2019     alglib_impl::optguardreport* c_ptr();
2020     alglib_impl::optguardreport* c_ptr() const;
2021 protected:
2022     alglib_impl::optguardreport *p_struct;
2023 };
2024 class optguardreport : public _optguardreport_owner
2025 {
2026 public:
2027     optguardreport();
2028     optguardreport(const optguardreport &rhs);
2029     optguardreport& operator=(const optguardreport &rhs);
2030     virtual ~optguardreport();
2031     ae_bool &nonc0suspected;
2032     ae_bool &nonc0test0positive;
2033     ae_int_t &nonc0fidx;
2034     double &nonc0lipschitzc;
2035     ae_bool &nonc1suspected;
2036     ae_bool &nonc1test0positive;
2037     ae_bool &nonc1test1positive;
2038     ae_int_t &nonc1fidx;
2039     double &nonc1lipschitzc;
2040     ae_bool &badgradsuspected;
2041     ae_int_t &badgradfidx;
2042     ae_int_t &badgradvidx;
2043     real_1d_array badgradxbase;
2044     real_2d_array badgraduser;
2045     real_2d_array badgradnum;
2046 
2047 };
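
/*************************************************************************
The sketch below shows one possible way to inspect an optguardreport  once
it has been retrieved from a solver (each optimizer has its  own  OptGuard
results function, e.g. minlbfgsoptguardresults() for the  L-BFGS  solver).
It is only an illustration: the helper name is arbitrary, the header  name
"optimization.h" is assumed, and only fields described in the comment
above are accessed.

    #include <cstdio>
    #include "optimization.h"

    static void print_optguard_summary(const alglib::optguardreport &rep)
    {
        // C0/C1 continuity checks: the detail fields are meaningful only
        // when the corresponding "suspected" flag is set.
        if( rep.nonc0suspected )
            std::printf("C0 violation suspected in function %d\n", (int)rep.nonc0fidx);
        if( rep.nonc1suspected )
            std::printf("C1 violation suspected in function %d\n", (int)rep.nonc1fidx);

        // Gradient verification: compare the analytic and numerical
        // Jacobians at the element flagged as wrong.
        if( rep.badgradsuspected )
        {
            int i = (int)rep.badgradfidx, j = (int)rep.badgradvidx;
            std::printf("bad gradient: function %d, variable %d\n", i, j);
            std::printf("  user-supplied: %.6e\n", rep.badgraduser[i][j]);
            std::printf("  numerical:     %.6e\n", rep.badgradnum[i][j]);
        }
    }
*************************************************************************/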
2048 
2049 
2050 /*************************************************************************
2051 This  structure  is  used  for  detailed   reporting  about  suspected  C0
2052 continuity violation.
2053 
2054 === WHAT IS TESTED =======================================================
2055 
The C0 test studies function values (not gradients!) obtained during line
searches and monitors an estimate of the Lipschitz constant. Sudden spikes
of this estimate usually indicate that a discontinuity was detected.
2059 
2060 
2061 === WHAT IS REPORTED =====================================================
2062 
The report retrieval function actually returns TWO report structures:

* one for the most suspicious point found so far (the one with the highest
  change in the function value), the so-called "strongest" report
* another one for the most detailed line search (more function evaluations
  = easier to understand what is going on) which triggered the test #0
  criteria, the so-called "longest" report

In both cases the following fields are returned:

* positive - is TRUE when the test flagged a suspicious point; FALSE if the
  test did not notice anything (in the latter case the fields below are
  empty)
* fidx - index of the function (0 for the target function, 1 or higher for
  nonlinear constraints) which is suspected of being "non-C0"
* x0[], d[] - arrays of length N which store the initial point and the
  direction of the line search (d[] may be normalized, but does not have
  to be)
* stp[], f[] - arrays of length CNT which store step lengths and function
  values at these points; f[i] is evaluated at x0+stp[i]*d
* stpidxa, stpidxb - we suspect that the function violates C0 continuity
  between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3,
  with the most likely position of the violation between stpidxa+1 and
  stpidxa+2)

You can plot the function values stored in the stp[] and f[] arrays and
study the behavior of your function with your own eyes, just to make sure
that the test correctly reported a C0 violation.
2089 
2090   -- ALGLIB --
2091      Copyright 19.11.2018 by Bochkanov Sergey
2092 *************************************************************************/
2093 class _optguardnonc0report_owner
2094 {
2095 public:
2096     _optguardnonc0report_owner();
2097     _optguardnonc0report_owner(const _optguardnonc0report_owner &rhs);
2098     _optguardnonc0report_owner& operator=(const _optguardnonc0report_owner &rhs);
2099     virtual ~_optguardnonc0report_owner();
2100     alglib_impl::optguardnonc0report* c_ptr();
2101     alglib_impl::optguardnonc0report* c_ptr() const;
2102 protected:
2103     alglib_impl::optguardnonc0report *p_struct;
2104 };
2105 class optguardnonc0report : public _optguardnonc0report_owner
2106 {
2107 public:
2108     optguardnonc0report();
2109     optguardnonc0report(const optguardnonc0report &rhs);
2110     optguardnonc0report& operator=(const optguardnonc0report &rhs);
2111     virtual ~optguardnonc0report();
2112     ae_bool &positive;
2113     ae_int_t &fidx;
2114     real_1d_array x0;
2115     real_1d_array d;
2116     ae_int_t &n;
2117     real_1d_array stp;
2118     real_1d_array f;
2119     ae_int_t &cnt;
2120     ae_int_t &stpidxa;
2121     ae_int_t &stpidxb;
2122 
2123 };
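
/*************************************************************************
A minimal sketch of how the line-search profile stored in a retrieved
optguardnonc0report may be dumped for manual inspection, as suggested
above. The helper name is arbitrary and the header name "optimization.h"
is assumed; only fields documented above are used.

    #include <cstdio>
    #include "optimization.h"

    static void dump_c0_profile(const alglib::optguardnonc0report &rep)
    {
        if( !rep.positive )
            return;
        std::printf("suspected C0 violation in function %d\n", (int)rep.fidx);
        std::printf("suspicious interval: steps %d..%d\n",
                    (int)rep.stpidxa, (int)rep.stpidxb);
        // f[i] was evaluated at x0+stp[i]*d, see the field descriptions above
        for(int i=0; i<rep.cnt; i++)
            std::printf("stp=%+.6e  f=%+.6e\n", rep.stp[i], rep.f[i]);
    }
*************************************************************************/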
2124 
2125 
2126 /*************************************************************************
2127 This  structure  is  used  for  detailed   reporting  about  suspected  C1
2128 continuity violation as flagged by C1 test #0 (OptGuard  has several tests
2129 for C1 continuity, this report is used by #0).
2130 
2131 === WHAT IS TESTED =======================================================
2132 
C1 test #0 studies function values (not gradients!) obtained during line
searches and monitors the behavior of the directional derivative estimate.
This test is less powerful than test #1, but it does not depend on gradient
values and thus it is more robust against artifacts introduced by numerical
differentiation.
2138 
2139 
2140 === WHAT IS REPORTED =====================================================
2141 
The report retrieval function actually returns TWO report structures:

* one for the most suspicious point found so far (the one with the highest
  change in the directional derivative), the so-called "strongest" report
* another one for the most detailed line search (more function evaluations
  = easier to understand what is going on) which triggered the test #0
  criteria, the so-called "longest" report

In both cases the following fields are returned:

* positive - is TRUE when the test flagged a suspicious point; FALSE if the
  test did not notice anything (in the latter case the fields below are
  empty)
* fidx - index of the function (0 for the target function, 1 or higher for
  nonlinear constraints) which is suspected of being "non-C1"
* x0[], d[] - arrays of length N which store the initial point and the
  direction of the line search (d[] may be normalized, but does not have
  to be)
* stp[], f[] - arrays of length CNT which store step lengths and function
  values at these points; f[i] is evaluated at x0+stp[i]*d
* stpidxa, stpidxb - we suspect that the function violates C1 continuity
  between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3,
  with the most likely position of the violation between stpidxa+1 and
  stpidxa+2)

You can plot the function values stored in the stp[] and f[] arrays and
study the behavior of your function with your own eyes, just to make sure
that the test correctly reported a C1 violation.
2168 
2169   -- ALGLIB --
2170      Copyright 19.11.2018 by Bochkanov Sergey
2171 *************************************************************************/
2172 class _optguardnonc1test0report_owner
2173 {
2174 public:
2175     _optguardnonc1test0report_owner();
2176     _optguardnonc1test0report_owner(const _optguardnonc1test0report_owner &rhs);
2177     _optguardnonc1test0report_owner& operator=(const _optguardnonc1test0report_owner &rhs);
2178     virtual ~_optguardnonc1test0report_owner();
2179     alglib_impl::optguardnonc1test0report* c_ptr();
2180     alglib_impl::optguardnonc1test0report* c_ptr() const;
2181 protected:
2182     alglib_impl::optguardnonc1test0report *p_struct;
2183 };
2184 class optguardnonc1test0report : public _optguardnonc1test0report_owner
2185 {
2186 public:
2187     optguardnonc1test0report();
2188     optguardnonc1test0report(const optguardnonc1test0report &rhs);
2189     optguardnonc1test0report& operator=(const optguardnonc1test0report &rhs);
2190     virtual ~optguardnonc1test0report();
2191     ae_bool &positive;
2192     ae_int_t &fidx;
2193     real_1d_array x0;
2194     real_1d_array d;
2195     ae_int_t &n;
2196     real_1d_array stp;
2197     real_1d_array f;
2198     ae_int_t &cnt;
2199     ae_int_t &stpidxa;
2200     ae_int_t &stpidxb;
2201 
2202 };
2203 
2204 
2205 /*************************************************************************
2206 This  structure  is  used  for  detailed   reporting  about  suspected  C1
2207 continuity violation as flagged by C1 test #1 (OptGuard  has several tests
2208 for C1 continuity, this report is used by #1).
2209 
2210 === WHAT IS TESTED =======================================================
2211 
C1 test #1 studies individual components of the gradient as recorded during
line searches. Upon discovering a discontinuity in the gradient, this test
records the specific component which was suspected (or the one with the
highest indication of discontinuity if multiple components are suspected).

When a precise analytic gradient is provided, this test is more powerful
than test #0, which works with function values and ignores the
user-provided gradient. However, test #0 becomes more powerful when
numerical differentiation is employed (in such cases test #1 picks up
higher levels of numerical noise and becomes too conservative).

This test also reports the specific components of the gradient which
violate C1 continuity, which makes it more informative than #0, which just
reports that continuity is violated.
2226 
2227 
2228 === WHAT IS REPORTED =====================================================
2229 
The report retrieval function actually returns TWO report structures:

* one for the most suspicious point found so far (the one with the highest
  change in the directional derivative), the so-called "strongest" report
* another one for the most detailed line search (more function evaluations
  = easier to understand what is going on) which triggered the test #1
  criteria, the so-called "longest" report

In both cases the following fields are returned:

* positive - is TRUE when the test flagged a suspicious point; FALSE if the
  test did not notice anything (in the latter case the fields below are
  empty)
* fidx - index of the function (0 for the target function, 1 or higher for
  nonlinear constraints) which is suspected of being "non-C1"
* vidx - index of the variable in [0,N) with the nonsmooth derivative
* x0[], d[] - arrays of length N which store the initial point and the
  direction of the line search (d[] may be normalized, but does not have
  to be)
* stp[], g[] - arrays of length CNT which store step lengths and gradient
  values at these points; g[i] is evaluated at x0+stp[i]*d and contains
  the vidx-th component of the gradient
* stpidxa, stpidxb - we suspect that the function violates C1 continuity
  between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3,
  with the most likely position of the violation between stpidxa+1 and
  stpidxa+2)

You can plot the gradient values stored in the stp[] and g[] arrays and
study the behavior of your function with your own eyes, just to make sure
that the test correctly reported a C1 violation.
2258 
2259   -- ALGLIB --
2260      Copyright 19.11.2018 by Bochkanov Sergey
2261 *************************************************************************/
2262 class _optguardnonc1test1report_owner
2263 {
2264 public:
2265     _optguardnonc1test1report_owner();
2266     _optguardnonc1test1report_owner(const _optguardnonc1test1report_owner &rhs);
2267     _optguardnonc1test1report_owner& operator=(const _optguardnonc1test1report_owner &rhs);
2268     virtual ~_optguardnonc1test1report_owner();
2269     alglib_impl::optguardnonc1test1report* c_ptr();
2270     alglib_impl::optguardnonc1test1report* c_ptr() const;
2271 protected:
2272     alglib_impl::optguardnonc1test1report *p_struct;
2273 };
2274 class optguardnonc1test1report : public _optguardnonc1test1report_owner
2275 {
2276 public:
2277     optguardnonc1test1report();
2278     optguardnonc1test1report(const optguardnonc1test1report &rhs);
2279     optguardnonc1test1report& operator=(const optguardnonc1test1report &rhs);
2280     virtual ~optguardnonc1test1report();
2281     ae_bool &positive;
2282     ae_int_t &fidx;
2283     ae_int_t &vidx;
2284     real_1d_array x0;
2285     real_1d_array d;
2286     ae_int_t &n;
2287     real_1d_array stp;
2288     real_1d_array g;
2289     ae_int_t &cnt;
2290     ae_int_t &stpidxa;
2291     ae_int_t &stpidxb;
2292 
2293 };
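
/*************************************************************************
A minimal sketch of how the per-component gradient profile stored in a
retrieved optguardnonc1test1report may be dumped for manual inspection.
The helper name is arbitrary and the header name "optimization.h" is
assumed; only fields documented above are used.

    #include <cstdio>
    #include "optimization.h"

    static void dump_c1_gradient_profile(const alglib::optguardnonc1test1report &rep)
    {
        if( !rep.positive )
            return;
        std::printf("suspected C1 violation: function %d, variable %d\n",
                    (int)rep.fidx, (int)rep.vidx);
        // g[i] stores the vidx-th gradient component evaluated at x0+stp[i]*d;
        // a jump between steps stpidxa and stpidxb is what triggered the test
        for(int i=0; i<rep.cnt; i++)
            std::printf("stp=%+.6e  dF/dv=%+.6e\n", rep.stp[i], rep.g[i]);
    }
*************************************************************************/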
2294 #endif
2295 
2296 #if defined(AE_COMPILE_OPTSERV) || !defined(AE_PARTIAL_BUILD)
2297 
2298 #endif
2299 
2300 #if defined(AE_COMPILE_MINLBFGS) || !defined(AE_PARTIAL_BUILD)
2301 /*************************************************************************
2302 
2303 *************************************************************************/
2304 class _minlbfgsstate_owner
2305 {
2306 public:
2307     _minlbfgsstate_owner();
2308     _minlbfgsstate_owner(const _minlbfgsstate_owner &rhs);
2309     _minlbfgsstate_owner& operator=(const _minlbfgsstate_owner &rhs);
2310     virtual ~_minlbfgsstate_owner();
2311     alglib_impl::minlbfgsstate* c_ptr();
2312     alglib_impl::minlbfgsstate* c_ptr() const;
2313 protected:
2314     alglib_impl::minlbfgsstate *p_struct;
2315 };
2316 class minlbfgsstate : public _minlbfgsstate_owner
2317 {
2318 public:
2319     minlbfgsstate();
2320     minlbfgsstate(const minlbfgsstate &rhs);
2321     minlbfgsstate& operator=(const minlbfgsstate &rhs);
2322     virtual ~minlbfgsstate();
2323     ae_bool &needf;
2324     ae_bool &needfg;
2325     ae_bool &xupdated;
2326     double &f;
2327     real_1d_array g;
2328     real_1d_array x;
2329 
2330 };
2331 
2332 
2333 /*************************************************************************
2334 This structure stores optimization report:
2335 * IterationsCount           total number of inner iterations
2336 * NFEV                      number of gradient evaluations
2337 * TerminationType           termination type (see below)
2338 
2339 TERMINATION CODES
2340 
2341 TerminationType field contains completion code, which can be:
2342   -8    internal integrity control detected  infinite  or  NAN  values  in
2343         function/gradient. Abnormal termination signalled.
2344    1    relative function improvement is no more than EpsF.
2345    2    relative step is no more than EpsX.
2346    4    gradient norm is no more than EpsG
   5    MaxIts steps were taken
2348    7    stopping conditions are too stringent,
2349         further improvement is impossible,
2350         X contains best point found so far.
2351    8    terminated    by  user  who  called  minlbfgsrequesttermination().
2352         X contains point which was   "current accepted"  when  termination
2353         request was submitted.
2354 
2355 Other fields of this structure are not documented and should not be used!
2356 *************************************************************************/
2357 class _minlbfgsreport_owner
2358 {
2359 public:
2360     _minlbfgsreport_owner();
2361     _minlbfgsreport_owner(const _minlbfgsreport_owner &rhs);
2362     _minlbfgsreport_owner& operator=(const _minlbfgsreport_owner &rhs);
2363     virtual ~_minlbfgsreport_owner();
2364     alglib_impl::minlbfgsreport* c_ptr();
2365     alglib_impl::minlbfgsreport* c_ptr() const;
2366 protected:
2367     alglib_impl::minlbfgsreport *p_struct;
2368 };
2369 class minlbfgsreport : public _minlbfgsreport_owner
2370 {
2371 public:
2372     minlbfgsreport();
2373     minlbfgsreport(const minlbfgsreport &rhs);
2374     minlbfgsreport& operator=(const minlbfgsreport &rhs);
2375     virtual ~minlbfgsreport();
2376     ae_int_t &iterationscount;
2377     ae_int_t &nfev;
2378     ae_int_t &terminationtype;
2379 
2380 };
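
/*************************************************************************
A small sketch mapping the minlbfgsreport completion codes documented
above to human-readable strings; the helper name is arbitrary and the
header name "optimization.h" is assumed.

    #include "optimization.h"

    static const char* lbfgs_termination_reason(const alglib::minlbfgsreport &rep)
    {
        switch( (int)rep.terminationtype )
        {
        case -8: return "infinite/NAN values in function or gradient";
        case  1: return "relative function improvement below EpsF";
        case  2: return "relative step below EpsX";
        case  4: return "gradient norm below EpsG";
        case  5: return "MaxIts iterations taken";
        case  7: return "stopping conditions too stringent";
        case  8: return "terminated by user request";
        default: return "other/undocumented code";
        }
    }
*************************************************************************/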
2381 #endif
2382 
2383 #if defined(AE_COMPILE_CQMODELS) || !defined(AE_PARTIAL_BUILD)
2384 
2385 #endif
2386 
2387 #if defined(AE_COMPILE_LPQPSERV) || !defined(AE_PARTIAL_BUILD)
2388 
2389 #endif
2390 
2391 #if defined(AE_COMPILE_SNNLS) || !defined(AE_PARTIAL_BUILD)
2392 
2393 #endif
2394 
2395 #if defined(AE_COMPILE_SACTIVESETS) || !defined(AE_PARTIAL_BUILD)
2396 
2397 #endif
2398 
2399 #if defined(AE_COMPILE_QQPSOLVER) || !defined(AE_PARTIAL_BUILD)
2400 
2401 #endif
2402 
2403 #if defined(AE_COMPILE_QPDENSEAULSOLVER) || !defined(AE_PARTIAL_BUILD)
2404 
2405 #endif
2406 
2407 #if defined(AE_COMPILE_MINBLEIC) || !defined(AE_PARTIAL_BUILD)
2408 /*************************************************************************
2409 This object stores nonlinear optimizer state.
2410 You should use functions provided by MinBLEIC subpackage to work with this
2411 object
2412 *************************************************************************/
2413 class _minbleicstate_owner
2414 {
2415 public:
2416     _minbleicstate_owner();
2417     _minbleicstate_owner(const _minbleicstate_owner &rhs);
2418     _minbleicstate_owner& operator=(const _minbleicstate_owner &rhs);
2419     virtual ~_minbleicstate_owner();
2420     alglib_impl::minbleicstate* c_ptr();
2421     alglib_impl::minbleicstate* c_ptr() const;
2422 protected:
2423     alglib_impl::minbleicstate *p_struct;
2424 };
2425 class minbleicstate : public _minbleicstate_owner
2426 {
2427 public:
2428     minbleicstate();
2429     minbleicstate(const minbleicstate &rhs);
2430     minbleicstate& operator=(const minbleicstate &rhs);
2431     virtual ~minbleicstate();
2432     ae_bool &needf;
2433     ae_bool &needfg;
2434     ae_bool &xupdated;
2435     double &f;
2436     real_1d_array g;
2437     real_1d_array x;
2438 
2439 };
2440 
2441 
2442 /*************************************************************************
2443 This structure stores optimization report:
2444 * IterationsCount           number of iterations
2445 * NFEV                      number of gradient evaluations
2446 * TerminationType           termination type (see below)
2447 
2448 TERMINATION CODES
2449 
2450 TerminationType field contains completion code, which can be:
2451   -8    internal integrity control detected  infinite  or  NAN  values  in
2452         function/gradient. Abnormal termination signalled.
2453   -3    inconsistent constraints. Feasible point is
2454         either nonexistent or too hard to find. Try to
2455         restart optimizer with better initial approximation
2456    1    relative function improvement is no more than EpsF.
2457    2    relative step is no more than EpsX.
2458    4    gradient norm is no more than EpsG
   5    MaxIts steps were taken
2460    7    stopping conditions are too stringent,
2461         further improvement is impossible,
2462         X contains best point found so far.
2463    8    terminated by user who called minbleicrequesttermination(). X contains
2464         point which was "current accepted" when  termination  request  was
2465         submitted.
2466 
2467 ADDITIONAL FIELDS
2468 
2469 There are additional fields which can be used for debugging:
2470 * DebugEqErr                error in the equality constraints (2-norm)
2471 * DebugFS                   f, calculated at projection of initial point
2472                             to the feasible set
2473 * DebugFF                   f, calculated at the final point
2474 * DebugDX                   |X_start-X_final|
2475 *************************************************************************/
2476 class _minbleicreport_owner
2477 {
2478 public:
2479     _minbleicreport_owner();
2480     _minbleicreport_owner(const _minbleicreport_owner &rhs);
2481     _minbleicreport_owner& operator=(const _minbleicreport_owner &rhs);
2482     virtual ~_minbleicreport_owner();
2483     alglib_impl::minbleicreport* c_ptr();
2484     alglib_impl::minbleicreport* c_ptr() const;
2485 protected:
2486     alglib_impl::minbleicreport *p_struct;
2487 };
2488 class minbleicreport : public _minbleicreport_owner
2489 {
2490 public:
2491     minbleicreport();
2492     minbleicreport(const minbleicreport &rhs);
2493     minbleicreport& operator=(const minbleicreport &rhs);
2494     virtual ~minbleicreport();
2495     ae_int_t &iterationscount;
2496     ae_int_t &nfev;
2497     ae_int_t &varidx;
2498     ae_int_t &terminationtype;
2499     double &debugeqerr;
2500     double &debugfs;
2501     double &debugff;
2502     double &debugdx;
2503     ae_int_t &debugfeasqpits;
2504     ae_int_t &debugfeasgpaits;
2505     ae_int_t &inneriterationscount;
2506     ae_int_t &outeriterationscount;
2507 
2508 };
2509 #endif
2510 
2511 #if defined(AE_COMPILE_QPBLEICSOLVER) || !defined(AE_PARTIAL_BUILD)
2512 
2513 #endif
2514 
2515 #if defined(AE_COMPILE_VIPMSOLVER) || !defined(AE_PARTIAL_BUILD)
2516 
2517 #endif
2518 
2519 #if defined(AE_COMPILE_MINQP) || !defined(AE_PARTIAL_BUILD)
2520 /*************************************************************************
2521 This object stores nonlinear optimizer state.
2522 You should use functions provided by MinQP subpackage to work with this
2523 object
2524 *************************************************************************/
2525 class _minqpstate_owner
2526 {
2527 public:
2528     _minqpstate_owner();
2529     _minqpstate_owner(const _minqpstate_owner &rhs);
2530     _minqpstate_owner& operator=(const _minqpstate_owner &rhs);
2531     virtual ~_minqpstate_owner();
2532     alglib_impl::minqpstate* c_ptr();
2533     alglib_impl::minqpstate* c_ptr() const;
2534 protected:
2535     alglib_impl::minqpstate *p_struct;
2536 };
2537 class minqpstate : public _minqpstate_owner
2538 {
2539 public:
2540     minqpstate();
2541     minqpstate(const minqpstate &rhs);
2542     minqpstate& operator=(const minqpstate &rhs);
2543     virtual ~minqpstate();
2544 
2545 };
2546 
2547 
2548 /*************************************************************************
2549 This structure stores optimization report:
2550 * InnerIterationsCount      number of inner iterations
2551 * OuterIterationsCount      number of outer iterations
* NCholesky                 number of Cholesky decompositions
2553 * NMV                       number of matrix-vector products
2554                             (only products calculated as part of iterative
2555                             process are counted)
2556 * TerminationType           completion code (see below)
2557 * LagBC                     Lagrange multipliers for box constraints,
2558                             array[N], not filled by QP-BLEIC solver
2559 * LagLC                     Lagrange multipliers for linear constraints,
2560                             array[MSparse+MDense], ignored by QP-BLEIC solver
2561 
2562 === COMPLETION CODES =====================================================
2563 
2564 Completion codes:
2565 * -9    failure of the automatic scale evaluation:  one  of  the  diagonal
2566         elements of the quadratic term is non-positive.  Specify  variable
2567         scales manually!
2568 * -5    inappropriate solver was used:
2569         * QuickQP solver for problem with general linear constraints (dense/sparse)
2570 * -4    BLEIC-QP or QuickQP solver found unconstrained direction
2571         of negative curvature (function is unbounded from
2572         below  even  under  constraints),  no  meaningful
2573         minimum can be found.
2574 * -3    inconsistent constraints (or, maybe, feasible point is
2575         too hard to find). If you are sure that constraints are feasible,
2576         try to restart optimizer with better initial approximation.
2577 * -2    IPM solver has difficulty finding primal/dual feasible point.
2578         It is likely that the problem is either infeasible or unbounded,
2579         but it is difficult to determine exact reason for termination.
2580         X contains best point found so far.
2581 *  1..4 successful completion
*  5    MaxIts steps were taken
2583 *  7    stopping conditions are too stringent,
2584         further improvement is impossible,
2585         X contains best point found so far.
2586 
2587 === LAGRANGE MULTIPLIERS =================================================
2588 
2589 Some  optimizers  report  values of  Lagrange  multipliers  on  successful
2590 completion (positive completion code):
2591 * DENSE-IPM-QP and SPARSE-IPM-QP return very precise Lagrange  multipliers
2592   as determined during solution process.
2593 * DENSE-AUL-QP returns approximate Lagrange multipliers  (which  are  very
2594   close to "true"  Lagrange  multipliers  except  for  overconstrained  or
2595   degenerate problems)
2596 
Two arrays of multipliers are returned:
* LagBC is array[N] which is loaded with multipliers from box constraints;
  LagBC[i]>0 means that the I-th constraint is at the upper bound,
  LagBC[i]<0 means that the I-th constraint is at the lower bound, and
  LagBC[i]=0 means that the I-th box constraint is inactive.
* LagLC is array[MSparse+MDense] which is loaded with multipliers from the
  general linear constraints (the first MSparse elements correspond to the
  sparse part of the constraint matrix, the last MDense elements to the
  dense constraints, as specified by the user).
  LagLC[i]>0 means that the I-th constraint is at the upper bound,
  LagLC[i]<0 means that the I-th constraint is at the lower bound, and
  LagLC[i]=0 means that the I-th linear constraint is inactive.
2609 
2610 On failure (or when optimizer does not support Lagrange multipliers) these
2611 arrays are zero-filled.
2612 
It is expected that at the solution the dual feasibility condition holds:
2614 
2615     C+H*(Xs-X0) + SUM(Ei*LagBC[i],i=0..n-1) + SUM(Ai*LagLC[i],i=0..m-1) ~ 0
2616 
2617 where
2618 * C is a linear term
2619 * H is a quadratic term
2620 * Xs is a solution, and X0 is an origin term (zero by default)
2621 * Ei is a vector with 1.0 at position I and 0 in other positions
2622 * Ai is an I-th row of linear constraint matrix
2623 
2624 NOTE: methods  from  IPM  family  may  also  return  meaningful   Lagrange
2625       multipliers  on  completion   with   code   -2   (infeasibility   or
2626       unboundedness  detected).
2627 *************************************************************************/
2628 class _minqpreport_owner
2629 {
2630 public:
2631     _minqpreport_owner();
2632     _minqpreport_owner(const _minqpreport_owner &rhs);
2633     _minqpreport_owner& operator=(const _minqpreport_owner &rhs);
2634     virtual ~_minqpreport_owner();
2635     alglib_impl::minqpreport* c_ptr();
2636     alglib_impl::minqpreport* c_ptr() const;
2637 protected:
2638     alglib_impl::minqpreport *p_struct;
2639 };
2640 class minqpreport : public _minqpreport_owner
2641 {
2642 public:
2643     minqpreport();
2644     minqpreport(const minqpreport &rhs);
2645     minqpreport& operator=(const minqpreport &rhs);
2646     virtual ~minqpreport();
2647     ae_int_t &inneriterationscount;
2648     ae_int_t &outeriterationscount;
2649     ae_int_t &nmv;
2650     ae_int_t &ncholesky;
2651     ae_int_t &terminationtype;
2652     real_1d_array lagbc;
2653     real_1d_array laglc;
2654 
2655 };
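
/*************************************************************************
The sketch below illustrates the dual feasibility condition stated above
for a small dense QP with a zero origin term (X0=0). It is only an
illustration: the helper name is arbitrary, the header name
"optimization.h" is assumed, and h/c/a/xs are assumed to be the caller's
own copies of the quadratic term, linear term, dense constraint matrix
and solution.

    #include <cmath>
    #include "optimization.h"

    static double dual_residual_norm(int n, int m,
                                     const alglib::real_2d_array &h,   // array[n,n]
                                     const alglib::real_1d_array &c,   // array[n]
                                     const alglib::real_2d_array &a,   // array[m,n]
                                     const alglib::real_1d_array &xs,  // array[n]
                                     const alglib::minqpreport   &rep)
    {
        double nrm = 0.0;
        for(int j=0; j<n; j++)
        {
            double r = c[j] + rep.lagbc[j];      // C + SUM(Ei*LagBC[i]), j-th component
            for(int k=0; k<n; k++)
                r += h[j][k]*xs[k];              // H*(Xs-X0) with X0=0
            for(int i=0; i<m; i++)
                r += a[i][j]*rep.laglc[i];       // SUM(Ai*LagLC[i]), j-th component
            nrm += r*r;
        }
        return std::sqrt(nrm);                   // expected to be ~0 at the solution
    }
*************************************************************************/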
2656 #endif
2657 
2658 #if defined(AE_COMPILE_MINLM) || !defined(AE_PARTIAL_BUILD)
2659 /*************************************************************************
2660 Levenberg-Marquardt optimizer.
2661 
2662 This structure should be created using one of the MinLMCreate???()
2663 functions. You should not access its fields directly; use ALGLIB functions
2664 to work with it.
2665 *************************************************************************/
2666 class _minlmstate_owner
2667 {
2668 public:
2669     _minlmstate_owner();
2670     _minlmstate_owner(const _minlmstate_owner &rhs);
2671     _minlmstate_owner& operator=(const _minlmstate_owner &rhs);
2672     virtual ~_minlmstate_owner();
2673     alglib_impl::minlmstate* c_ptr();
2674     alglib_impl::minlmstate* c_ptr() const;
2675 protected:
2676     alglib_impl::minlmstate *p_struct;
2677 };
2678 class minlmstate : public _minlmstate_owner
2679 {
2680 public:
2681     minlmstate();
2682     minlmstate(const minlmstate &rhs);
2683     minlmstate& operator=(const minlmstate &rhs);
2684     virtual ~minlmstate();
2685     ae_bool &needf;
2686     ae_bool &needfg;
2687     ae_bool &needfgh;
2688     ae_bool &needfi;
2689     ae_bool &needfij;
2690     ae_bool &xupdated;
2691     double &f;
2692     real_1d_array fi;
2693     real_1d_array g;
2694     real_2d_array h;
2695     real_2d_array j;
2696     real_1d_array x;
2697 
2698 };
2699 
2700 
2701 /*************************************************************************
2702 Optimization report, filled by MinLMResults() function
2703 
2704 FIELDS:
* TerminationType, completion code:
2706     * -8    optimizer detected NAN/INF values either in the function itself,
2707             or in its Jacobian
2708     * -5    inappropriate solver was used:
2709             * solver created with minlmcreatefgh() used  on  problem  with
2710               general linear constraints (set with minlmsetlc() call).
2711     * -3    constraints are inconsistent
2712     *  2    relative step is no more than EpsX.
    *  5    MaxIts steps were taken
2714     *  7    stopping conditions are too stringent,
2715             further improvement is impossible
2716     *  8    terminated   by  user  who  called  MinLMRequestTermination().
2717             X contains point which was "current accepted" when termination
2718             request was submitted.
* IterationsCount, contains the iteration count
* NFunc, number of function calculations
* NJac, number of Jacobian matrix calculations
* NGrad, number of gradient calculations
* NHess, number of Hessian calculations
* NCholesky, number of Cholesky decomposition calculations
2725 *************************************************************************/
2726 class _minlmreport_owner
2727 {
2728 public:
2729     _minlmreport_owner();
2730     _minlmreport_owner(const _minlmreport_owner &rhs);
2731     _minlmreport_owner& operator=(const _minlmreport_owner &rhs);
2732     virtual ~_minlmreport_owner();
2733     alglib_impl::minlmreport* c_ptr();
2734     alglib_impl::minlmreport* c_ptr() const;
2735 protected:
2736     alglib_impl::minlmreport *p_struct;
2737 };
2738 class minlmreport : public _minlmreport_owner
2739 {
2740 public:
2741     minlmreport();
2742     minlmreport(const minlmreport &rhs);
2743     minlmreport& operator=(const minlmreport &rhs);
2744     virtual ~minlmreport();
2745     ae_int_t &iterationscount;
2746     ae_int_t &terminationtype;
2747     ae_int_t &nfunc;
2748     ae_int_t &njac;
2749     ae_int_t &ngrad;
2750     ae_int_t &nhess;
2751     ae_int_t &ncholesky;
2752 
2753 };
2754 #endif
2755 
2756 #if defined(AE_COMPILE_MINCG) || !defined(AE_PARTIAL_BUILD)
2757 /*************************************************************************
2758 This object stores state of the nonlinear CG optimizer.
2759 
2760 You should use ALGLIB functions to work with this object.
2761 *************************************************************************/
2762 class _mincgstate_owner
2763 {
2764 public:
2765     _mincgstate_owner();
2766     _mincgstate_owner(const _mincgstate_owner &rhs);
2767     _mincgstate_owner& operator=(const _mincgstate_owner &rhs);
2768     virtual ~_mincgstate_owner();
2769     alglib_impl::mincgstate* c_ptr();
2770     alglib_impl::mincgstate* c_ptr() const;
2771 protected:
2772     alglib_impl::mincgstate *p_struct;
2773 };
2774 class mincgstate : public _mincgstate_owner
2775 {
2776 public:
2777     mincgstate();
2778     mincgstate(const mincgstate &rhs);
2779     mincgstate& operator=(const mincgstate &rhs);
2780     virtual ~mincgstate();
2781     ae_bool &needf;
2782     ae_bool &needfg;
2783     ae_bool &xupdated;
2784     double &f;
2785     real_1d_array g;
2786     real_1d_array x;
2787 
2788 };
2789 
2790 
2791 /*************************************************************************
2792 This structure stores optimization report:
2793 * IterationsCount           total number of inner iterations
2794 * NFEV                      number of gradient evaluations
2795 * TerminationType           termination type (see below)
2796 
2797 TERMINATION CODES
2798 
2799 TerminationType field contains completion code, which can be:
2800   -8    internal integrity control detected  infinite  or  NAN  values  in
2801         function/gradient. Abnormal termination signalled.
2802    1    relative function improvement is no more than EpsF.
2803    2    relative step is no more than EpsX.
2804    4    gradient norm is no more than EpsG
   5    MaxIts steps were taken
2806    7    stopping conditions are too stringent,
2807         further improvement is impossible,
2808         X contains best point found so far.
2809    8    terminated by user who called mincgrequesttermination(). X contains
2810         point which was "current accepted" when  termination  request  was
2811         submitted.
2812 
2813 Other fields of this structure are not documented and should not be used!
2814 *************************************************************************/
2815 class _mincgreport_owner
2816 {
2817 public:
2818     _mincgreport_owner();
2819     _mincgreport_owner(const _mincgreport_owner &rhs);
2820     _mincgreport_owner& operator=(const _mincgreport_owner &rhs);
2821     virtual ~_mincgreport_owner();
2822     alglib_impl::mincgreport* c_ptr();
2823     alglib_impl::mincgreport* c_ptr() const;
2824 protected:
2825     alglib_impl::mincgreport *p_struct;
2826 };
2827 class mincgreport : public _mincgreport_owner
2828 {
2829 public:
2830     mincgreport();
2831     mincgreport(const mincgreport &rhs);
2832     mincgreport& operator=(const mincgreport &rhs);
2833     virtual ~mincgreport();
2834     ae_int_t &iterationscount;
2835     ae_int_t &nfev;
2836     ae_int_t &terminationtype;
2837 
2838 };
2839 #endif
2840 
2841 #if defined(AE_COMPILE_NLCSQP) || !defined(AE_PARTIAL_BUILD)
2842 
2843 #endif
2844 
2845 #if defined(AE_COMPILE_LPQPPRESOLVE) || !defined(AE_PARTIAL_BUILD)
2846 
2847 #endif
2848 
2849 #if defined(AE_COMPILE_REVISEDDUALSIMPLEX) || !defined(AE_PARTIAL_BUILD)
2850 
2851 #endif
2852 
2853 #if defined(AE_COMPILE_MINLP) || !defined(AE_PARTIAL_BUILD)
2854 /*************************************************************************
2855 This object stores linear solver state.
2856 You should use functions provided by MinLP subpackage to work with this
2857 object
2858 *************************************************************************/
2859 class _minlpstate_owner
2860 {
2861 public:
2862     _minlpstate_owner();
2863     _minlpstate_owner(const _minlpstate_owner &rhs);
2864     _minlpstate_owner& operator=(const _minlpstate_owner &rhs);
2865     virtual ~_minlpstate_owner();
2866     alglib_impl::minlpstate* c_ptr();
2867     alglib_impl::minlpstate* c_ptr() const;
2868 protected:
2869     alglib_impl::minlpstate *p_struct;
2870 };
2871 class minlpstate : public _minlpstate_owner
2872 {
2873 public:
2874     minlpstate();
2875     minlpstate(const minlpstate &rhs);
2876     minlpstate& operator=(const minlpstate &rhs);
2877     virtual ~minlpstate();
2878 
2879 };
2880 
2881 
2882 /*************************************************************************
2883 This structure stores optimization report:
2884 * f                         target function value
2885 * lagbc                     Lagrange coefficients for box constraints
2886 * laglc                     Lagrange coefficients for linear constraints
2887 * y                         dual variables
2888 * stats                     array[N+M], statuses of box (N) and linear (M)
2889                             constraints. This array is filled only by  DSS
2890                             algorithm because IPM always stops at INTERIOR
2891                             point:
2892                             * stats[i]>0  =>  constraint at upper bound
2893                                               (also used for free non-basic
2894                                               variables set to zero)
2895                             * stats[i]<0  =>  constraint at lower bound
2896                             * stats[i]=0  =>  constraint is inactive, basic
2897                                               variable
2898 * primalerror               primal feasibility error
2899 * dualerror                 dual feasibility error
2900 * slackerror                complementary slackness error
2901 * iterationscount           iteration count
2902 * terminationtype           completion code (see below)
2903 
2904 COMPLETION CODES
2905 
2906 Completion codes:
2907 * -4    LP problem is primal unbounded (dual infeasible)
2908 * -3    LP problem is primal infeasible (dual unbounded)
2909 *  1..4 successful completion
*  5    MaxIts steps were taken
2911 *  7    stopping conditions are too stringent,
2912         further improvement is impossible,
2913         X contains best point found so far.
2914 
2915 LAGRANGE COEFFICIENTS
2916 
A positive Lagrange coefficient means that the constraint is at its upper
bound, while a negative coefficient means that it is at its lower bound.
It is expected that at the solution the dual feasibility condition holds:
2920 
2921     C + SUM(Ei*LagBC[i],i=0..n-1) + SUM(Ai*LagLC[i],i=0..m-1) ~ 0
2922 
2923 where
2924 * C is a cost vector (linear term)
2925 * Ei is a vector with 1.0 at position I and 0 in other positions
2926 * Ai is an I-th row of linear constraint matrix
2927 *************************************************************************/
2928 class _minlpreport_owner
2929 {
2930 public:
2931     _minlpreport_owner();
2932     _minlpreport_owner(const _minlpreport_owner &rhs);
2933     _minlpreport_owner& operator=(const _minlpreport_owner &rhs);
2934     virtual ~_minlpreport_owner();
2935     alglib_impl::minlpreport* c_ptr();
2936     alglib_impl::minlpreport* c_ptr() const;
2937 protected:
2938     alglib_impl::minlpreport *p_struct;
2939 };
2940 class minlpreport : public _minlpreport_owner
2941 {
2942 public:
2943     minlpreport();
2944     minlpreport(const minlpreport &rhs);
2945     minlpreport& operator=(const minlpreport &rhs);
2946     virtual ~minlpreport();
2947     double &f;
2948     real_1d_array lagbc;
2949     real_1d_array laglc;
2950     real_1d_array y;
2951     integer_1d_array stats;
2952     double &primalerror;
2953     double &dualerror;
2954     double &slackerror;
2955     ae_int_t &iterationscount;
2956     ae_int_t &terminationtype;
2957 
2958 };
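
/*************************************************************************
A minimal sketch showing how the stats[] array documented above may be
interpreted after a run of the DSS solver (IPM leaves stats[] empty). The
helper name is arbitrary and the header name "optimization.h" is assumed;
n and m are the numbers of variables and linear constraints respectively.

    #include <cstdio>
    #include "optimization.h"

    static void print_lp_constraint_statuses(int n, int m, const alglib::minlpreport &rep)
    {
        std::printf("f=%.6e, termination=%d\n", rep.f, (int)rep.terminationtype);
        for(int i=0; i<n+m; i++)
        {
            const char *kind = i<n ? "box" : "linear";
            int         idx  = i<n ? i : i-n;
            if( rep.stats[i]>0 )
                std::printf("%s constraint %d: at upper bound\n", kind, idx);
            else if( rep.stats[i]<0 )
                std::printf("%s constraint %d: at lower bound\n", kind, idx);
            else
                std::printf("%s constraint %d: inactive (basic)\n", kind, idx);
        }
    }
*************************************************************************/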
2959 #endif
2960 
2961 #if defined(AE_COMPILE_NLCSLP) || !defined(AE_PARTIAL_BUILD)
2962 
2963 #endif
2964 
2965 #if defined(AE_COMPILE_MINNLC) || !defined(AE_PARTIAL_BUILD)
2966 /*************************************************************************
2967 This object stores nonlinear optimizer state.
2968 You should use functions provided by MinNLC subpackage to work  with  this
2969 object
2970 *************************************************************************/
2971 class _minnlcstate_owner
2972 {
2973 public:
2974     _minnlcstate_owner();
2975     _minnlcstate_owner(const _minnlcstate_owner &rhs);
2976     _minnlcstate_owner& operator=(const _minnlcstate_owner &rhs);
2977     virtual ~_minnlcstate_owner();
2978     alglib_impl::minnlcstate* c_ptr();
2979     alglib_impl::minnlcstate* c_ptr() const;
2980 protected:
2981     alglib_impl::minnlcstate *p_struct;
2982 };
2983 class minnlcstate : public _minnlcstate_owner
2984 {
2985 public:
2986     minnlcstate();
2987     minnlcstate(const minnlcstate &rhs);
2988     minnlcstate& operator=(const minnlcstate &rhs);
2989     virtual ~minnlcstate();
2990     ae_bool &needfi;
2991     ae_bool &needfij;
2992     ae_bool &xupdated;
2993     double &f;
2994     real_1d_array fi;
2995     real_2d_array j;
2996     real_1d_array x;
2997 
2998 };
2999 
3000 
3001 /*************************************************************************
3002 These fields store optimization report:
3003 * iterationscount           total number of inner iterations
3004 * nfev                      number of gradient evaluations
3005 * terminationtype           termination type (see below)
3006 
3007 Scaled constraint violations are reported:
3008 * bcerr                     maximum violation of the box constraints
3009 * bcidx                     index of the most violated box  constraint (or
3010                             -1, if all box constraints  are  satisfied  or
3011                             there is no box constraint)
3012 * lcerr                     maximum violation of the  linear  constraints,
3013                             computed as maximum  scaled  distance  between
3014                             final point and constraint boundary.
3015 * lcidx                     index of the most violated  linear  constraint
3016                             (or -1, if all constraints  are  satisfied  or
                            there are no general linear constraints)
3018 * nlcerr                    maximum violation of the nonlinear constraints
3019 * nlcidx                    index of the most violated nonlinear constraint
3020                             (or -1, if all constraints  are  satisfied  or
                            there are no nonlinear constraints)
3022 
Violations of box constraints are scaled on a per-component basis according
to the scale vector s[] as specified by minnlcsetscale(). Violations of the
general linear constraints are also computed using the user-supplied
variable scaling. Violations of nonlinear constraints are computed "as is".
3027 
3028 TERMINATION CODES
3029 
3030 TerminationType field contains completion code, which can be either:
3031 
3032 === FAILURE CODE ===
3033   -8    internal integrity control detected  infinite  or  NAN  values  in
3034         function/gradient. Abnormal termination signaled.
3035   -3    box  constraints  are  infeasible.  Note: infeasibility of non-box
3036         constraints does NOT trigger emergency  completion;  you  have  to
3037         examine  bcerr/lcerr/nlcerr   to  detect   possibly   inconsistent
3038         constraints.
3039 
3040 === SUCCESS CODE ===
3041    2    relative step is no more than EpsX.
   5    MaxIts steps were taken
3043    7    stopping conditions are too stringent,
3044         further improvement is impossible,
3045         X contains best point found so far.
3046    8    user requested algorithm termination via minnlcrequesttermination(),
3047         last accepted point is returned
3048 
3049 Other fields of this structure are not documented and should not be used!
3050 *************************************************************************/
3051 class _minnlcreport_owner
3052 {
3053 public:
3054     _minnlcreport_owner();
3055     _minnlcreport_owner(const _minnlcreport_owner &rhs);
3056     _minnlcreport_owner& operator=(const _minnlcreport_owner &rhs);
3057     virtual ~_minnlcreport_owner();
3058     alglib_impl::minnlcreport* c_ptr();
3059     alglib_impl::minnlcreport* c_ptr() const;
3060 protected:
3061     alglib_impl::minnlcreport *p_struct;
3062 };
3063 class minnlcreport : public _minnlcreport_owner
3064 {
3065 public:
3066     minnlcreport();
3067     minnlcreport(const minnlcreport &rhs);
3068     minnlcreport& operator=(const minnlcreport &rhs);
3069     virtual ~minnlcreport();
3070     ae_int_t &iterationscount;
3071     ae_int_t &nfev;
3072     ae_int_t &terminationtype;
3073     double &bcerr;
3074     ae_int_t &bcidx;
3075     double &lcerr;
3076     ae_int_t &lcidx;
3077     double &nlcerr;
3078     ae_int_t &nlcidx;
3079     ae_int_t &dbgphase0its;
3080 
3081 };
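
/*************************************************************************
A minimal sketch of a post-optimization feasibility check based on the
scaled violation fields documented above. The helper name and tolerance
parameter are arbitrary; the header name "optimization.h" is assumed.

    #include <cstdio>
    #include "optimization.h"

    static bool solution_is_feasible(const alglib::minnlcreport &rep, double tol)
    {
        if( rep.terminationtype<=0 )
            return false;                      // solver reported a failure
        // nonzero violations may indicate inconsistent constraints even
        // when the completion code is positive
        std::printf("bcerr=%.2e (idx %d), lcerr=%.2e (idx %d), nlcerr=%.2e (idx %d)\n",
                    rep.bcerr, (int)rep.bcidx, rep.lcerr, (int)rep.lcidx,
                    rep.nlcerr, (int)rep.nlcidx);
        return rep.bcerr<=tol && rep.lcerr<=tol && rep.nlcerr<=tol;
    }
*************************************************************************/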
3082 #endif
3083 
3084 #if defined(AE_COMPILE_MINNS) || !defined(AE_PARTIAL_BUILD)
3085 /*************************************************************************
3086 This object stores nonlinear optimizer state.
3087 You should use functions provided by MinNS subpackage to work  with  this
3088 object
3089 *************************************************************************/
3090 class _minnsstate_owner
3091 {
3092 public:
3093     _minnsstate_owner();
3094     _minnsstate_owner(const _minnsstate_owner &rhs);
3095     _minnsstate_owner& operator=(const _minnsstate_owner &rhs);
3096     virtual ~_minnsstate_owner();
3097     alglib_impl::minnsstate* c_ptr();
3098     alglib_impl::minnsstate* c_ptr() const;
3099 protected:
3100     alglib_impl::minnsstate *p_struct;
3101 };
3102 class minnsstate : public _minnsstate_owner
3103 {
3104 public:
3105     minnsstate();
3106     minnsstate(const minnsstate &rhs);
3107     minnsstate& operator=(const minnsstate &rhs);
3108     virtual ~minnsstate();
3109     ae_bool &needfi;
3110     ae_bool &needfij;
3111     ae_bool &xupdated;
3112     double &f;
3113     real_1d_array fi;
3114     real_2d_array j;
3115     real_1d_array x;
3116 
3117 };
3118 
3119 
3120 /*************************************************************************
3121 This structure stores optimization report:
3122 * IterationsCount           total number of inner iterations
3123 * NFEV                      number of gradient evaluations
3124 * TerminationType           termination type (see below)
3125 * CErr                      maximum violation of all types of constraints
3126 * LCErr                     maximum violation of linear constraints
3127 * NLCErr                    maximum violation of nonlinear constraints
3128 
3129 TERMINATION CODES
3130 
3131 TerminationType field contains completion code, which can be:
3132   -8    internal integrity control detected  infinite  or  NAN  values  in
3133         function/gradient. Abnormal termination signalled.
3134   -3    box constraints are inconsistent
3135   -1    inconsistent parameters were passed:
3136         * penalty parameter for minnssetalgoags() is zero,
3137           but we have nonlinear constraints set by minnssetnlc()
3138    2    sampling radius decreased below epsx
   5    MaxIts steps were taken
3140    7    stopping conditions are too stringent,
3141         further improvement is impossible,
3142         X contains best point found so far.
3143    8    User requested termination via MinNSRequestTermination()
3144 
3145 Other fields of this structure are not documented and should not be used!
3146 *************************************************************************/
3147 class _minnsreport_owner
3148 {
3149 public:
3150     _minnsreport_owner();
3151     _minnsreport_owner(const _minnsreport_owner &rhs);
3152     _minnsreport_owner& operator=(const _minnsreport_owner &rhs);
3153     virtual ~_minnsreport_owner();
3154     alglib_impl::minnsreport* c_ptr();
3155     alglib_impl::minnsreport* c_ptr() const;
3156 protected:
3157     alglib_impl::minnsreport *p_struct;
3158 };
3159 class minnsreport : public _minnsreport_owner
3160 {
3161 public:
3162     minnsreport();
3163     minnsreport(const minnsreport &rhs);
3164     minnsreport& operator=(const minnsreport &rhs);
3165     virtual ~minnsreport();
3166     ae_int_t &iterationscount;
3167     ae_int_t &nfev;
3168     double &cerr;
3169     double &lcerr;
3170     double &nlcerr;
3171     ae_int_t &terminationtype;
3172     ae_int_t &varidx;
3173     ae_int_t &funcidx;
3174 
3175 };
3176 #endif
3177 
3178 #if defined(AE_COMPILE_MINCOMP) || !defined(AE_PARTIAL_BUILD)
3179 /*************************************************************************
3180 
3181 *************************************************************************/
3182 class _minasastate_owner
3183 {
3184 public:
3185     _minasastate_owner();
3186     _minasastate_owner(const _minasastate_owner &rhs);
3187     _minasastate_owner& operator=(const _minasastate_owner &rhs);
3188     virtual ~_minasastate_owner();
3189     alglib_impl::minasastate* c_ptr();
3190     alglib_impl::minasastate* c_ptr() const;
3191 protected:
3192     alglib_impl::minasastate *p_struct;
3193 };
3194 class minasastate : public _minasastate_owner
3195 {
3196 public:
3197     minasastate();
3198     minasastate(const minasastate &rhs);
3199     minasastate& operator=(const minasastate &rhs);
3200     virtual ~minasastate();
3201     ae_bool &needfg;
3202     ae_bool &xupdated;
3203     double &f;
3204     real_1d_array g;
3205     real_1d_array x;
3206 
3207 };
3208 
3209 
3210 /*************************************************************************
3211 
3212 *************************************************************************/
3213 class _minasareport_owner
3214 {
3215 public:
3216     _minasareport_owner();
3217     _minasareport_owner(const _minasareport_owner &rhs);
3218     _minasareport_owner& operator=(const _minasareport_owner &rhs);
3219     virtual ~_minasareport_owner();
3220     alglib_impl::minasareport* c_ptr();
3221     alglib_impl::minasareport* c_ptr() const;
3222 protected:
3223     alglib_impl::minasareport *p_struct;
3224 };
3225 class minasareport : public _minasareport_owner
3226 {
3227 public:
3228     minasareport();
3229     minasareport(const minasareport &rhs);
3230     minasareport& operator=(const minasareport &rhs);
3231     virtual ~minasareport();
3232     ae_int_t &iterationscount;
3233     ae_int_t &nfev;
3234     ae_int_t &terminationtype;
3235     ae_int_t &activeconstraints;
3236 
3237 };
3238 #endif
3239 
3240 #if defined(AE_COMPILE_MINBC) || !defined(AE_PARTIAL_BUILD)
3241 /*************************************************************************
3242 This object stores nonlinear optimizer state.
3243 You should use functions provided by MinBC subpackage to work with this
3244 object
3245 *************************************************************************/
3246 class _minbcstate_owner
3247 {
3248 public:
3249     _minbcstate_owner();
3250     _minbcstate_owner(const _minbcstate_owner &rhs);
3251     _minbcstate_owner& operator=(const _minbcstate_owner &rhs);
3252     virtual ~_minbcstate_owner();
3253     alglib_impl::minbcstate* c_ptr();
3254     alglib_impl::minbcstate* c_ptr() const;
3255 protected:
3256     alglib_impl::minbcstate *p_struct;
3257 };
3258 class minbcstate : public _minbcstate_owner
3259 {
3260 public:
3261     minbcstate();
3262     minbcstate(const minbcstate &rhs);
3263     minbcstate& operator=(const minbcstate &rhs);
3264     virtual ~minbcstate();
3265     ae_bool &needf;
3266     ae_bool &needfg;
3267     ae_bool &xupdated;
3268     double &f;
3269     real_1d_array g;
3270     real_1d_array x;
3271 
3272 };
3273 
3274 
3275 /*************************************************************************
3276 This structure stores optimization report:
3277 * iterationscount           number of iterations
3278 * nfev                      number of gradient evaluations
3279 * terminationtype           termination type (see below)
3280 
3281 TERMINATION CODES
3282 
3283 terminationtype field contains completion code, which can be:
3284   -8    internal integrity control detected  infinite  or  NAN  values  in
3285         function/gradient. Abnormal termination signalled.
3286   -3    inconsistent constraints.
3287    1    relative function improvement is no more than EpsF.
3288    2    relative step is no more than EpsX.
3289    4    gradient norm is no more than EpsG
   5    MaxIts steps were taken
3291    7    stopping conditions are too stringent,
3292         further improvement is impossible,
3293         X contains best point found so far.
3294    8    terminated by user who called minbcrequesttermination(). X contains
3295         point which was "current accepted" when  termination  request  was
3296         submitted.
3297 *************************************************************************/
3298 class _minbcreport_owner
3299 {
3300 public:
3301     _minbcreport_owner();
3302     _minbcreport_owner(const _minbcreport_owner &rhs);
3303     _minbcreport_owner& operator=(const _minbcreport_owner &rhs);
3304     virtual ~_minbcreport_owner();
3305     alglib_impl::minbcreport* c_ptr();
3306     alglib_impl::minbcreport* c_ptr() const;
3307 protected:
3308     alglib_impl::minbcreport *p_struct;
3309 };
3310 class minbcreport : public _minbcreport_owner
3311 {
3312 public:
3313     minbcreport();
3314     minbcreport(const minbcreport &rhs);
3315     minbcreport& operator=(const minbcreport &rhs);
3316     virtual ~minbcreport();
3317     ae_int_t &iterationscount;
3318     ae_int_t &nfev;
3319     ae_int_t &varidx;
3320     ae_int_t &terminationtype;
3321 
3322 };
3323 #endif
3324 
3325 #if defined(AE_COMPILE_OPTS) || !defined(AE_PARTIAL_BUILD)
3326 /*************************************************************************
3327 This is a test problem class  intended  for  internal  performance  tests.
3328 Never use it directly in your projects.
3329 *************************************************************************/
3330 class _lptestproblem_owner
3331 {
3332 public:
3333     _lptestproblem_owner();
3334     _lptestproblem_owner(const _lptestproblem_owner &rhs);
3335     _lptestproblem_owner& operator=(const _lptestproblem_owner &rhs);
3336     virtual ~_lptestproblem_owner();
3337     alglib_impl::lptestproblem* c_ptr();
3338     alglib_impl::lptestproblem* c_ptr() const;
3339 protected:
3340     alglib_impl::lptestproblem *p_struct;
3341 };
3342 class lptestproblem : public _lptestproblem_owner
3343 {
3344 public:
3345     lptestproblem();
3346     lptestproblem(const lptestproblem &rhs);
3347     lptestproblem& operator=(const lptestproblem &rhs);
3348     virtual ~lptestproblem();
3349 
3350 };
3351 #endif
3352 
3353 #if defined(AE_COMPILE_OPTGUARDAPI) || !defined(AE_PARTIAL_BUILD)
3354 
3355 #endif
3356 
3357 #if defined(AE_COMPILE_OPTSERV) || !defined(AE_PARTIAL_BUILD)
3358 
3359 #endif
3360 
3361 #if defined(AE_COMPILE_MINLBFGS) || !defined(AE_PARTIAL_BUILD)
3362 /*************************************************************************
3363         LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION
3364 
3365 DESCRIPTION:
3366 The subroutine minimizes function F(x) of N arguments by  using  a  quasi-
3367 Newton method (LBFGS scheme) which is optimized to use  a  minimum  amount
3368 of memory.
The subroutine generates an approximation of the inverse Hessian matrix by
using information about the last M steps of the algorithm (instead of N).
This reduces the required amount of memory from a value of order N^2 to a
value of order 2*N*M.
3373 
3374 
3375 REQUIREMENTS:
The algorithm will request the following information during its operation:
3377 * function value F and its gradient G (simultaneously) at given point X
3378 
3379 
3380 USAGE:
1. User initializes algorithm state with a MinLBFGSCreate() call
2. User tunes solver parameters with MinLBFGSSetCond(), MinLBFGSSetStpMax()
   and other functions
3. User calls MinLBFGSOptimize() function which takes algorithm  state and
   pointer (delegate, etc.) to callback function which calculates F/G.
4. User calls MinLBFGSResults() to get solution
5. Optionally user may call MinLBFGSRestartFrom() to solve another problem
   with same N/M but another starting point and/or another function.
   MinLBFGSRestartFrom() allows one to reuse an already initialized
   structure.
3390 
3391 
3392 INPUT PARAMETERS:
3393     N       -   problem dimension. N>0
    M       -   number of corrections in the BFGS scheme of the Hessian
                approximation update. Recommended value: 3<=M<=7. Smaller
                values cause worse convergence; larger ones do not improve
                convergence considerably but do degrade performance. M<=N.
3399     X       -   initial solution approximation, array[0..N-1].
3400 
3401 
3402 OUTPUT PARAMETERS:
3403     State   -   structure which stores algorithm state
3404 
3405 
3406 NOTES:
3407 1. you may tune stopping conditions with MinLBFGSSetCond() function
3408 2. if target function contains exp() or other fast growing functions,  and
3409    optimization algorithm makes too large steps which leads  to  overflow,
3410    use MinLBFGSSetStpMax() function to bound algorithm's  steps.  However,
3411    L-BFGS rarely needs such a tuning.
3412 
3413 
3414   -- ALGLIB --
3415      Copyright 02.04.2010 by Bochkanov Sergey
3416 *************************************************************************/
3417 void minlbfgscreate(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
3418 void minlbfgscreate(const ae_int_t m, const real_1d_array &x, minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
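
/*************************************************************************
The sketch below follows the USAGE steps above for a toy two-dimensional
problem f(x) = (x0-1)^2 + (x1+2)^2. The toy objective is chosen for
illustration only; MinLBFGSSetCond(), MinLBFGSOptimize() and
MinLBFGSResults() are declared elsewhere in this header, and the public
header is assumed to be named "optimization.h".

    #include <cstdio>
    #include "optimization.h"

    using namespace alglib;

    static void my_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
    {
        func    = (x[0]-1)*(x[0]-1) + (x[1]+2)*(x[1]+2);
        grad[0] = 2*(x[0]-1);
        grad[1] = 2*(x[1]+2);
    }

    int main()
    {
        real_1d_array  x = "[0,0]";                  // starting point
        minlbfgsstate  state;
        minlbfgsreport rep;

        minlbfgscreate(2, 3, x, state);              // N=2, M=3 corrections
        minlbfgssetcond(state, 0, 0, 1.0e-8, 0);     // stop when scaled step < 1e-8
        minlbfgsoptimize(state, my_grad);            // callback computes F and G
        minlbfgsresults(state, x, rep);

        std::printf("termination=%d, x=[%.6f,%.6f]\n",
                    (int)rep.terminationtype, x[0], x[1]);
        return 0;
    }
*************************************************************************/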
3419 
3420 
3421 /*************************************************************************
This subroutine is a finite-difference variant of MinLBFGSCreate(). It uses
finite differences in order to differentiate the target function.

The description below contains only information specific to this function.
We recommend reading the comments on MinLBFGSCreate() in order to get more
information about creation of the LBFGS optimizer.
3428 
3429 INPUT PARAMETERS:
3430     N       -   problem dimension, N>0:
3431                 * if given, only leading N elements of X are used
3432                 * if not given, automatically determined from size of X
    M       -   number of corrections in the BFGS scheme of Hessian
                approximation update. Recommended value: 3<=M<=7. Smaller
                values lead to worse convergence; larger values do not
                improve convergence considerably but do degrade
                performance. M<=N.
3438     X       -   starting point, array[0..N-1].
3439     DiffStep-   differentiation step, >0
3440 
3441 OUTPUT PARAMETERS:
3442     State   -   structure which stores algorithm state
3443 
NOTES:
1. the algorithm uses a 4-point central formula for differentiation.
2. the differentiation step along the I-th axis is equal to DiffStep*S[I],
   where S[] is the scaling vector which can be set by a MinLBFGSSetScale()
   call.
3. we recommend using moderate values of the differentiation step. Too
   large a step results in large truncation errors, while too small a step
   results in large numerical errors. 1.0E-6 can be a good value to start
   with.
4. Numerical differentiation is very inefficient - one gradient calculation
   needs 4*N function evaluations. This function will work for any N -
   small (1...10), moderate (10...100) or large (100...). However, the
   performance penalty will be too severe for anything but small N.
   We should also say that code which relies on numerical differentiation
   is less robust and less precise. LBFGS needs exact gradient values.
   An imprecise gradient may slow down convergence, especially on highly
   nonlinear problems.
   Thus we recommend using this function for fast prototyping on small-
   dimensional problems only, and implementing the analytical gradient as
   soon as possible.
3464 
3465   -- ALGLIB --
3466      Copyright 16.05.2011 by Bochkanov Sergey
3467 *************************************************************************/
3468 void minlbfgscreatef(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
3469 void minlbfgscreatef(const ae_int_t m, const real_1d_array &x, const double diffstep, minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
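
/*************************************************************************
USAGE SKETCH (illustrative only)

A minimal sketch of the finite difference variant, assuming the same toy
target as in the previous sketch but without an analytic gradient. It is
used with the function-only overload of MinLBFGSOptimize(); the DiffStep
value is hypothetical.

    // function-only callback: the gradient is obtained numerically
    void toy_func(const alglib::real_1d_array &x, double &func, void *ptr)
    {
        func = (x[0]-1.0)*(x[0]-1.0) + (x[1]+2.0)*(x[1]+2.0);
    }

    void run_lbfgs_numdiff()
    {
        alglib::real_1d_array  x = "[0,0]";
        alglib::minlbfgsstate  state;
        alglib::minlbfgsreport rep;

        alglib::minlbfgscreatef(2, 3, x, 1.0e-6, state); // DiffStep=1.0E-6
        alglib::minlbfgssetcond(state, 0, 0, 1.0e-8, 0);
        alglib::minlbfgsoptimize(state, toy_func);       // function-only variant
        alglib::minlbfgsresults(state, x, rep);
    }
*************************************************************************/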
3470 
3471 
3472 /*************************************************************************
3473 This function sets stopping conditions for L-BFGS optimization algorithm.
3474 
3475 INPUT PARAMETERS:
3476     State   -   structure which stores algorithm state
3477     EpsG    -   >=0
3478                 The  subroutine  finishes  its  work   if   the  condition
3479                 |v|<EpsG is satisfied, where:
                * |.| means Euclidean norm
3481                 * v - scaled gradient vector, v[i]=g[i]*s[i]
3482                 * g - gradient
3483                 * s - scaling coefficients set by MinLBFGSSetScale()
3484     EpsF    -   >=0
3485                 The  subroutine  finishes  its work if on k+1-th iteration
3486                 the  condition  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
3487                 is satisfied.
3488     EpsX    -   >=0
3489                 The subroutine finishes its work if  on  k+1-th  iteration
3490                 the condition |v|<=EpsX is fulfilled, where:
                * |.| means Euclidean norm
3492                 * v - scaled step vector, v[i]=dx[i]/s[i]
                * dx - step vector, dx=X(k+1)-X(k)
3494                 * s - scaling coefficients set by MinLBFGSSetScale()
3495     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
3496                 iterations is unlimited.
3497 
3498 Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
3499 automatic stopping criterion selection (small EpsX).
3500 
3501   -- ALGLIB --
3502      Copyright 02.04.2010 by Bochkanov Sergey
3503 *************************************************************************/
3504 void minlbfgssetcond(const minlbfgsstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
3505 
3506 
3507 /*************************************************************************
3508 This function turns on/off reporting.
3509 
3510 INPUT PARAMETERS:
3511     State   -   structure which stores algorithm state
3512     NeedXRep-   whether iteration reports are needed or not
3513 
3514 If NeedXRep is True, algorithm will call rep() callback function if  it is
3515 provided to MinLBFGSOptimize().
3516 
3517 
3518   -- ALGLIB --
3519      Copyright 02.04.2010 by Bochkanov Sergey
3520 *************************************************************************/
3521 void minlbfgssetxrep(const minlbfgsstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
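
/*************************************************************************
USAGE SKETCH (illustrative only)

A minimal sketch of an iteration-report callback, assuming an optimizer
created as in the earlier sketches; the callback name and the printf-based
logging are hypothetical.

    #include <cstdio>

    // rep() callback: called after each iteration with the current point
    void toy_rep(const alglib::real_1d_array &x, double func, void *ptr)
    {
        std::printf("current f = %.6e\n", func);
    }

    // enable reporting before starting the optimizer:
    //     alglib::minlbfgssetxrep(state, true);
    //     alglib::minlbfgsoptimize(state, toy_grad, toy_rep);
*************************************************************************/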
3522 
3523 
3524 /*************************************************************************
This function sets the maximum step length.
3526 
3527 INPUT PARAMETERS:
3528     State   -   structure which stores algorithm state
3529     StpMax  -   maximum step length, >=0. Set StpMax to 0.0 (default),  if
3530                 you don't want to limit step length.
3531 
Use this subroutine when you optimize a target function which contains
exp() or other fast growing functions, and the optimization algorithm makes
steps so large that they lead to overflow. This function allows us to reject
steps that are too large (and therefore expose us to possible overflow)
without actually calculating the function value at x+stp*d.
3537 
3538   -- ALGLIB --
3539      Copyright 02.04.2010 by Bochkanov Sergey
3540 *************************************************************************/
3541 void minlbfgssetstpmax(const minlbfgsstate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
3542 
3543 
3544 /*************************************************************************
3545 This function sets scaling coefficients for LBFGS optimizer.
3546 
3547 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
3548 size and gradient are scaled before comparison with tolerances).  Scale of
3549 the I-th variable is a translation invariant measure of:
3550 a) "how large" the variable is
3551 b) how large the step should be to make significant changes in the function
3552 
3553 Scaling is also used by finite difference variant of the optimizer  - step
3554 along I-th axis is equal to DiffStep*S[I].
3555 
3556 In  most  optimizers  (and  in  the  LBFGS  too)  scaling is NOT a form of
3557 preconditioning. It just  affects  stopping  conditions.  You  should  set
3558 preconditioner  by  separate  call  to  one  of  the  MinLBFGSSetPrec...()
3559 functions.
3560 
There is a special preconditioning mode, however, which uses scaling
coefficients to form a diagonal preconditioning matrix. You can turn this
mode on if you want, but you should understand that scaling is not the
same thing as preconditioning - these are two different, although related,
forms of tuning the solver.
3566 
3567 INPUT PARAMETERS:
3568     State   -   structure stores algorithm state
3569     S       -   array[N], non-zero scaling coefficients
3570                 S[i] may be negative, sign doesn't matter.
3571 
3572   -- ALGLIB --
3573      Copyright 14.01.2011 by Bochkanov Sergey
3574 *************************************************************************/
3575 void minlbfgssetscale(const minlbfgsstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
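
/*************************************************************************
USAGE SKETCH (illustrative only)

Setting per-variable scales for a problem whose variables have very
different magnitudes; assumes an already created optimizer named state,
with hypothetical scale values.

    // first variable is of order 1, second is of order 10000
    alglib::real_1d_array s = "[1.0,10000.0]";
    alglib::minlbfgssetscale(state, s);
    // tolerances passed to minlbfgssetcond() are now interpreted in terms
    // of the scaled gradient/step described above
*************************************************************************/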
3576 
3577 
3578 /*************************************************************************
3579 Modification  of  the  preconditioner:  default  preconditioner    (simple
3580 scaling, same for all elements of X) is used.
3581 
3582 INPUT PARAMETERS:
3583     State   -   structure which stores algorithm state
3584 
3585 NOTE:  you  can  change  preconditioner  "on  the  fly",  during algorithm
3586 iterations.
3587 
3588   -- ALGLIB --
3589      Copyright 13.10.2010 by Bochkanov Sergey
3590 *************************************************************************/
3591 void minlbfgssetprecdefault(const minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
3592 
3593 
3594 /*************************************************************************
3595 Modification of the preconditioner: Cholesky factorization of  approximate
3596 Hessian is used.
3597 
3598 INPUT PARAMETERS:
3599     State   -   structure which stores algorithm state
3600     P       -   triangular preconditioner, Cholesky factorization of
3601                 the approximate Hessian. array[0..N-1,0..N-1],
3602                 (if larger, only leading N elements are used).
3603     IsUpper -   whether upper or lower triangle of P is given
3604                 (other triangle is not referenced)
3605 
3606 After call to this function preconditioner is changed to P  (P  is  copied
3607 into the internal buffer).
3608 
3609 NOTE:  you  can  change  preconditioner  "on  the  fly",  during algorithm
3610 iterations.
3611 
3612 NOTE 2:  P  should  be nonsingular. Exception will be thrown otherwise.
3613 
3614   -- ALGLIB --
3615      Copyright 13.10.2010 by Bochkanov Sergey
3616 *************************************************************************/
3617 void minlbfgssetpreccholesky(const minlbfgsstate &state, const real_2d_array &p, const bool isupper, const xparams _xparams = alglib::xdefault);
3618 
3619 
3620 /*************************************************************************
3621 Modification  of  the  preconditioner:  diagonal of approximate Hessian is
3622 used.
3623 
3624 INPUT PARAMETERS:
3625     State   -   structure which stores algorithm state
3626     D       -   diagonal of the approximate Hessian, array[0..N-1],
3627                 (if larger, only leading N elements are used).
3628 
3629 NOTE:  you  can  change  preconditioner  "on  the  fly",  during algorithm
3630 iterations.
3631 
3632 NOTE 2: D[i] should be positive. Exception will be thrown otherwise.
3633 
3634 NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
3635 
3636   -- ALGLIB --
3637      Copyright 13.10.2010 by Bochkanov Sergey
3638 *************************************************************************/
3639 void minlbfgssetprecdiag(const minlbfgsstate &state, const real_1d_array &d, const xparams _xparams = alglib::xdefault);
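
/*************************************************************************
USAGE SKETCH (illustrative only)

Supplying the diagonal of an approximate Hessian (NOT its inverse) as a
preconditioner; assumes an already created optimizer named state, with
hypothetical diagonal values.

    alglib::real_1d_array d = "[2.0,50.0]";   // diag(H) estimate, all positive
    alglib::minlbfgssetprecdiag(state, d);
*************************************************************************/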
3640 
3641 
3642 /*************************************************************************
3643 Modification of the preconditioner: scale-based diagonal preconditioning.
3644 
This preconditioning mode can be useful when you don't have an approximate
diagonal of the Hessian, but you know that your variables are badly scaled
(for example, one variable is in [1,10], and another in [1000,100000]),
and most of the ill-conditioning comes from the different scales of the
variables.

In this case a simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
can greatly improve convergence.

IMPORTANT: you should set the scale of your variables with a
MinLBFGSSetScale() call (before or after the MinLBFGSSetPrecScale() call).
Without knowledge of the scale of your variables, the scale-based
preconditioner will be just a unit matrix.
3657 
3658 INPUT PARAMETERS:
3659     State   -   structure which stores algorithm state
3660 
3661   -- ALGLIB --
3662      Copyright 13.10.2010 by Bochkanov Sergey
3663 *************************************************************************/
3664 void minlbfgssetprecscale(const minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
3665 
3666 
3667 /*************************************************************************
This function provides the reverse communication interface. The reverse
communication interface is not documented and is not recommended for use.
See below for functions which provide a better documented API.
3671 *************************************************************************/
3672 bool minlbfgsiteration(const minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
3673 
3674 
3675 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear
optimizer.

These functions accept the following parameters:
3679     state   -   algorithm state
3680     func    -   callback which calculates function (or merit function)
3681                 value func at given point x
3682     grad    -   callback which calculates function (or merit function)
3683                 value func and gradient grad at given point x
3684     rep     -   optional callback which is called after each iteration
3685                 can be NULL
3686     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
3687                 can be NULL
3688 
3689 NOTES:
3690 
3691 1. This function has two different implementations: one which  uses  exact
3692    (analytical) user-supplied gradient,  and one which uses function value
3693    only  and  numerically  differentiates  function  in  order  to  obtain
3694    gradient.
3695 
3696    Depending  on  the  specific  function  used to create optimizer object
3697    (either MinLBFGSCreate() for analytical gradient  or  MinLBFGSCreateF()
3698    for numerical differentiation) you should choose appropriate variant of
3699    MinLBFGSOptimize() - one  which  accepts  function  AND gradient or one
3700    which accepts function ONLY.
3701 
3702    Be careful to choose variant of MinLBFGSOptimize() which corresponds to
3703    your optimization scheme! Table below lists different  combinations  of
3704    callback (function/gradient) passed to MinLBFGSOptimize()  and specific
3705    function used to create optimizer.
3706 
3707 
3708                      |         USER PASSED TO MinLBFGSOptimize()
3709    CREATED WITH      |  function only   |  function and gradient
3710    ------------------------------------------------------------
3711    MinLBFGSCreateF() |     work                FAIL
3712    MinLBFGSCreate()  |     FAIL                work
3713 
   Here "FAIL" denotes inappropriate combinations of optimizer creation
   function and MinLBFGSOptimize() version. Attempts to use such a
   combination (for example, to create the optimizer with MinLBFGSCreateF()
   and to pass gradient information to MinLBFGSOptimize()) will lead to an
   exception being thrown. Either you did not pass the gradient when it WAS
   needed or you passed the gradient when it was NOT needed.
3720 
3721   -- ALGLIB --
3722      Copyright 20.03.2009 by Bochkanov Sergey
3723 
3724 *************************************************************************/
3725 void minlbfgsoptimize(minlbfgsstate &state,
3726     void (*func)(const real_1d_array &x, double &func, void *ptr),
3727     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
3728     void *ptr = NULL,
3729     const xparams _xparams = alglib::xdefault);
3730 void minlbfgsoptimize(minlbfgsstate &state,
3731     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
3732     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
3733     void *ptr = NULL,
3734     const xparams _xparams = alglib::xdefault);
3735 
3736 
3737 /*************************************************************************
3738 This  function  activates/deactivates verification  of  the  user-supplied
3739 analytic gradient.
3740 
3741 Upon  activation  of  this  option  OptGuard  integrity  checker  performs
3742 numerical differentiation of your target function  at  the  initial  point
3743 (note: future versions may also perform check  at  the  final  point)  and
3744 compares numerical gradient with analytic one provided by you.
3745 
If the difference is too large, an error flag is set and the optimization
session continues. After the optimization session is over, you can retrieve
the report which stores both gradients and the specific components
highlighted as suspicious by OptGuard.
3750 
3751 The primary OptGuard report can be retrieved with minlbfgsoptguardresults().
3752 
3753 IMPORTANT: gradient check is a high-overhead option which  will  cost  you
3754            about 3*N additional function evaluations. In many cases it may
3755            cost as much as the rest of the optimization session.
3756 
3757            YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO
3758            CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY.
3759 
3760 NOTE: unlike previous incarnation of the gradient checking code,  OptGuard
3761       does NOT interrupt optimization even if it discovers bad gradient.
3762 
3763 INPUT PARAMETERS:
3764     State       -   structure used to store algorithm state
3765     TestStep    -   verification step used for numerical differentiation:
3766                     * TestStep=0 turns verification off
3767                     * TestStep>0 activates verification
                    You should carefully choose TestStep. A value that is
                    too large (so large that the function behavior is non-
                    cubic at this scale) will lead to false alarms. Too
                    short a step will result in rounding errors dominating
                    the numerical derivative.
3773 
3774                     You may use different step for different parameters by
3775                     means of setting scale with minlbfgssetscale().
3776 
3777 === EXPLANATION ==========================================================
3778 
In order to verify the gradient, the algorithm performs the following steps:
  * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
    where X[i] is the i-th component of the initial point and S[i] is the
    scale of the i-th parameter
  * F(X) is evaluated at these trial points
  * we perform one more evaluation at the middle point of the interval
  * we build a cubic model using function values and derivatives at the
    trial points and compare its prediction with the actual value at the
    middle point
3788 
3789   -- ALGLIB --
3790      Copyright 15.06.2014 by Bochkanov Sergey
3791 *************************************************************************/
3792 void minlbfgsoptguardgradient(const minlbfgsstate &state, const double teststep, const xparams _xparams = alglib::xdefault);
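
/*************************************************************************
USAGE SKETCH (illustrative only)

Enabling OptGuard gradient verification before the run; assumes an already
created optimizer named state and a hypothetical TestStep value.

    alglib::minlbfgsoptguardgradient(state, 1.0e-4);   // TestStep=1.0E-4
    // ... run minlbfgsoptimize() and minlbfgsresults() as usual; the
    // verdict is retrieved afterwards with minlbfgsoptguardresults()
*************************************************************************/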
3793 
3794 
3795 /*************************************************************************
3796 This  function  activates/deactivates nonsmoothness monitoring  option  of
3797 the  OptGuard  integrity  checker. Smoothness  monitor  silently  observes
3798 solution process and tries to detect ill-posed problems, i.e. ones with:
3799 a) discontinuous target function (non-C0)
3800 b) nonsmooth     target function (non-C1)
3801 
3802 Smoothness monitoring does NOT interrupt optimization  even if it suspects
3803 that your problem is nonsmooth. It just sets corresponding  flags  in  the
3804 OptGuard report which can be retrieved after optimization is over.
3805 
3806 Smoothness monitoring is a moderate overhead option which often adds  less
3807 than 1% to the optimizer running time. Thus, you can use it even for large
3808 scale problems.
3809 
3810 NOTE: OptGuard does  NOT  guarantee  that  it  will  always  detect  C0/C1
3811       continuity violations.
3812 
3813       First, minor errors are hard to  catch - say, a 0.0001 difference in
3814       the model values at two sides of the gap may be due to discontinuity
3815       of the model - or simply because the model has changed.
3816 
3817       Second, C1-violations  are  especially  difficult  to  detect  in  a
3818       noninvasive way. The optimizer usually  performs  very  short  steps
3819       near the nonsmoothness, and differentiation  usually   introduces  a
3820       lot of numerical noise.  It  is  hard  to  tell  whether  some  tiny
3821       discontinuity in the slope is due to real nonsmoothness or just  due
3822       to numerical noise alone.
3823 
      Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases they can be
      spotted with a restart from a different initial point).
3827 
3828 INPUT PARAMETERS:
3829     state   -   algorithm state
3830     level   -   monitoring level:
3831                 * 0 - monitoring is disabled
3832                 * 1 - noninvasive low-overhead monitoring; function values
3833                       and/or gradients are recorded, but OptGuard does not
3834                       try to perform additional evaluations  in  order  to
3835                       get more information about suspicious locations.
3836 
3837 === EXPLANATION ==========================================================
3838 
One major source of headache during optimization is the possibility of
coding errors in the target function/constraints (or their gradients).
3841 Such  errors   most   often   manifest   themselves  as  discontinuity  or
3842 nonsmoothness of the target/constraints.
3843 
Another frequent situation is when you try to optimize something involving
lots of min() and max() operations, i.e. a nonsmooth target. Although not
a coding error, it is nonsmoothness anyway - and smooth optimizers usually
stop right after encountering nonsmoothness, well before reaching the
solution.

The OptGuard integrity checker helps you to catch such situations: it
monitors function values/gradients being passed to the optimizer and tries
to detect errors. Upon discovering a suspicious pair of points it raises
the appropriate flag (and allows you to continue optimization). When
optimization is done, you can study the OptGuard result.
3854 
3855   -- ALGLIB --
3856      Copyright 21.11.2018 by Bochkanov Sergey
3857 *************************************************************************/
3858 void minlbfgsoptguardsmoothness(const minlbfgsstate &state, const ae_int_t level, const xparams _xparams = alglib::xdefault);
3859 void minlbfgsoptguardsmoothness(const minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
3860 
3861 
3862 /*************************************************************************
Results of the OptGuard integrity check; this function should be called
after the optimization session is over.
3865 
3866 === PRIMARY REPORT =======================================================
3867 
3868 OptGuard performs several checks which are intended to catch common errors
3869 in the implementation of nonlinear function/gradient:
3870 * incorrect analytic gradient
3871 * discontinuous (non-C0) target functions (constraints)
3872 * nonsmooth     (non-C1) target functions (constraints)
3873 
3874 Each of these checks is activated with appropriate function:
3875 * minlbfgsoptguardgradient() for gradient verification
3876 * minlbfgsoptguardsmoothness() for C0/C1 checks
3877 
3878 Following flags are set when these errors are suspected:
3879 * rep.badgradsuspected, and additionally:
3880   * rep.badgradvidx for specific variable (gradient element) suspected
3881   * rep.badgradxbase, a point where gradient is tested
3882   * rep.badgraduser, user-provided gradient  (stored  as  2D  matrix  with
3883     single row in order to make  report  structure  compatible  with  more
3884     complex optimizers like MinNLC or MinLM)
3885   * rep.badgradnum,   reference    gradient    obtained    via   numerical
3886     differentiation (stored as  2D matrix with single row in order to make
3887     report structure compatible with more complex optimizers  like  MinNLC
3888     or MinLM)
3889 * rep.nonc0suspected
3890 * rep.nonc1suspected
3891 
3892 === ADDITIONAL REPORTS/LOGS ==============================================
3893 
Several different tests are performed to catch C0/C1 errors; you can find
out which specific test signaled an error by looking at:
3896 * rep.nonc0test0positive, for non-C0 test #0
3897 * rep.nonc1test0positive, for non-C1 test #0
3898 * rep.nonc1test1positive, for non-C1 test #1
3899 
3900 Additional information (including line search logs)  can  be  obtained  by
3901 means of:
3902 * minlbfgsoptguardnonc1test0results()
3903 * minlbfgsoptguardnonc1test1results()
3904 which return detailed error reports, specific points where discontinuities
3905 were found, and so on.
3906 
3907 ==========================================================================
3908 
3909 INPUT PARAMETERS:
3910     state   -   algorithm state
3911 
3912 OUTPUT PARAMETERS:
3913     rep     -   generic OptGuard report;  more  detailed  reports  can  be
3914                 retrieved with other functions.
3915 
3916 NOTE: false negatives (nonsmooth problems are not identified as  nonsmooth
3917       ones) are possible although unlikely.
3918 
3919       The reason  is  that  you  need  to  make several evaluations around
3920       nonsmoothness  in  order  to  accumulate  enough  information  about
3921       function curvature. Say, if you start right from the nonsmooth point,
3922       optimizer simply won't get enough data to understand what  is  going
3923       wrong before it terminates due to abrupt changes in the  derivative.
3924       It is also  possible  that  "unlucky"  step  will  move  us  to  the
3925       termination too quickly.
3926 
3927       Our current approach is to have less than 0.1%  false  negatives  in
3928       our test examples  (measured  with  multiple  restarts  from  random
3929       points), and to have exactly 0% false positives.
3930 
3931   -- ALGLIB --
3932      Copyright 21.11.2018 by Bochkanov Sergey
3933 *************************************************************************/
3934 void minlbfgsoptguardresults(const minlbfgsstate &state, optguardreport &rep, const xparams _xparams = alglib::xdefault);
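
/*************************************************************************
USAGE SKETCH (illustrative only)

Retrieving and inspecting the primary OptGuard report after the run;
assumes an optimizer named state and that the lowercase field names below
match the C++ report wrapper.

    alglib::optguardreport ogrep;
    alglib::minlbfgsoptguardresults(state, ogrep);
    if( ogrep.badgradsuspected )
    {
        // analytic and numerical gradients disagree; the suspicious
        // component index is ogrep.badgradvidx
    }
    if( ogrep.nonc0suspected || ogrep.nonc1suspected )
    {
        // target looks discontinuous and/or nonsmooth; see the detailed
        // reports returned by minlbfgsoptguardnonc1test0results() and
        // minlbfgsoptguardnonc1test1results()
    }
*************************************************************************/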
3935 
3936 
3937 /*************************************************************************
3938 Detailed results of the OptGuard integrity check for nonsmoothness test #0
3939 
3940 Nonsmoothness (non-C1) test #0 studies  function  values  (not  gradient!)
3941 obtained during line searches and monitors  behavior  of  the  directional
3942 derivative estimate.
3943 
3944 This test is less powerful than test #1, but it does  not  depend  on  the
3945 gradient values and thus it is more robust against artifacts introduced by
3946 numerical differentiation.
3947 
3948 Two reports are returned:
3949 * a "strongest" one, corresponding  to  line   search  which  had  highest
3950   value of the nonsmoothness indicator
3951 * a "longest" one, corresponding to line search which  had  more  function
3952   evaluations, and thus is more detailed
3953 
3954 In both cases following fields are returned:
3955 
* positive - is TRUE when the test flagged a suspicious point; FALSE if the
  test did not notice anything (in the latter case the fields below are
  empty).
* x0[], d[] - arrays of length N which store the initial point and the
  direction for the line search (d[] can be normalized, but does not have
  to be)
* stp[], f[] - arrays of length CNT which store step lengths and function
  values at these points; f[i] is evaluated at x0+stp[i]*d.
* stpidxa, stpidxb - we suspect that the function violates C1 continuity
  between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3,
  with the most likely position of the violation between stpidxa+1 and
  stpidxa+2).
3966 
3967 ==========================================================================
= IN SHORT: build a 2D plot of (stp,f) and look at it -  you  will  see
=           where C1 continuity is violated.
3970 ==========================================================================
3971 
3972 INPUT PARAMETERS:
3973     state   -   algorithm state
3974 
3975 OUTPUT PARAMETERS:
3976     strrep  -   C1 test #0 "strong" report
3977     lngrep  -   C1 test #0 "long" report
3978 
3979   -- ALGLIB --
3980      Copyright 21.11.2018 by Bochkanov Sergey
3981 *************************************************************************/
3982 void minlbfgsoptguardnonc1test0results(const minlbfgsstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams = alglib::xdefault);
3983 
3984 
3985 /*************************************************************************
3986 Detailed results of the OptGuard integrity check for nonsmoothness test #1
3987 
3988 Nonsmoothness (non-C1)  test  #1  studies  individual  components  of  the
3989 gradient computed during line search.
3990 
3991 When precise analytic gradient is provided this test is more powerful than
3992 test #0  which  works  with  function  values  and  ignores  user-provided
3993 gradient.  However,  test  #0  becomes  more   powerful   when   numerical
3994 differentiation is employed (in such cases test #1 detects  higher  levels
3995 of numerical noise and becomes too conservative).
3996 
This test also reports the specific components of the gradient which
violate C1 continuity, which makes it more informative than test #0, which
merely reports that continuity is violated.
4000 
4001 Two reports are returned:
4002 * a "strongest" one, corresponding  to  line   search  which  had  highest
4003   value of the nonsmoothness indicator
4004 * a "longest" one, corresponding to line search which  had  more  function
4005   evaluations, and thus is more detailed
4006 
4007 In both cases following fields are returned:
4008 
* positive - is TRUE when the test flagged a suspicious point; FALSE if the
  test did not notice anything (in the latter case the fields below are
  empty).
* vidx - the index of the variable in [0,N) with the nonsmooth derivative
* x0[], d[] - arrays of length N which store the initial point and the
  direction for the line search (d[] can be normalized, but does not have
  to be)
* stp[], g[] - arrays of length CNT which store step lengths and gradient
  values at these points; g[i] is evaluated at x0+stp[i]*d and contains the
  vidx-th component of the gradient.
* stpidxa, stpidxb - we suspect that the function violates C1 continuity
  between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3,
  with the most likely position of the violation between stpidxa+1 and
  stpidxa+2).
4021 
4022 ==========================================================================
= IN SHORT: build a 2D plot of (stp,g) and look at it -  you  will  see
=           where C1 continuity is violated.
4025 ==========================================================================
4026 
4027 INPUT PARAMETERS:
4028     state   -   algorithm state
4029 
4030 OUTPUT PARAMETERS:
4031     strrep  -   C1 test #1 "strong" report
4032     lngrep  -   C1 test #1 "long" report
4033 
4034   -- ALGLIB --
4035      Copyright 21.11.2018 by Bochkanov Sergey
4036 *************************************************************************/
4037 void minlbfgsoptguardnonc1test1results(const minlbfgsstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams = alglib::xdefault);
4038 
4039 
4040 /*************************************************************************
4041 L-BFGS algorithm results
4042 
4043 INPUT PARAMETERS:
4044     State   -   algorithm state
4045 
4046 OUTPUT PARAMETERS:
4047     X       -   array[0..N-1], solution
4048     Rep     -   optimization report:
                * Rep.TerminationType, completion code:
4050                     * -8    internal integrity control  detected  infinite
4051                             or NAN values in  function/gradient.  Abnormal
4052                             termination signalled.
4053                     * -2    rounding errors prevent further improvement.
4054                             X contains best point found.
4055                     * -1    incorrect parameters were specified
4056                     *  1    relative function improvement is no more than
4057                             EpsF.
4058                     *  2    relative step is no more than EpsX.
4059                     *  4    gradient norm is no more than EpsG
                    *  5    MaxIts steps were taken
4061                     *  7    stopping conditions are too stringent,
4062                             further improvement is impossible
4063                     *  8    terminated by user who called minlbfgsrequesttermination().
4064                             X contains point which was "current accepted" when
4065                             termination request was submitted.
4066                 * Rep.IterationsCount contains iterations count
                * Rep.NFEV contains the number of function evaluations
4068 
4069   -- ALGLIB --
4070      Copyright 02.04.2010 by Bochkanov Sergey
4071 *************************************************************************/
4072 void minlbfgsresults(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep, const xparams _xparams = alglib::xdefault);
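
/*************************************************************************
USAGE SKETCH (illustrative only)

Retrieving the solution and checking the completion code; assumes an
optimizer named state that has finished iterating and that the report
fields follow the lowercase naming of the C++ wrappers.

    alglib::real_1d_array  xsol;
    alglib::minlbfgsreport rep;
    alglib::minlbfgsresults(state, xsol, rep);
    if( rep.terminationtype > 0 )
    {
        // converged: rep.iterationscount iterations, rep.nfev evaluations
    }
    else
    {
        // negative codes indicate abnormal termination (see list above)
    }
*************************************************************************/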
4073 
4074 
4075 /*************************************************************************
4076 L-BFGS algorithm results
4077 
Buffered implementation of MinLBFGSResults() which uses a pre-allocated
buffer to store X[]. If the buffer size is too small, it is resized. This
function is intended for use in the inner loops of performance-critical
algorithms where the array reallocation penalty is too large to be ignored.
4082 
4083   -- ALGLIB --
4084      Copyright 20.08.2010 by Bochkanov Sergey
4085 *************************************************************************/
4086 void minlbfgsresultsbuf(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep, const xparams _xparams = alglib::xdefault);
4087 
4088 
4089 /*************************************************************************
This subroutine restarts the LBFGS algorithm from a new point. All
optimization parameters are left unchanged.

This function allows you to solve multiple optimization problems (which
must have the same number of dimensions) without object reallocation
penalty.
4095 
4096 INPUT PARAMETERS:
4097     State   -   structure used to store algorithm state
4098     X       -   new starting point.
4099 
4100   -- ALGLIB --
4101      Copyright 30.07.2010 by Bochkanov Sergey
4102 *************************************************************************/
4103 void minlbfgsrestartfrom(const minlbfgsstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
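
/*************************************************************************
USAGE SKETCH (illustrative only)

Reusing one optimizer object for a second problem of the same dimension;
assumes the state, callback and report from the earlier sketches, with a
hypothetical new starting point.

    alglib::real_1d_array x2 = "[5,-5]";
    alglib::minlbfgsrestartfrom(state, x2);
    alglib::minlbfgsoptimize(state, toy_grad);
    alglib::minlbfgsresults(state, x2, rep);
*************************************************************************/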
4104 
4105 
4106 /*************************************************************************
This subroutine submits a request for termination of the running optimizer.
It should be called from a user-supplied callback when the user decides
that it is time to "smoothly" terminate the optimization process. As a
result, the optimizer stops at the point which was "current accepted" when
the termination request was submitted, and returns completion code 8
(successful termination).
4112 
4113 INPUT PARAMETERS:
4114     State   -   optimizer structure
4115 
NOTE: after a request for termination the optimizer may perform several
      additional calls to user-supplied callbacks. It does NOT guarantee
      to stop immediately - it just guarantees that these additional calls
      will be discarded later.
4120 
4121 NOTE: calling this function on optimizer which is NOT running will have no
4122       effect.
4123 
4124 NOTE: multiple calls to this function are possible. First call is counted,
4125       subsequent calls are silently ignored.
4126 
4127   -- ALGLIB --
4128      Copyright 08.10.2014 by Bochkanov Sergey
4129 *************************************************************************/
4130 void minlbfgsrequesttermination(const minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
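
/*************************************************************************
USAGE SKETCH (illustrative only)

Requesting "smooth" termination from inside an iteration-report callback
once the function value is deemed good enough; the optimizer state is
passed through the user pointer, and the callback name and threshold are
hypothetical.

    void toy_rep_stop(const alglib::real_1d_array &x, double func, void *ptr)
    {
        alglib::minlbfgsstate *state = (alglib::minlbfgsstate*)ptr;
        if( func<1.0e-3 )
            alglib::minlbfgsrequesttermination(*state);
    }

    // pass &state as the user pointer:
    //     alglib::minlbfgssetxrep(state, true);
    //     alglib::minlbfgsoptimize(state, toy_grad, toy_rep_stop, &state);
*************************************************************************/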
4131 #endif
4132 
4133 #if defined(AE_COMPILE_CQMODELS) || !defined(AE_PARTIAL_BUILD)
4134 
4135 #endif
4136 
4137 #if defined(AE_COMPILE_LPQPSERV) || !defined(AE_PARTIAL_BUILD)
4138 
4139 #endif
4140 
4141 #if defined(AE_COMPILE_SNNLS) || !defined(AE_PARTIAL_BUILD)
4142 
4143 #endif
4144 
4145 #if defined(AE_COMPILE_SACTIVESETS) || !defined(AE_PARTIAL_BUILD)
4146 
4147 #endif
4148 
4149 #if defined(AE_COMPILE_QQPSOLVER) || !defined(AE_PARTIAL_BUILD)
4150 
4151 #endif
4152 
4153 #if defined(AE_COMPILE_QPDENSEAULSOLVER) || !defined(AE_PARTIAL_BUILD)
4154 
4155 #endif
4156 
4157 #if defined(AE_COMPILE_MINBLEIC) || !defined(AE_PARTIAL_BUILD)
4158 /*************************************************************************
4159                      BOUND CONSTRAINED OPTIMIZATION
4160        WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS
4161 
4162 DESCRIPTION:
4163 The  subroutine  minimizes  function   F(x)  of N arguments subject to any
4164 combination of:
4165 * bound constraints
4166 * linear inequality constraints
4167 * linear equality constraints
4168 
4169 REQUIREMENTS:
4170 * user must provide function value and gradient
4171 * starting point X0 must be feasible or
4172   not too far away from the feasible set
4173 * grad(f) must be Lipschitz continuous on a level set:
4174   L = { x : f(x)<=f(x0) }
4175 * function must be defined everywhere on the feasible set F
4176 
4177 USAGE:
4178 
Constrained optimization is far more complex than unconstrained
optimization. Here we give a very brief outline of the BLEIC optimizer. We
strongly recommend that you read the examples in the ALGLIB Reference Manual
and the ALGLIB User Guide on optimization, which is available at
http://www.alglib.net/optimization/
4183 
4184 1. User initializes algorithm state with MinBLEICCreate() call
4185 
2. User adds boundary and/or linear constraints by calling the
   MinBLEICSetBC() and MinBLEICSetLC() functions.
4188 
4189 3. User sets stopping conditions with MinBLEICSetCond().
4190 
4191 4. User calls MinBLEICOptimize() function which takes algorithm  state and
4192    pointer (delegate, etc.) to callback function which calculates F/G.
4193 
4194 5. User calls MinBLEICResults() to get solution
4195 
6. Optionally user may call MinBLEICRestartFrom() to solve another problem
   with same N but another starting point.
   MinBLEICRestartFrom() allows you to reuse the already initialized
   structure (a usage sketch follows the declarations below).
4199 
4200 NOTE: if you have box-only constraints (no  general  linear  constraints),
4201       then MinBC optimizer can be better option. It uses  special,  faster
4202       constraint activation method, which performs better on problems with
4203       multiple constraints active at the solution.
4204 
4205       On small-scale problems performance of MinBC is similar to  that  of
4206       MinBLEIC, but on large-scale ones (hundreds and thousands of  active
4207       constraints) it can be several times faster than MinBLEIC.
4208 
4209 INPUT PARAMETERS:
4210     N       -   problem dimension, N>0:
4211                 * if given, only leading N elements of X are used
                * if not given, automatically determined from size of X
4213     X       -   starting point, array[N]:
4214                 * it is better to set X to a feasible point
                * but X can be infeasible, in which case the algorithm
                  will try to find a feasible point first, using X as an
                  initial approximation.
4218 
4219 OUTPUT PARAMETERS:
4220     State   -   structure stores algorithm state
4221 
4222   -- ALGLIB --
4223      Copyright 28.11.2010 by Bochkanov Sergey
4224 *************************************************************************/
4225 void minbleiccreate(const ae_int_t n, const real_1d_array &x, minbleicstate &state, const xparams _xparams = alglib::xdefault);
4226 void minbleiccreate(const real_1d_array &x, minbleicstate &state, const xparams _xparams = alglib::xdefault);
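
/*************************************************************************
USAGE SKETCH (illustrative only)

A minimal sketch of the constrained workflow outlined above: create the
optimizer, attach box and linear constraints, set stopping conditions, run
the optimization, and read the results. The bounds, the constraint and the
gradient callback toy_grad() (same form as in the L-BFGS sketches) are
hypothetical; minbleicreport is assumed to be the report type used by
MinBLEICResults().

    alglib::real_1d_array    x    = "[0,0]";
    alglib::real_1d_array    bndl = "[-1,-1]";
    alglib::real_1d_array    bndu = "[+1,+1]";
    alglib::real_2d_array    c    = "[[1,1,0.5]]";  // x0 + x1 >= 0.5
    alglib::integer_1d_array ct   = "[1]";          // ">=" constraint
    alglib::minbleicstate    state;
    alglib::minbleicreport   rep;

    alglib::minbleiccreate(x, state);
    alglib::minbleicsetbc(state, bndl, bndu);
    alglib::minbleicsetlc(state, c, ct);
    alglib::minbleicsetcond(state, 0, 0, 1.0e-8, 0);
    alglib::minbleicoptimize(state, toy_grad);
    alglib::minbleicresults(state, x, rep);
*************************************************************************/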
4227 
4228 
4229 /*************************************************************************
The subroutine is a finite difference variant of MinBLEICCreate(). It uses
finite differences in order to differentiate the target function.

The description below contains only information specific to this function.
We recommend reading the comments on MinBLEICCreate() in order to get more
information about creation of the BLEIC optimizer.
4236 
4237 INPUT PARAMETERS:
4238     N       -   problem dimension, N>0:
4239                 * if given, only leading N elements of X are used
4240                 * if not given, automatically determined from size of X
4241     X       -   starting point, array[0..N-1].
4242     DiffStep-   differentiation step, >0
4243 
4244 OUTPUT PARAMETERS:
4245     State   -   structure which stores algorithm state
4246 
NOTES:
1. the algorithm uses a 4-point central formula for differentiation.
2. the differentiation step along the I-th axis is equal to DiffStep*S[I],
   where S[] is the scaling vector which can be set by a MinBLEICSetScale()
   call.
3. we recommend using moderate values of the differentiation step. Too
   large a step results in large truncation errors, while too small a step
   results in large numerical errors. 1.0E-6 can be a good value to start
   with.
4. Numerical differentiation is very inefficient - one gradient calculation
   needs 4*N function evaluations. This function will work for any N -
   small (1...10), moderate (10...100) or large (100...). However, the
   performance penalty will be too severe for anything but small N.
   We should also say that code which relies on numerical differentiation
   is less robust and less precise. BLEIC needs exact gradient values.
   An imprecise gradient may slow down convergence, especially on highly
   nonlinear problems.
   Thus we recommend using this function for fast prototyping on small-
   dimensional problems only, and implementing the analytical gradient as
   soon as possible.
4267 
4268   -- ALGLIB --
4269      Copyright 16.05.2011 by Bochkanov Sergey
4270 *************************************************************************/
4271 void minbleiccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbleicstate &state, const xparams _xparams = alglib::xdefault);
4272 void minbleiccreatef(const real_1d_array &x, const double diffstep, minbleicstate &state, const xparams _xparams = alglib::xdefault);
4273 
4274 
4275 /*************************************************************************
4276 This function sets boundary constraints for BLEIC optimizer.
4277 
4278 Boundary constraints are inactive by default (after initial creation).
4279 They are preserved after algorithm restart with MinBLEICRestartFrom().
4280 
4281 NOTE: if you have box-only constraints (no  general  linear  constraints),
4282       then MinBC optimizer can be better option. It uses  special,  faster
4283       constraint activation method, which performs better on problems with
4284       multiple constraints active at the solution.
4285 
4286       On small-scale problems performance of MinBC is similar to  that  of
4287       MinBLEIC, but on large-scale ones (hundreds and thousands of  active
4288       constraints) it can be several times faster than MinBLEIC.
4289 
4290 INPUT PARAMETERS:
4291     State   -   structure stores algorithm state
    BndL    -   lower bounds, array[N].
                If some (all) variables are unbounded, you may specify a
                very small number or -INF.
    BndU    -   upper bounds, array[N].
                If some (all) variables are unbounded, you may specify a
                very large number or +INF.
4298 
4299 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
4300 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
4301 
4302 NOTE 2: this solver has following useful properties:
4303 * bound constraints are always satisfied exactly
4304 * function is evaluated only INSIDE area specified by  bound  constraints,
4305   even  when  numerical  differentiation is used (algorithm adjusts  nodes
4306   according to boundary constraints)
4307 
4308   -- ALGLIB --
4309      Copyright 28.11.2010 by Bochkanov Sergey
4310 *************************************************************************/
4311 void minbleicsetbc(const minbleicstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
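
/*************************************************************************
USAGE SKETCH (illustrative only)

Box constraints with one side left unbounded; assumes an already created
optimizer named state and that alglib::fp_posinf is the +INF constant from
ap.h. The bound values are hypothetical.

    alglib::real_1d_array bndl = "[0.0,-1.0]";
    alglib::real_1d_array bndu = "[1.0,0.0]";
    bndu[0] = alglib::fp_posinf;              // first variable: only x0>=0
    alglib::minbleicsetbc(state, bndl, bndu);
*************************************************************************/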
4312 
4313 
4314 /*************************************************************************
4315 This function sets linear constraints for BLEIC optimizer.
4316 
4317 Linear constraints are inactive by default (after initial creation).
4318 They are preserved after algorithm restart with MinBLEICRestartFrom().
4319 
4320 INPUT PARAMETERS:
4321     State   -   structure previously allocated with MinBLEICCreate call.
4322     C       -   linear constraints, array[K,N+1].
4323                 Each row of C represents one constraint, either equality
4324                 or inequality (see below):
4325                 * first N elements correspond to coefficients,
4326                 * last element corresponds to the right part.
4327                 All elements of C (including right part) must be finite.
4328     CT      -   type of constraints, array[K]:
4329                 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n]
4330                 * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n]
4331                 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n]
4332     K       -   number of equality/inequality constraints, K>=0:
4333                 * if given, only leading K elements of C/CT are used
4334                 * if not given, automatically determined from sizes of C/CT
4335 
4336 NOTE 1: linear (non-bound) constraints are satisfied only approximately:
4337 * there always exists some minor violation (about Epsilon in magnitude)
4338   due to rounding errors
4339 * numerical differentiation, if used, may  lead  to  function  evaluations
4340   outside  of the feasible  area,   because   algorithm  does  NOT  change
4341   numerical differentiation formula according to linear constraints.
4342 If you want constraints to be  satisfied  exactly, try to reformulate your
4343 problem  in  such  manner  that  all constraints will become boundary ones
4344 (this kind of constraints is always satisfied exactly, both in  the  final
4345 solution and in all intermediate points).
4346 
4347   -- ALGLIB --
4348      Copyright 28.11.2010 by Bochkanov Sergey
4349 *************************************************************************/
4350 void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
4351 void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
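
/*************************************************************************
USAGE SKETCH (illustrative only)

Two linear constraints on a two-variable problem, passed as the C/CT pair
described above; assumes an already created optimizer named state and
hypothetical coefficients.

    // row 0:  x0 + 2*x1 <= 4      (CT[0] = -1)
    // row 1:  x0 -   x1  = 1      (CT[1] =  0)
    alglib::real_2d_array    c  = "[[1,2,4],[1,-1,1]]";
    alglib::integer_1d_array ct = "[-1,0]";
    alglib::minbleicsetlc(state, c, ct);
*************************************************************************/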
4352 
4353 
4354 /*************************************************************************
4355 This function sets stopping conditions for the optimizer.
4356 
4357 INPUT PARAMETERS:
4358     State   -   structure which stores algorithm state
4359     EpsG    -   >=0
4360                 The  subroutine  finishes  its  work   if   the  condition
4361                 |v|<EpsG is satisfied, where:
                * |.| means Euclidean norm
4363                 * v - scaled gradient vector, v[i]=g[i]*s[i]
4364                 * g - gradient
4365                 * s - scaling coefficients set by MinBLEICSetScale()
4366     EpsF    -   >=0
4367                 The  subroutine  finishes  its work if on k+1-th iteration
4368                 the  condition  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
4369                 is satisfied.
4370     EpsX    -   >=0
4371                 The subroutine finishes its work if  on  k+1-th  iteration
4372                 the condition |v|<=EpsX is fulfilled, where:
                * |.| means Euclidean norm
4374                 * v - scaled step vector, v[i]=dx[i]/s[i]
4375                 * dx - step vector, dx=X(k+1)-X(k)
4376                 * s - scaling coefficients set by MinBLEICSetScale()
4377     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
4378                 iterations is unlimited.
4379 
4380 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
4381 to automatic stopping criterion selection.
4382 
NOTE: when SetCond() is called with a non-zero MaxIts, the BLEIC solver
      may perform slightly more than MaxIts iterations, i.e. MaxIts sets
      a non-strict limit on the iteration count.
4386 
4387   -- ALGLIB --
4388      Copyright 28.11.2010 by Bochkanov Sergey
4389 *************************************************************************/
4390 void minbleicsetcond(const minbleicstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
4391 
4392 
4393 /*************************************************************************
4394 This function sets scaling coefficients for BLEIC optimizer.
4395 
4396 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
4397 size and gradient are scaled before comparison with tolerances).  Scale of
4398 the I-th variable is a translation invariant measure of:
4399 a) "how large" the variable is
4400 b) how large the step should be to make significant changes in the function
4401 
4402 Scaling is also used by finite difference variant of the optimizer  - step
4403 along I-th axis is equal to DiffStep*S[I].
4404 
4405 In  most  optimizers  (and  in  the  BLEIC  too)  scaling is NOT a form of
4406 preconditioning. It just  affects  stopping  conditions.  You  should  set
4407 preconditioner  by  separate  call  to  one  of  the  MinBLEICSetPrec...()
4408 functions.
4409 
There is a special preconditioning mode, however, which uses scaling
coefficients to form a diagonal preconditioning matrix. You can turn this
mode on if you want, but you should understand that scaling is not the
same thing as preconditioning - these are two different, although related,
forms of tuning the solver.
4415 
4416 INPUT PARAMETERS:
4417     State   -   structure stores algorithm state
4418     S       -   array[N], non-zero scaling coefficients
4419                 S[i] may be negative, sign doesn't matter.
4420 
4421   -- ALGLIB --
4422      Copyright 14.01.2011 by Bochkanov Sergey
4423 *************************************************************************/
4424 void minbleicsetscale(const minbleicstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
4425 
4426 
4427 /*************************************************************************
4428 Modification of the preconditioner: preconditioning is turned off.
4429 
4430 INPUT PARAMETERS:
4431     State   -   structure which stores algorithm state
4432 
4433   -- ALGLIB --
4434      Copyright 13.10.2010 by Bochkanov Sergey
4435 *************************************************************************/
4436 void minbleicsetprecdefault(const minbleicstate &state, const xparams _xparams = alglib::xdefault);
4437 
4438 
4439 /*************************************************************************
4440 Modification  of  the  preconditioner:  diagonal of approximate Hessian is
4441 used.
4442 
4443 INPUT PARAMETERS:
4444     State   -   structure which stores algorithm state
4445     D       -   diagonal of the approximate Hessian, array[0..N-1],
4446                 (if larger, only leading N elements are used).
4447 
4448 NOTE 1: D[i] should be positive. Exception will be thrown otherwise.
4449 
4450 NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
4451 
4452   -- ALGLIB --
4453      Copyright 13.10.2010 by Bochkanov Sergey
4454 *************************************************************************/
4455 void minbleicsetprecdiag(const minbleicstate &state, const real_1d_array &d, const xparams _xparams = alglib::xdefault);
4456 
4457 
4458 /*************************************************************************
4459 Modification of the preconditioner: scale-based diagonal preconditioning.
4460 
This preconditioning mode can be useful when you don't have an approximate
diagonal of the Hessian, but you know that your variables are badly scaled
(for example, one variable is in [1,10], and another in [1000,100000]),
and most of the ill-conditioning comes from the different scales of the
variables.

In this case a simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
can greatly improve convergence.

IMPORTANT: you should set the scale of your variables with a
MinBLEICSetScale() call (before or after the MinBLEICSetPrecScale() call).
Without knowledge of the scale of your variables, the scale-based
preconditioner will be just a unit matrix.
4473 
4474 INPUT PARAMETERS:
4475     State   -   structure which stores algorithm state
4476 
4477   -- ALGLIB --
4478      Copyright 13.10.2010 by Bochkanov Sergey
4479 *************************************************************************/
4480 void minbleicsetprecscale(const minbleicstate &state, const xparams _xparams = alglib::xdefault);
4481 
4482 
4483 /*************************************************************************
4484 This function turns on/off reporting.
4485 
4486 INPUT PARAMETERS:
4487     State   -   structure which stores algorithm state
4488     NeedXRep-   whether iteration reports are needed or not
4489 
4490 If NeedXRep is True, algorithm will call rep() callback function if  it is
4491 provided to MinBLEICOptimize().
4492 
4493   -- ALGLIB --
4494      Copyright 28.11.2010 by Bochkanov Sergey
4495 *************************************************************************/
4496 void minbleicsetxrep(const minbleicstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
4497 
4498 
4499 /*************************************************************************
This function sets the maximum step length.
4501 
IMPORTANT: this feature is hard to combine with preconditioning. You can't
set an upper limit on the step length when you solve an optimization
problem with linear (non-boundary) constraints AND a preconditioner turned
on.

When non-boundary constraints are present, you have to either a) use a
preconditioner, or b) use an upper limit on the step length. YOU CAN'T USE
BOTH! If you do, the algorithm will terminate with an appropriate error
code.
4509 
4510 INPUT PARAMETERS:
4511     State   -   structure which stores algorithm state
4512     StpMax  -   maximum step length, >=0. Set StpMax to 0.0,  if you don't
4513                 want to limit step length.
4514 
Use this subroutine when you optimize a target function which contains
exp() or other fast growing functions, and the optimization algorithm makes
steps so large that they lead to overflow. This function allows us to reject
steps that are too large (and therefore expose us to possible overflow)
without actually calculating the function value at x+stp*d.
4520 
4521   -- ALGLIB --
4522      Copyright 02.04.2010 by Bochkanov Sergey
4523 *************************************************************************/
4524 void minbleicsetstpmax(const minbleicstate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
4525 
4526 
4527 /*************************************************************************
This function provides the reverse communication interface. The reverse
communication interface is not documented and is not recommended for use.
See below for functions which provide a better documented API.
4531 *************************************************************************/
4532 bool minbleiciteration(const minbleicstate &state, const xparams _xparams = alglib::xdefault);
4533 
4534 
4535 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear
optimizer.

These functions accept the following parameters:
4539     state   -   algorithm state
4540     func    -   callback which calculates function (or merit function)
4541                 value func at given point x
4542     grad    -   callback which calculates function (or merit function)
4543                 value func and gradient grad at given point x
4544     rep     -   optional callback which is called after each iteration
4545                 can be NULL
4546     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
4547                 can be NULL
4548 
4549 NOTES:
4550 
4551 1. This function has two different implementations: one which  uses  exact
4552    (analytical) user-supplied gradient,  and one which uses function value
4553    only  and  numerically  differentiates  function  in  order  to  obtain
4554    gradient.
4555 
4556    Depending  on  the  specific  function  used to create optimizer object
4557    (either  MinBLEICCreate() for analytical gradient or  MinBLEICCreateF()
4558    for numerical differentiation) you should choose appropriate variant of
4559    MinBLEICOptimize() - one  which  accepts  function  AND gradient or one
4560    which accepts function ONLY.
4561 
4562    Be careful to choose variant of MinBLEICOptimize() which corresponds to
4563    your optimization scheme! Table below lists different  combinations  of
4564    callback (function/gradient) passed to MinBLEICOptimize()  and specific
4565    function used to create optimizer.
4566 
4567 
4568                      |         USER PASSED TO MinBLEICOptimize()
4569    CREATED WITH      |  function only   |  function and gradient
4570    ------------------------------------------------------------
4571    MinBLEICCreateF() |     work                FAIL
4572    MinBLEICCreate()  |     FAIL                work
4573 
   Here "FAIL" denotes inappropriate combinations  of  optimizer  creation
   function  and  MinBLEICOptimize()  version.  Attempts  to   use   such
   a combination (for example, to create an optimizer with MinBLEICCreateF()
   and  to  pass  gradient information to MinBLEICOptimize()) will lead to
   an exception being thrown. Either you did not pass the gradient when it
   WAS needed, or you passed the gradient when it was NOT needed.
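
   For illustration, a minimal sketch of the analytic-gradient variant  is
   shown below (the quadratic target and all names are hypothetical; error
   handling and includes are omitted):
>
> void my_grad(const alglib::real_1d_array &x, double &func,
>              alglib::real_1d_array &grad, void *ptr)
> {
>     // f(x) = x0^2 + x1^2, with gradient (2*x0, 2*x1)
>     func = x[0]*x[0] + x[1]*x[1];
>     grad[0] = 2*x[0];
>     grad[1] = 2*x[1];
> }
>
> // ...inside the calling code:
> alglib::real_1d_array x = "[3,3]";
> alglib::minbleicstate state;
> alglib::minbleicreport rep;
> alglib::minbleiccreate(x, state);           // created with analytic gradient
> alglib::minbleicsetcond(state, 0, 0, 1.0e-9, 0);
> alglib::minbleicoptimize(state, my_grad);   // so we pass the grad-callback
> alglib::minbleicresults(state, x, rep);
>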
4580 
4581   -- ALGLIB --
4582      Copyright 28.11.2010 by Bochkanov Sergey
4583 
4584 *************************************************************************/
4585 void minbleicoptimize(minbleicstate &state,
4586     void (*func)(const real_1d_array &x, double &func, void *ptr),
4587     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
4588     void *ptr = NULL,
4589     const xparams _xparams = alglib::xdefault);
4590 void minbleicoptimize(minbleicstate &state,
4591     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
4592     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
4593     void *ptr = NULL,
4594     const xparams _xparams = alglib::xdefault);
4595 
4596 
4597 /*************************************************************************
4598 This  function  activates/deactivates verification  of  the  user-supplied
4599 analytic gradient.
4600 
4601 Upon  activation  of  this  option  OptGuard  integrity  checker  performs
4602 numerical differentiation of your target function  at  the  initial  point
4603 (note: future versions may also perform check  at  the  final  point)  and
4604 compares numerical gradient with analytic one provided by you.
4605 
If the difference is too large, an error flag is set and the  optimization
session continues. After the optimization session is  over,  you  can
retrieve the report which stores both gradients and the specific components
highlighted as suspicious by OptGuard.
4610 
4611 The primary OptGuard report can be retrieved with minbleicoptguardresults().
4612 
4613 IMPORTANT: gradient check is a high-overhead option which  will  cost  you
4614            about 3*N additional function evaluations. In many cases it may
4615            cost as much as the rest of the optimization session.
4616 
4617            YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO
4618            CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY.
4619 
4620 NOTE: unlike previous incarnation of the gradient checking code,  OptGuard
4621       does NOT interrupt optimization even if it discovers bad gradient.
4622 
4623 INPUT PARAMETERS:
4624     State       -   structure used to store algorithm state
4625     TestStep    -   verification step used for numerical differentiation:
4626                     * TestStep=0 turns verification off
4627                     * TestStep>0 activates verification
                    You should choose TestStep carefully. A value which is
                    too large (so large that function behavior is non-cubic
                    at this scale) will lead to false  alarms.  Too  short
                    a step will result in rounding errors  dominating  the
                    numerical derivative.
4633 
4634                     You may use different step for different parameters by
4635                     means of setting scale with minbleicsetscale().
4636 
4637 === EXPLANATION ==========================================================
4638 
In order to verify the gradient, the algorithm performs the following steps:
4640   * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
4641     where X[i] is i-th component of the initial point and S[i] is a  scale
4642     of i-th parameter
4643   * F(X) is evaluated at these trial points
4644   * we perform one more evaluation in the middle point of the interval
4645   * we  build  cubic  model using function values and derivatives at trial
4646     points and we compare its prediction with actual value in  the  middle
4647     point
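
A minimal activation sketch (the 1.0E-3 test step is arbitrary; state  and
my_grad are assumed to be defined elsewhere):
>
> alglib::minbleicoptguardgradient(state, 1.0e-3); // activate BEFORE optimization
> alglib::minbleicoptimize(state, my_grad);
> // ...after optimization, inspect the report via minbleicoptguardresults()
>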
4648 
4649   -- ALGLIB --
4650      Copyright 15.06.2014 by Bochkanov Sergey
4651 *************************************************************************/
4652 void minbleicoptguardgradient(const minbleicstate &state, const double teststep, const xparams _xparams = alglib::xdefault);
4653 
4654 
4655 /*************************************************************************
This function activates/deactivates the nonsmoothness monitoring option of
the OptGuard integrity checker. The smoothness monitor  silently  observes
the solution process and tries to detect ill-posed problems, i.e. ones with:
4659 a) discontinuous target function (non-C0)
4660 b) nonsmooth     target function (non-C1)
4661 
4662 Smoothness monitoring does NOT interrupt optimization  even if it suspects
4663 that your problem is nonsmooth. It just sets corresponding  flags  in  the
4664 OptGuard report which can be retrieved after optimization is over.
4665 
4666 Smoothness monitoring is a moderate overhead option which often adds  less
4667 than 1% to the optimizer running time. Thus, you can use it even for large
4668 scale problems.
4669 
4670 NOTE: OptGuard does  NOT  guarantee  that  it  will  always  detect  C0/C1
4671       continuity violations.
4672 
4673       First, minor errors are hard to  catch - say, a 0.0001 difference in
4674       the model values at two sides of the gap may be due to discontinuity
4675       of the model - or simply because the model has changed.
4676 
4677       Second, C1-violations  are  especially  difficult  to  detect  in  a
4678       noninvasive way. The optimizer usually  performs  very  short  steps
4679       near the nonsmoothness, and differentiation  usually   introduces  a
4680       lot of numerical noise.  It  is  hard  to  tell  whether  some  tiny
4681       discontinuity in the slope is due to real nonsmoothness or just  due
4682       to numerical noise alone.
4683 
      Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases  they  can  be
      spotted by restarting from a different initial point).
4687 
4688 INPUT PARAMETERS:
4689     state   -   algorithm state
4690     level   -   monitoring level:
4691                 * 0 - monitoring is disabled
4692                 * 1 - noninvasive low-overhead monitoring; function values
4693                       and/or gradients are recorded, but OptGuard does not
4694                       try to perform additional evaluations  in  order  to
4695                       get more information about suspicious locations.
4696 
4697 === EXPLANATION ==========================================================
4698 
4699 One major source of headache during optimization  is  the  possibility  of
coding errors in the target function/constraints (or their gradients).
4701 Such  errors   most   often   manifest   themselves  as  discontinuity  or
4702 nonsmoothness of the target/constraints.
4703 
4704 Another frequent situation is when you try to optimize something involving
4705 lots of min() and max() operations, i.e. nonsmooth target. Although not  a
4706 coding error, it is nonsmoothness anyway - and smooth  optimizers  usually
4707 stop right after encountering nonsmoothness, well before reaching solution.
4708 
The OptGuard integrity checker helps you to catch  such  situations:  it
monitors function values/gradients being passed to the optimizer and tries
to detect errors. Upon discovering a suspicious pair of points  it  raises
the appropriate flag (and allows you to continue  optimization).  When
optimization is done, you can study the OptGuard result.
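
A minimal activation sketch (state and my_grad are assumed to be  defined
elsewhere; output formatting is arbitrary):
>
> alglib::minbleicoptguardsmoothness(state);  // level=1, call BEFORE optimization
> alglib::minbleicoptimize(state, my_grad);
>
> alglib::optguardreport ogrep;
> alglib::minbleicoptguardresults(state, ogrep);
> if( ogrep.nonc0suspected || ogrep.nonc1suspected )
>     printf("target is suspected to be discontinuous or nonsmooth\n");
>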
4714 
4715   -- ALGLIB --
4716      Copyright 21.11.2018 by Bochkanov Sergey
4717 *************************************************************************/
4718 void minbleicoptguardsmoothness(const minbleicstate &state, const ae_int_t level, const xparams _xparams = alglib::xdefault);
4719 void minbleicoptguardsmoothness(const minbleicstate &state, const xparams _xparams = alglib::xdefault);
4720 
4721 
4722 /*************************************************************************
4723 Results of OptGuard integrity check, should be called  after  optimization
4724 session is over.
4725 
4726 === PRIMARY REPORT =======================================================
4727 
4728 OptGuard performs several checks which are intended to catch common errors
4729 in the implementation of nonlinear function/gradient:
4730 * incorrect analytic gradient
4731 * discontinuous (non-C0) target functions (constraints)
4732 * nonsmooth     (non-C1) target functions (constraints)
4733 
4734 Each of these checks is activated with appropriate function:
4735 * minbleicoptguardgradient() for gradient verification
4736 * minbleicoptguardsmoothness() for C0/C1 checks
4737 
4738 Following flags are set when these errors are suspected:
4739 * rep.badgradsuspected, and additionally:
4740   * rep.badgradvidx for specific variable (gradient element) suspected
4741   * rep.badgradxbase, a point where gradient is tested
4742   * rep.badgraduser, user-provided gradient  (stored  as  2D  matrix  with
4743     single row in order to make  report  structure  compatible  with  more
4744     complex optimizers like MinNLC or MinLM)
4745   * rep.badgradnum,   reference    gradient    obtained    via   numerical
4746     differentiation (stored as  2D matrix with single row in order to make
4747     report structure compatible with more complex optimizers  like  MinNLC
4748     or MinLM)
4749 * rep.nonc0suspected
4750 * rep.nonc1suspected
4751 
4752 === ADDITIONAL REPORTS/LOGS ==============================================
4753 
Several different tests are performed to catch C0/C1 errors; you can  find
out which specific test signaled the error by looking at:
4756 * rep.nonc0test0positive, for non-C0 test #0
4757 * rep.nonc1test0positive, for non-C1 test #0
4758 * rep.nonc1test1positive, for non-C1 test #1
4759 
4760 Additional information (including line search logs)  can  be  obtained  by
4761 means of:
4762 * minbleicoptguardnonc1test0results()
4763 * minbleicoptguardnonc1test1results()
4764 which return detailed error reports, specific points where discontinuities
4765 were found, and so on.
4766 
4767 ==========================================================================
4768 
4769 INPUT PARAMETERS:
4770     state   -   algorithm state
4771 
4772 OUTPUT PARAMETERS:
4773     rep     -   generic OptGuard report;  more  detailed  reports  can  be
4774                 retrieved with other functions.
4775 
4776 NOTE: false negatives (nonsmooth problems are not identified as  nonsmooth
4777       ones) are possible although unlikely.
4778 
4779       The reason  is  that  you  need  to  make several evaluations around
4780       nonsmoothness  in  order  to  accumulate  enough  information  about
4781       function curvature. Say, if you start right from the nonsmooth point,
4782       optimizer simply won't get enough data to understand what  is  going
4783       wrong before it terminates due to abrupt changes in the  derivative.
4784       It is also  possible  that  "unlucky"  step  will  move  us  to  the
4785       termination too quickly.
4786 
4787       Our current approach is to have less than 0.1%  false  negatives  in
4788       our test examples  (measured  with  multiple  restarts  from  random
4789       points), and to have exactly 0% false positives.
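
A sketch of a typical post-optimization check (printf formatting and names
are illustrative; see the field descriptions above):
>
> alglib::optguardreport ogrep;
> alglib::minbleicoptguardresults(state, ogrep);
> if( ogrep.badgradsuspected )
> {
>     // both gradients are stored as 1xN matrices, see the note above
>     int j = (int)ogrep.badgradvidx;
>     printf("analytic  dF/dx[%d] = %g\n", j, ogrep.badgraduser[0][j]);
>     printf("numerical dF/dx[%d] = %g\n", j, ogrep.badgradnum[0][j]);
> }
> if( ogrep.nonc0suspected ) printf("non-C0 target suspected\n");
> if( ogrep.nonc1suspected ) printf("non-C1 target suspected\n");
>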
4790 
4791   -- ALGLIB --
4792      Copyright 21.11.2018 by Bochkanov Sergey
4793 *************************************************************************/
4794 void minbleicoptguardresults(const minbleicstate &state, optguardreport &rep, const xparams _xparams = alglib::xdefault);
4795 
4796 
4797 /*************************************************************************
4798 Detailed results of the OptGuard integrity check for nonsmoothness test #0
4799 
4800 Nonsmoothness (non-C1) test #0 studies  function  values  (not  gradient!)
4801 obtained during line searches and monitors  behavior  of  the  directional
4802 derivative estimate.
4803 
4804 This test is less powerful than test #1, but it does  not  depend  on  the
4805 gradient values and thus it is more robust against artifacts introduced by
4806 numerical differentiation.
4807 
Two reports are returned:
* a "strongest" one, corresponding to the line search which had the highest
  value of the nonsmoothness indicator
* a "longest" one, corresponding to the line search which had the  largest
  number of function evaluations, and thus is more detailed
4813 
4814 In both cases following fields are returned:
4815 
4816 * positive - is TRUE  when test flagged suspicious point;  FALSE  if  test
4817   did not notice anything (in the latter cases fields below are empty).
4818 * x0[], d[] - arrays of length N which store initial point  and  direction
4819   for line search (d[] can be normalized, but does not have to)
4820 * stp[], f[] - arrays of length CNT which store step lengths and  function
4821   values at these points; f[i] is evaluated in x0+stp[i]*d.
4822 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
4823   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
4824   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
4826 
4827 ==========================================================================
4828 = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it -  you  will
4829 =                   see where C1 continuity is violated.
4830 ==========================================================================
4831 
4832 INPUT PARAMETERS:
4833     state   -   algorithm state
4834 
4835 OUTPUT PARAMETERS:
4836     strrep  -   C1 test #0 "strong" report
4837     lngrep  -   C1 test #0 "long" report
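
A sketch of how the "long" report can be dumped for the 2D plot  mentioned
above (names and formatting are illustrative):
>
> alglib::optguardnonc1test0report strrep, lngrep;
> alglib::minbleicoptguardnonc1test0results(state, strrep, lngrep);
> if( lngrep.positive )
> {
>     for(int i=0; i<lngrep.cnt; i++)
>         printf("%g %g\n", lngrep.stp[i], lngrep.f[i]);   // plot stp vs f
>     printf("suspected C1 violation between steps %d and %d\n",
>            (int)lngrep.stpidxa, (int)lngrep.stpidxb);
> }
>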
4838 
4839   -- ALGLIB --
4840      Copyright 21.11.2018 by Bochkanov Sergey
4841 *************************************************************************/
4842 void minbleicoptguardnonc1test0results(const minbleicstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams = alglib::xdefault);
4843 
4844 
4845 /*************************************************************************
4846 Detailed results of the OptGuard integrity check for nonsmoothness test #1
4847 
4848 Nonsmoothness (non-C1)  test  #1  studies  individual  components  of  the
4849 gradient computed during line search.
4850 
4851 When precise analytic gradient is provided this test is more powerful than
4852 test #0  which  works  with  function  values  and  ignores  user-provided
4853 gradient.  However,  test  #0  becomes  more   powerful   when   numerical
4854 differentiation is employed (in such cases test #1 detects  higher  levels
4855 of numerical noise and becomes too conservative).
4856 
4857 This test also tells specific components of the gradient which violate  C1
4858 continuity, which makes it more informative than #0, which just tells that
4859 continuity is violated.
4860 
Two reports are returned:
* a "strongest" one, corresponding to the line search which had the highest
  value of the nonsmoothness indicator
* a "longest" one, corresponding to the line search which had the  largest
  number of function evaluations, and thus is more detailed
4866 
4867 In both cases following fields are returned:
4868 
4869 * positive - is TRUE  when test flagged suspicious point;  FALSE  if  test
4870   did not notice anything (in the latter cases fields below are empty).
4871 * vidx - is an index of the variable in [0,N) with nonsmooth derivative
4872 * x0[], d[] - arrays of length N which store initial point  and  direction
4873   for line search (d[] can be normalized, but does not have to)
4874 * stp[], g[] - arrays of length CNT which store step lengths and  gradient
4875   values at these points; g[i] is evaluated in  x0+stp[i]*d  and  contains
4876   vidx-th component of the gradient.
4877 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
4878   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
4879   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
4881 
4882 ==========================================================================
= SHORTLY SPEAKING: build a 2D plot of (stp,g) and look at it -  you  will
4884 =                   see where C1 continuity is violated.
4885 ==========================================================================
4886 
4887 INPUT PARAMETERS:
4888     state   -   algorithm state
4889 
4890 OUTPUT PARAMETERS:
4891     strrep  -   C1 test #1 "strong" report
4892     lngrep  -   C1 test #1 "long" report
4893 
4894   -- ALGLIB --
4895      Copyright 21.11.2018 by Bochkanov Sergey
4896 *************************************************************************/
4897 void minbleicoptguardnonc1test1results(const minbleicstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams = alglib::xdefault);
4898 
4899 
4900 /*************************************************************************
4901 BLEIC results
4902 
4903 INPUT PARAMETERS:
4904     State   -   algorithm state
4905 
4906 OUTPUT PARAMETERS:
4907     X       -   array[0..N-1], solution
4908     Rep     -   optimization report. You should check Rep.TerminationType
4909                 in  order  to  distinguish  successful  termination  from
4910                 unsuccessful one:
4911                 * -8    internal integrity control  detected  infinite or
4912                         NAN   values   in   function/gradient.   Abnormal
4913                         termination signalled.
4914                 * -3   inconsistent constraints. Feasible point is
4915                        either nonexistent or too hard to find. Try to
4916                        restart optimizer with better initial approximation
4917                 *  1   relative function improvement is no more than EpsF.
4918                 *  2   scaled step is no more than EpsX.
4919                 *  4   scaled gradient norm is no more than EpsG.
                *  5   MaxIts steps were taken
4921                 *  8   terminated by user who called minbleicrequesttermination().
4922                        X contains point which was "current accepted"  when
4923                        termination request was submitted.
4924                 More information about fields of this  structure  can  be
4925                 found in the comments on MinBLEICReport datatype.
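
A minimal retrieval sketch (state is assumed  to  hold  a  finished  BLEIC
session; output formatting is arbitrary):
>
> alglib::real_1d_array x;
> alglib::minbleicreport rep;
> alglib::minbleicresults(state, x, rep);
> if( rep.terminationtype>0 )
>     printf("success, x[0]=%g, %d iterations\n", x[0], (int)rep.iterationscount);
> else
>     printf("failure, termination type %d\n", (int)rep.terminationtype);
>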
4926 
4927   -- ALGLIB --
4928      Copyright 28.11.2010 by Bochkanov Sergey
4929 *************************************************************************/
4930 void minbleicresults(const minbleicstate &state, real_1d_array &x, minbleicreport &rep, const xparams _xparams = alglib::xdefault);
4931 
4932 
4933 /*************************************************************************
4934 BLEIC results
4935 
Buffered implementation of MinBLEICResults() which uses  a  pre-allocated
buffer to store X[]. If the buffer size is too small, it resizes  it.  It
is intended to be used in the inner cycles  of  performance-critical
algorithms where the array reallocation penalty is too large to be ignored.
4940 
4941   -- ALGLIB --
4942      Copyright 28.11.2010 by Bochkanov Sergey
4943 *************************************************************************/
4944 void minbleicresultsbuf(const minbleicstate &state, real_1d_array &x, minbleicreport &rep, const xparams _xparams = alglib::xdefault);
4945 
4946 
4947 /*************************************************************************
4948 This subroutine restarts algorithm from new point.
4949 All optimization parameters (including constraints) are left unchanged.
4950 
This function allows solving multiple optimization  problems  (which  must
have the same number of dimensions) without the object reallocation penalty.
4953 
4954 INPUT PARAMETERS:
4955     State   -   structure previously allocated with MinBLEICCreate call.
4956     X       -   new starting point.
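
A sketch of solving two same-dimensional problems with one optimizer object
(the callbacks my_grad_a/my_grad_b and the arrays are hypothetical):
>
> alglib::minbleiccreate(x0, state);
> alglib::minbleicoptimize(state, my_grad_a);
> alglib::minbleicresults(state, xa, rep);
>
> alglib::minbleicrestartfrom(state, x0);      // reuse state, no reallocation
> alglib::minbleicoptimize(state, my_grad_b);
> alglib::minbleicresults(state, xb, rep);
>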
4957 
4958   -- ALGLIB --
4959      Copyright 28.11.2010 by Bochkanov Sergey
4960 *************************************************************************/
4961 void minbleicrestartfrom(const minbleicstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
4962 
4963 
4964 /*************************************************************************
This subroutine submits a request for termination of the running optimizer.
It should be called from a user-supplied callback when the  user  decides
that it is time to "smoothly" terminate the optimization  process.  As  a
result, the optimizer stops at the point which was  "current  accepted"
when the termination request was submitted and  returns  code  8
(successful termination).
4970 
4971 INPUT PARAMETERS:
4972     State   -   optimizer structure
4973 
4974 NOTE: after  request  for  termination  optimizer  may   perform   several
4975       additional calls to user-supplied callbacks. It does  NOT  guarantee
4976       to stop immediately - it just guarantees that these additional calls
4977       will be discarded later.
4978 
4979 NOTE: calling this function on optimizer which is NOT running will have no
4980       effect.
4981 
4982 NOTE: multiple calls to this function are possible. First call is counted,
4983       subsequent calls are silently ignored.
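
A sketch of a report callback which asks the optimizer to stop as soon  as
the target drops below some threshold (the threshold and  all  names  are
arbitrary):
>
> void my_rep(const alglib::real_1d_array &x, double func, void *ptr)
> {
>     alglib::minbleicstate *s = (alglib::minbleicstate*)ptr;
>     if( func<1.0e-3 )
>         alglib::minbleicrequesttermination(*s);   // optimizer returns code 8
> }
>
> // pass the state itself as the callback parameter:
> alglib::minbleicoptimize(state, my_grad, my_rep, &state);
>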
4984 
4985   -- ALGLIB --
4986      Copyright 08.10.2014 by Bochkanov Sergey
4987 *************************************************************************/
4988 void minbleicrequesttermination(const minbleicstate &state, const xparams _xparams = alglib::xdefault);
4989 #endif
4990 
4991 #if defined(AE_COMPILE_QPBLEICSOLVER) || !defined(AE_PARTIAL_BUILD)
4992 
4993 #endif
4994 
4995 #if defined(AE_COMPILE_VIPMSOLVER) || !defined(AE_PARTIAL_BUILD)
4996 
4997 #endif
4998 
4999 #if defined(AE_COMPILE_MINQP) || !defined(AE_PARTIAL_BUILD)
5000 /*************************************************************************
5001                     CONSTRAINED QUADRATIC PROGRAMMING
5002 
5003 The subroutine creates QP optimizer. After initial creation,  it  contains
5004 default optimization problem with zero quadratic and linear terms  and  no
5005 constraints.
5006 
5007 In order to actually solve something you should:
5008 * set cost vector with minqpsetlinearterm()
5009 * set variable bounds with minqpsetbc() or minqpsetbcall()
5010 * specify constraint matrix with one of the following functions:
5011   * modern API:
5012     * minqpsetlc2()       for sparse two-sided constraints AL <= A*x <= AU
5013     * minqpsetlc2dense()  for dense  two-sided constraints AL <= A*x <= AU
5014     * minqpsetlc2mixed()  for mixed  two-sided constraints AL <= A*x <= AU
5015     * minqpaddlc2dense()  to add one dense row to the dense constraint submatrix
5016     * minqpaddlc2()       to add one sparse row to the sparse constraint submatrix
5017     * minqpaddlc2sparsefromdense() to add one sparse row (passed as a dense array) to the sparse constraint submatrix
5018   * legacy API:
5019     * minqpsetlc()        for dense one-sided equality/inequality constraints
5020     * minqpsetlcsparse()  for sparse one-sided equality/inequality constraints
5021     * minqpsetlcmixed()   for mixed dense/sparse one-sided equality/inequality constraints
5022 * choose appropriate QP solver and set it  and  its stopping  criteria  by
5023   means of minqpsetalgo??????() function
5024 * call minqpoptimize() to run the solver and  minqpresults()  to  get  the
5025   solution vector and additional information.
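
The steps above are illustrated by the following minimal sketch (a  simple
2-dimensional convex problem with box constraints only; all  values  are
arbitrary):
>
> alglib::real_2d_array a = "[[2,0],[0,2]]";   // quadratic term (0.5 convention)
> alglib::real_1d_array b = "[-2,-6]";         // linear term
> alglib::real_1d_array bndl = "[0,0]", bndu = "[+inf,+inf]";
> alglib::real_1d_array s = "[1,1]", x;
> alglib::minqpstate state;
> alglib::minqpreport rep;
>
> alglib::minqpcreate(2, state);
> alglib::minqpsetquadraticterm(state, a);
> alglib::minqpsetlinearterm(state, b);
> alglib::minqpsetbc(state, bndl, bndu);
> alglib::minqpsetscale(state, s);
> alglib::minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, true); // box-only => QuickQP
> alglib::minqpoptimize(state);
> alglib::minqpresults(state, x, rep);
>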
5026 
5027 Following solvers are recommended for convex and semidefinite problems:
5028 * QuickQP for dense problems with box-only constraints (or no constraints
5029   at all)
5030 * DENSE-IPM-QP for  convex  or  semidefinite  problems  with   medium  (up
5031   to several thousands) variable count, dense/sparse  quadratic  term  and
5032   any number  (up  to  many  thousands)  of  dense/sparse  general  linear
5033   constraints
5034 * SPARSE-IPM-QP for convex  or  semidefinite  problems  with   large (many
5035   thousands) variable count, sparse quadratic term AND linear constraints.
5036 
5037 If your problem happens to be nonconvex,  but  either  (a) is  effectively
5038 convexified under constraints,  or  (b)  has  unique  solution  even  with
5039 nonconvex target, then you can use:
5040 * QuickQP for dense nonconvex problems with box-only constraints
5041 * DENSE-AUL-QP  for   dense   nonconvex   problems  which  are effectively
5042   convexified under constraints with up to several thousands of  variables
5043   and any (small or large) number of general linear constraints
5044 * QP-BLEIC for dense/sparse problems with small (up to  several  hundreds)
5045   number of general linear  constraints  and  arbitrarily  large  variable
5046   count.
5047 
5048 INPUT PARAMETERS:
5049     N       -   problem size
5050 
5051 OUTPUT PARAMETERS:
5052     State   -   optimizer with zero quadratic/linear terms
5053                 and no constraints
5054 
5055   -- ALGLIB --
5056      Copyright 11.01.2011 by Bochkanov Sergey
5057 *************************************************************************/
5058 void minqpcreate(const ae_int_t n, minqpstate &state, const xparams _xparams = alglib::xdefault);
5059 
5060 
5061 /*************************************************************************
5062 This function sets linear term for QP solver.
5063 
5064 By default, linear term is zero.
5065 
5066 INPUT PARAMETERS:
5067     State   -   structure which stores algorithm state
5068     B       -   linear term, array[N].
5069 
5070   -- ALGLIB --
5071      Copyright 11.01.2011 by Bochkanov Sergey
5072 *************************************************************************/
5073 void minqpsetlinearterm(const minqpstate &state, const real_1d_array &b, const xparams _xparams = alglib::xdefault);
5074 
5075 
5076 /*************************************************************************
5077 This  function  sets  dense  quadratic  term  for  QP solver. By  default,
5078 quadratic term is zero.
5079 
5080 IMPORTANT:
5081 
5082 This solver minimizes following  function:
5083     f(x) = 0.5*x'*A*x + b'*x.
5084 Note that quadratic term has 0.5 before it. So if  you  want  to  minimize
5085     f(x) = x^2 + x
5086 you should rewrite your problem as follows:
5087     f(x) = 0.5*(2*x^2) + x
5088 and your matrix A will be equal to [[2.0]], not to [[1.0]]
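
For example, a minimal sketch for f(x) = x^2 + x (note the doubled diagonal
element; state is assumed to be created elsewhere):
>
> alglib::real_2d_array a = "[[2.0]]";   // 0.5*x'*A*x = x^2 requires A=[[2.0]]
> alglib::real_1d_array b = "[1.0]";
> alglib::minqpsetquadraticterm(state, a);
> alglib::minqpsetlinearterm(state, b);
>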
5089 
5090 INPUT PARAMETERS:
5091     State   -   structure which stores algorithm state
5092     A       -   matrix, array[N,N]
5093     IsUpper -   (optional) storage type:
5094                 * if True, symmetric matrix  A  is  given  by  its  upper
5095                   triangle, and the lower triangle isn't used
5096                 * if False, symmetric matrix  A  is  given  by  its lower
5097                   triangle, and the upper triangle isn't used
5098                 * if not given, both lower and upper  triangles  must  be
5099                   filled.
5100 
5101   -- ALGLIB --
5102      Copyright 11.01.2011 by Bochkanov Sergey
5103 *************************************************************************/
5104 void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a, const bool isupper, const xparams _xparams = alglib::xdefault);
5105 void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a, const xparams _xparams = alglib::xdefault);
5106 
5107 
5108 /*************************************************************************
5109 This  function  sets  sparse  quadratic  term  for  QP solver. By default,
5110 quadratic  term  is  zero.  This  function  overrides  previous  calls  to
5111 minqpsetquadraticterm() or minqpsetquadratictermsparse().
5112 
5113 NOTE: dense solvers like DENSE-AUL-QP or DENSE-IPM-QP  will  convert  this
5114       matrix to dense storage anyway.
5115 
5116 IMPORTANT:
5117 
5118 This solver minimizes following  function:
5119     f(x) = 0.5*x'*A*x + b'*x.
5120 Note that quadratic term has 0.5 before it. So if  you  want  to  minimize
5121     f(x) = x^2 + x
5122 you should rewrite your problem as follows:
5123     f(x) = 0.5*(2*x^2) + x
5124 and your matrix A will be equal to [[2.0]], not to [[1.0]]
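
A sketch of the same f(x) = x^2 + x with the quadratic term  supplied  in
sparse form (the hash-table builder below is one possible way to  assemble
the matrix):
>
> alglib::sparsematrix a;
> alglib::sparsecreate(1, 1, a);      // 1x1 sparse matrix, hash-table storage
> alglib::sparseset(a, 0, 0, 2.0);    // again A=[[2.0]] because of the 0.5 factor
> alglib::minqpsetquadratictermsparse(state, a, true);
>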
5125 
5126 INPUT PARAMETERS:
5127     State   -   structure which stores algorithm state
5128     A       -   matrix, array[N,N]
5129     IsUpper -   (optional) storage type:
5130                 * if True, symmetric matrix  A  is  given  by  its  upper
5131                   triangle, and the lower triangle isn't used
5132                 * if False, symmetric matrix  A  is  given  by  its lower
5133                   triangle, and the upper triangle isn't used
5134                 * if not given, both lower and upper  triangles  must  be
5135                   filled.
5136 
5137   -- ALGLIB --
5138      Copyright 11.01.2011 by Bochkanov Sergey
5139 *************************************************************************/
5140 void minqpsetquadratictermsparse(const minqpstate &state, const sparsematrix &a, const bool isupper, const xparams _xparams = alglib::xdefault);
5141 
5142 
5143 /*************************************************************************
5144 This function sets starting point for QP solver. It is useful to have good
5145 initial approximation to the solution, because it will increase  speed  of
5146 convergence and identification of active constraints.
5147 
5148 NOTE: interior point solvers ignore initial point provided by user.
5149 
5150 INPUT PARAMETERS:
5151     State   -   structure which stores algorithm state
5152     X       -   starting point, array[N].
5153 
5154   -- ALGLIB --
5155      Copyright 11.01.2011 by Bochkanov Sergey
5156 *************************************************************************/
5157 void minqpsetstartingpoint(const minqpstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
5158 
5159 
5160 /*************************************************************************
5161 This  function sets origin for QP solver. By default, following QP program
5162 is solved:
5163 
5164     min(0.5*x'*A*x+b'*x)
5165 
This function allows solving a different problem:
5167 
5168     min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin))
5169 
5170 Specification of non-zero origin affects function being minimized, but not
5171 constraints. Box and  linear  constraints  are  still  calculated  without
5172 origin.
5173 
5174 INPUT PARAMETERS:
5175     State   -   structure which stores algorithm state
5176     XOrigin -   origin, array[N].
5177 
5178   -- ALGLIB --
5179      Copyright 11.01.2011 by Bochkanov Sergey
5180 *************************************************************************/
5181 void minqpsetorigin(const minqpstate &state, const real_1d_array &xorigin, const xparams _xparams = alglib::xdefault);
5182 
5183 
5184 /*************************************************************************
5185 This function sets scaling coefficients.
5186 
5187 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
5188 size and gradient are scaled before comparison  with  tolerances)  and  as
5189 preconditioner.
5190 
5191 Scale of the I-th variable is a translation invariant measure of:
5192 a) "how large" the variable is
5193 b) how large the step should be to make significant changes in the
5194    function
5195 
5196 If you do not know how to choose scales of your variables, you can:
5197 * read www.alglib.net/optimization/scaling.php article
5198 * use minqpsetscaleautodiag(), which calculates scale  using  diagonal  of
5199   the  quadratic  term:  S  is  set to 1/sqrt(diag(A)), which works well
5200   sometimes.
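
For example (scale magnitudes are purely illustrative):
>
> // variable 0 varies on the scale of ~1, variable 1 on the scale of ~10000
> alglib::real_1d_array s = "[1,10000]";
> alglib::minqpsetscale(state, s);
>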
5201 
5202 INPUT PARAMETERS:
5203     State   -   structure stores algorithm state
5204     S       -   array[N], non-zero scaling coefficients
5205                 S[i] may be negative, sign doesn't matter.
5206 
5207   -- ALGLIB --
5208      Copyright 14.01.2011 by Bochkanov Sergey
5209 *************************************************************************/
5210 void minqpsetscale(const minqpstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
5211 
5212 
5213 /*************************************************************************
5214 This function sets automatic evaluation of variable scaling.
5215 
5216 IMPORTANT: this function works only for  matrices  with positive  diagonal
5217            elements! Zero or negative elements will  result  in  -9  error
5218            code  being  returned.  Specify  scale  vector  manually   with
5219            minqpsetscale() in such cases.
5220 
5221 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
5222 size and gradient are scaled before comparison  with  tolerances)  and  as
5223 preconditioner.
5224 
5225 The  best  way  to  set  scaling  is  to manually specify variable scales.
5226 However, sometimes you just need quick-and-dirty solution  -  either  when
5227 you perform fast prototyping, or when you know your problem well  and  you
5228 are 100% sure that this quick solution is robust enough in your case.
5229 
5230 One such solution is to evaluate scale of I-th variable as 1/Sqrt(A[i,i]),
5231 where A[i,i] is an I-th diagonal element of the quadratic term.
5232 
5233 Such approach works well sometimes, but you have to be careful here.
5234 
5235 INPUT PARAMETERS:
5236     State   -   structure stores algorithm state
5237 
5238   -- ALGLIB --
5239      Copyright 26.12.2017 by Bochkanov Sergey
5240 *************************************************************************/
5241 void minqpsetscaleautodiag(const minqpstate &state, const xparams _xparams = alglib::xdefault);
5242 
5243 
5244 /*************************************************************************
5245 This function tells solver to use BLEIC-based algorithm and sets  stopping
5246 criteria for the algorithm.
5247 
5248 This algorithm is intended for large-scale  problems,  possibly nonconvex,
5249 with small number of general linear constraints. Feasible initial point is
5250 essential for good performance.
5251 
5252 IMPORTANT: when DENSE-IPM (or DENSE-AUL for  nonconvex  problems)  solvers
5253            are applicable, their performance is much better than  that  of
5254            BLEIC-QP.
5255            We recommend  you to use BLEIC only when other solvers can  not
5256            be used.
5257 
5258 ALGORITHM FEATURES:
5259 
5260 * supports dense and sparse QP problems
5261 * supports box and general linear equality/inequality constraints
5262 * can solve all types of problems  (convex,  semidefinite,  nonconvex)  as
5263   long as they are bounded from below under constraints.
5264   Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1".
5265   Of course, global  minimum  is found only  for  positive  definite   and
5266   semidefinite  problems.  As  for indefinite ones - only local minimum is
5267   found.
5268 
5269 ALGORITHM OUTLINE:
5270 
5271 * BLEIC-QP solver is just a driver function for MinBLEIC solver; it solves
5272   quadratic  programming   problem   as   general   linearly   constrained
5273   optimization problem, which is solved by means of BLEIC solver  (part of
5274   ALGLIB, active set method).
5275 
5276 ALGORITHM LIMITATIONS:
5277 * This algorithm is inefficient on  problems with hundreds  and  thousands
5278   of general inequality constraints and infeasible initial point.  Initial
5279   feasibility detection stage may take too long on such constraint sets.
5280   Consider using DENSE-IPM or DENSE-AUL instead.
5281 * unlike QuickQP solver, this algorithm does not perform Newton steps  and
5282   does not use Level 3 BLAS. Being general-purpose active set  method,  it
5283   can activate constraints only one-by-one. Thus, its performance is lower
5284   than that of QuickQP.
* its precision is also a bit inferior to that of QuickQP. BLEIC-QP performs
  only LBFGS steps (no Newton steps), which are good  at  detecting  the
  neighborhood of the solution, but need many iterations to find a solution
  with more than 6 digits of precision.
5289 
5290 INPUT PARAMETERS:
5291     State   -   structure which stores algorithm state
5292     EpsG    -   >=0
5293                 The  subroutine  finishes  its  work   if   the  condition
5294                 |v|<EpsG is satisfied, where:
5295                 * |.| means Euclidian norm
5296                 * v - scaled constrained gradient vector, v[i]=g[i]*s[i]
5297                 * g - gradient
5298                 * s - scaling coefficients set by MinQPSetScale()
5299     EpsF    -   >=0
5300                 The  subroutine  finishes its work if exploratory steepest
5301                 descent  step  on  k+1-th iteration  satisfies   following
5302                 condition:  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
5303     EpsX    -   >=0
5304                 The  subroutine  finishes its work if exploratory steepest
5305                 descent  step  on  k+1-th iteration  satisfies   following
                condition |v|<=EpsX, where:
5307                 * |.| means Euclidian norm
5308                 * v - scaled step vector, v[i]=dx[i]/s[i]
5309                 * dx - step vector, dx=X(k+1)-X(k)
5310                 * s - scaling coefficients set by MinQPSetScale()
5311     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
5312                 iterations is unlimited. NOTE: this  algorithm uses  LBFGS
5313                 iterations,  which  are  relatively  cheap,  but   improve
5314                 function value only a bit. So you will need many iterations
5315                 to converge - from 0.1*N to 10*N, depending  on  problem's
5316                 condition number.
5317 
5318 IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS  ALGORITHM
5319 BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT!
5320 
5321 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
5322 to automatic stopping criterion selection (presently it is  small    step
5323 length, but it may change in the future versions of ALGLIB).
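
A typical activation sketch (tolerance values are arbitrary; s is a  scale
vector set elsewhere):
>
> alglib::minqpsetscale(state, s);                       // scale first!
> alglib::minqpsetalgobleic(state, 0.0, 0.0, 1.0e-6, 0); // stop on small scaled step
>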
5324 
5325   -- ALGLIB --
5326      Copyright 11.01.2011 by Bochkanov Sergey
5327 *************************************************************************/
5328 void minqpsetalgobleic(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
5329 
5330 
5331 /*************************************************************************
5332 This function tells QP solver to use DENSE-AUL algorithm and sets stopping
5333 criteria for the algorithm.
5334 
This algorithm is intended for non-convex problems with  moderate  (up  to
several thousands) variable count and an arbitrary number of constraints -
problems which are either (a) effectively  convexified  under  constraints
or (b) have a unique solution even with a nonconvex target.
5339 
5340 IMPORTANT: when DENSE-IPM solver is applicable, its performance is usually
5341            much better than that of DENSE-AUL.
5342            We recommend  you to use DENSE-AUL only when other solvers  can
5343            not be used.
5344 
5345 ALGORITHM FEATURES:
5346 
5347 * supports  box  and  dense/sparse  general   linear   equality/inequality
5348   constraints
5349 * convergence is theoretically proved for positive-definite  (convex)   QP
5350   problems. Semidefinite and non-convex problems can be solved as long  as
5351   they  are   bounded  from  below  under  constraints,  although  without
5352   theoretical guarantees.
5353 
5354 ALGORITHM OUTLINE:
5355 
5356 * this  algorithm   is   an   augmented   Lagrangian   method  with  dense
5357   preconditioner (hence  its  name).
5358 * it performs several outer iterations in order to refine  values  of  the
5359   Lagrange multipliers. Single outer  iteration  is  a  solution  of  some
5360   unconstrained optimization problem: first  it  performs  dense  Cholesky
5361   factorization of the Hessian in order to build preconditioner  (adaptive
5362   regularization is applied to enforce positive  definiteness),  and  then
5363   it uses L-BFGS optimizer to solve optimization problem.
5364 * typically you need about 5-10 outer iterations to converge to solution
5365 
5366 ALGORITHM LIMITATIONS:
5367 
5368 * because dense Cholesky driver is used, this algorithm has O(N^2)  memory
5369   requirements and O(OuterIterations*N^3) minimum running time.  From  the
5370   practical  point  of  view,  it  limits  its  applicability  by  several
5371   thousands of variables.
  On the other hand, variable count is the most limiting factor,  and  the
  dependence on constraint count is much weaker. Assuming  that  the
  constraint matrix is sparse, it may handle tens of thousands of  general
  linear constraints.
5376 
5377 INPUT PARAMETERS:
5378     State   -   structure which stores algorithm state
5379     EpsX    -   >=0, stopping criteria for inner optimizer.
5380                 Inner  iterations  are  stopped  when  step  length  (with
5381                 variable scaling being applied) is less than EpsX.
5382                 See  minqpsetscale()  for  more  information  on  variable
5383                 scaling.
5384     Rho     -   penalty coefficient, Rho>0:
5385                 * large enough  that  algorithm  converges  with   desired
5386                   precision.
5387                 * not TOO large to prevent ill-conditioning
5388                 * recommended values are 100, 1000 or 10000
5389     ItsCnt  -   number of outer iterations:
5390                 * recommended values: 10-15 (although  in  most  cases  it
5391                   converges within 5 iterations, you may need a  few  more
5392                   to be sure).
5393                 * ItsCnt=0 means that small number of outer iterations  is
5394                   automatically chosen (10 iterations in current version).
5395                 * ItsCnt=1 means that AUL algorithm performs just as usual
5396                   penalty method.
5397                 * ItsCnt>1 means that  AUL  algorithm  performs  specified
5398                   number of outer iterations
5399 
5400 IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS  ALGORITHM
5401 BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT!
5402 
5403 NOTE: Passing  EpsX=0  will  lead  to  automatic  step  length  selection
5404       (specific step length chosen may change in the future  versions  of
5405       ALGLIB, so it is better to specify step length explicitly).
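
A typical activation sketch (Rho and ItsCnt follow  the  recommendations
above; EpsX and the scale vector s are arbitrary):
>
> alglib::minqpsetscale(state, s);
> alglib::minqpsetalgodenseaul(state, 1.0e-9, 1000.0, 10); // Rho=1000, 10 outer its
>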
5406 
5407   -- ALGLIB --
5408      Copyright 20.08.2016 by Bochkanov Sergey
5409 *************************************************************************/
5410 void minqpsetalgodenseaul(const minqpstate &state, const double epsx, const double rho, const ae_int_t itscnt, const xparams _xparams = alglib::xdefault);
5411 
5412 
5413 /*************************************************************************
5414 This function tells QP solver to  use  DENSE-IPM  QP  algorithm  and  sets
5415 stopping criteria for the algorithm.
5416 
5417 This  algorithm  is  intended  for convex and semidefinite  problems  with
5418 moderate (up to several thousands) variable count and arbitrary number  of
5419 constraints.
5420 
5421 IMPORTANT: this algorithm won't work for nonconvex problems, use DENSE-AUL
5422            or BLEIC-QP instead. If you try to  run  DENSE-IPM  on  problem
5423            with  indefinite  matrix  (matrix having  at least one negative
5424            eigenvalue) then depending on circumstances it may  either  (a)
5425            stall at some  arbitrary  point,  or  (b)  throw  exception  on
5426            failure of Cholesky decomposition.
5427 
5428 ALGORITHM FEATURES:
5429 
5430 * supports  box  and  dense/sparse  general   linear   equality/inequality
5431   constraints
5432 
5433 ALGORITHM OUTLINE:
5434 
5435 * this  algorithm  is  our implementation  of  interior  point  method  as
5436   formulated by  R.J.Vanderbei, with minor modifications to the  algorithm
5437   (damped Newton directions are extensively used)
5438 * like all interior point methods, this algorithm  tends  to  converge  in
5439   roughly same number of iterations (between 15 and 50) independently from
5440   the problem dimensionality
5441 
5442 ALGORITHM LIMITATIONS:
5443 
* because a dense Cholesky driver is used, for an N-dimensional problem with
  M dense constraints this algorithm has O(N^2+N*M) memory requirements and
  O(N^3+N*M^2) running time.
  Having sparse constraints with Z nonzeros per row  relaxes  storage  and
  running time down to O(N^2+M*Z) and O(N^3+N*Z^2).
5449   From the practical  point  of  view,  it  limits  its  applicability  by
5450   several thousands of variables.
  On the other hand, variable count is the most limiting factor,  and  the
  dependence on constraint count is much weaker. Assuming  that  the
  constraint matrix is sparse, it may handle tens of thousands of  general
  linear constraints.
5455 
5456 INPUT PARAMETERS:
5457     State   -   structure which stores algorithm state
5458     Eps     -   >=0, stopping criteria. The algorithm stops  when   primal
                and dual infeasibilities as well as complementarity gap are
5460                 less than Eps.
5461 
5462 IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS  ALGORITHM
5463 BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT!
5464 
NOTE: Passing Eps=0 will lead to automatic selection of a small epsilon.
5466 
5467 ===== TRACING IPM SOLVER =================================================
5468 
5469 IPM solver supports advanced tracing capabilities. You can trace algorithm
5470 output by specifying following trace symbols (case-insensitive)  by  means
5471 of trace_file() call:
5472 * 'IPM'         - for basic trace of algorithm  steps and decisions.  Only
5473                   short scalars (function values and deltas) are  printed.
5474                   N-dimensional quantities like search directions are  NOT
5475                   printed.
5476 * 'IPM.DETAILED'- for output of points being visited and search directions
5477                   This  symbol  also  implicitly  defines  'IPM'. You  can
5478                   control output format by additionally specifying:
5479                   * nothing     to output in  6-digit exponential format
5480                   * 'PREC.E15'  to output in 15-digit exponential format
5481                   * 'PREC.F6'   to output in  6-digit fixed-point format
5482 
5483 By default trace is disabled and adds  no  overhead  to  the  optimization
5484 process. However, specifying any of the symbols adds some  formatting  and
5485 output-related overhead.
5486 
5487 You may specify multiple symbols by separating them with commas:
5488 >
5489 > alglib::trace_file("IPM,PREC.F6", "path/to/trace.log")
5490 >
5491 
5492   -- ALGLIB --
5493      Copyright 01.11.2019 by Bochkanov Sergey
5494 *************************************************************************/
5495 void minqpsetalgodenseipm(const minqpstate &state, const double eps, const xparams _xparams = alglib::xdefault);
5496 
5497 
5498 /*************************************************************************
5499 This function tells QP solver to  use  SPARSE-IPM  QP algorithm  and  sets
5500 stopping criteria for the algorithm.
5501 
5502 This  algorithm  is  intended  for convex and semidefinite  problems  with
5503 large  variable  and  constraint  count  and  sparse  quadratic  term  and
5504 constraints. It is possible to have  some  limited  set  of  dense  linear
5505 constraints - they will be handled separately by dense BLAS - but the more
5506 dense constraints you have, the more time solver needs.
5507 
5508 IMPORTANT: internally this solver performs large  and  sparse  (N+M)x(N+M)
5509            triangular factorization. So it expects both quadratic term and
5510            constraints to be highly sparse. However, its  running  time is
5511            influenced by BOTH fill factor and sparsity pattern.
5512 
           Generally we expect no more than a few nonzero elements per row.
           However,  different  sparsity  patterns  may  result
5515            in completely different running  times  even  given  same  fill
5516            factor.
5517 
5518            In many cases this algorithm outperforms DENSE-IPM by order  of
5519            magnitude. However, in some cases you may  get  better  results
5520            with DENSE-IPM even when solving sparse task.
5521 
5522 IMPORTANT: this algorithm won't work for nonconvex problems, use DENSE-AUL
           or BLEIC-QP instead. If you try to run SPARSE-IPM on a  problem
5524            with  indefinite  matrix  (matrix having  at least one negative
5525            eigenvalue) then depending on circumstances it may  either  (a)
5526            stall at some  arbitrary  point,  or  (b)  throw  exception  on
5527            failure of Cholesky decomposition.
5528 
5529 ALGORITHM FEATURES:
5530 
5531 * supports  box  and  dense/sparse  general   linear   equality/inequality
5532   constraints
5533 * specializes on large-scale sparse problems
5534 
5535 ALGORITHM OUTLINE:
5536 
5537 * this  algorithm  is  our implementation  of  interior  point  method  as
5538   formulated by  R.J.Vanderbei, with minor modifications to the  algorithm
5539   (damped Newton directions are extensively used)
5540 * like all interior point methods, this algorithm  tends  to  converge  in
5541   roughly same number of iterations (between 15 and 50) independently from
5542   the problem dimensionality
5543 
5544 ALGORITHM LIMITATIONS:
5545 
* this algorithm may handle a moderate number of dense constraints, usually
  no more than a thousand dense rows, without losing its efficiency.
5548 
5549 INPUT PARAMETERS:
5550     State   -   structure which stores algorithm state
5551     Eps     -   >=0, stopping criteria. The algorithm stops  when   primal
                and dual infeasibilities as well as complementarity gap are
5553                 less than Eps.
5554 
5555 IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS  ALGORITHM
5556 BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT!
5557 
NOTE: Passing Eps=0 will lead to automatic selection of a small epsilon.
5559 
5560 ===== TRACING IPM SOLVER =================================================
5561 
5562 IPM solver supports advanced tracing capabilities. You can trace algorithm
5563 output by specifying following trace symbols (case-insensitive)  by  means
5564 of trace_file() call:
5565 * 'IPM'         - for basic trace of algorithm  steps and decisions.  Only
5566                   short scalars (function values and deltas) are  printed.
5567                   N-dimensional quantities like search directions are  NOT
5568                   printed.
5569 * 'IPM.DETAILED'- for output of points being visited and search directions
5570                   This  symbol  also  implicitly  defines  'IPM'. You  can
5571                   control output format by additionally specifying:
5572                   * nothing     to output in  6-digit exponential format
5573                   * 'PREC.E15'  to output in 15-digit exponential format
5574                   * 'PREC.F6'   to output in  6-digit fixed-point format
5575 
5576 By default trace is disabled and adds  no  overhead  to  the  optimization
5577 process. However, specifying any of the symbols adds some  formatting  and
5578 output-related overhead.
5579 
5580 You may specify multiple symbols by separating them with commas:
5581 >
5582 > alglib::trace_file("IPM,PREC.F6", "path/to/trace.log")
5583 >
5584 
5585   -- ALGLIB --
5586      Copyright 01.11.2019 by Bochkanov Sergey
5587 *************************************************************************/
5588 void minqpsetalgosparseipm(const minqpstate &state, const double eps, const xparams _xparams = alglib::xdefault);
5589 
5590 
5591 /*************************************************************************
5592 This function tells solver to use QuickQP  algorithm:  special  extra-fast
algorithm for problems with box-only constraints. It may solve  non-convex
5594 problems as long as they are bounded from below under constraints.
5595 
5596 ALGORITHM FEATURES:
5597 * several times faster than DENSE-IPM when running on box-only problem
5598 * utilizes accelerated methods for activation of constraints.
5599 * supports dense and sparse QP problems
5600 * supports ONLY box constraints; general linear constraints are NOT
5601   supported by this solver
5602 * can solve all types of problems  (convex,  semidefinite,  nonconvex)  as
5603   long as they are bounded from below under constraints.
5604   Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1".
5605   In convex/semidefinite case global minimum  is  returned,  in  nonconvex
5606   case - algorithm returns one of the local minimums.
5607 
5608 ALGORITHM OUTLINE:
5609 
5610 * algorithm  performs  two kinds of iterations: constrained CG  iterations
5611   and constrained Newton iterations
5612 * initially it performs small number of constrained CG  iterations,  which
5613   can efficiently activate/deactivate multiple constraints
5614 * after CG phase algorithm tries to calculate Cholesky  decomposition  and
5615   to perform several constrained Newton steps. If  Cholesky  decomposition
5616   failed (matrix is indefinite even under constraints),  we  perform  more
5617   CG iterations until we converge to such set of constraints  that  system
5618   matrix becomes  positive  definite.  Constrained  Newton  steps  greatly
5619   increase convergence speed and precision.
* algorithm interleaves CG and Newton iterations, which allows it to handle
5621   indefinite matrices (CG phase) and quickly converge after final  set  of
5622   constraints is found (Newton phase). Combination of CG and Newton phases
5623   is called "outer iteration".
5624 * it is possible to turn off Newton  phase  (beneficial  for  semidefinite
5625   problems - Cholesky decomposition will fail too often)
5626 
5627 ALGORITHM LIMITATIONS:
5628 
5629 * algorithm does not support general  linear  constraints;  only  box ones
5630   are supported
5631 * Cholesky decomposition for sparse problems  is  performed  with  Skyline
5632   Cholesky solver, which is intended for low-profile matrices. No profile-
5633   reducing reordering of variables is performed in this version of ALGLIB.
5634 * problems with near-zero negative eigenvalues (or exactly zero ones)  may
5635   experience about 2-3x performance penalty. The reason is  that  Cholesky
5636   decomposition cannot be performed until we identify directions  of  zero
5637   and negative curvature and activate corresponding boundary constraints -
5638   but a lot of trial and error is needed because these directions are hard
5639   to notice in the matrix spectrum.
5640   In this case you may turn off the Newton phase of the algorithm.
5641   Large negative eigenvalues  are  not  an  issue,  so  highly  non-convex
5642   problems can be solved very efficiently.
5643 
5644 INPUT PARAMETERS:
5645     State   -   structure which stores algorithm state
5646     EpsG    -   >=0
5647                 The  subroutine  finishes  its  work   if   the  condition
5648                 |v|<EpsG is satisfied, where:
5649                 * |.| means Euclidean norm
5650                 * v - scaled constrained gradient vector, v[i]=g[i]*s[i]
5651                 * g - gradient
5652                 * s - scaling coefficients set by MinQPSetScale()
5653     EpsF    -   >=0
5654                 The  subroutine  finishes its work if exploratory steepest
5655                 descent  step  on  k+1-th iteration  satisfies   following
5656                 condition:  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
5657     EpsX    -   >=0
5658                 The  subroutine  finishes its work if exploratory steepest
5659                 descent  step  on  k+1-th iteration  satisfies   following
5660                 condition: |v|<=EpsX, where:
5661                 * |.| means Euclidean norm
5662                 * v - scaled step vector, v[i]=dx[i]/s[i]
5663                 * dx - step vector, dx=X(k+1)-X(k)
5664                 * s - scaling coefficients set by MinQPSetScale()
5665     MaxOuterIts-maximum number of OUTER iterations.  One  outer  iteration
5666                 includes some number of CG iterations (from 5 to  ~N)  and
5667                 one or several (usually a small number of)  Newton  steps.
5668                 Thus, one outer iteration has high cost, but  can  greatly
5669                 reduce function value.
5670                 Use 0 if you do not want to limit the number of outer iterations.
5671     UseNewton-  use Newton phase or not:
5672                 * Newton phase improves performance of  positive  definite
5673                   dense problems (about 2 times improvement can be observed)
5674                 * can result in some performance penalty  on  semidefinite
5675                   or slightly negative definite  problems  -  each  Newton
5676                   phase will bring no improvement (Cholesky failure),  but
5677                   still will require computational time.
5678                 * if in doubt, you can turn off this  phase  -  optimizer
5679                   will retain most of its high speed.
5680 
5681 IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS  ALGORITHM
5682 BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT!
5683 
5684 Passing EpsG=0, EpsF=0, EpsX=0 and MaxOuterIts=0 (simultaneously) will lead
5685 to automatic stopping criterion selection (presently it is  a  small  step
5686 length, but it may change in future versions of ALGLIB).
5687 
5688   -- ALGLIB --
5689      Copyright 22.05.2014 by Bochkanov Sergey
5690 *************************************************************************/
5691 void minqpsetalgoquickqp(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxouterits, const bool usenewton, const xparams _xparams = alglib::xdefault);
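/*************************************************************************
A minimal usage sketch for minqpsetalgoquickqp(). It assumes that  'state'
is a minqpstate prepared with minqpcreate() and that the  quadratic/linear
terms of a box-constrained problem were already set elsewhere;  the  exact
tolerance values below are illustrative only:

    alglib::real_1d_array s = "[1.0, 1.0]";
    alglib::minqpsetscale(state, s);  // stopping criteria are scale-dependent
    alglib::minqpsetalgoquickqp(
        state,
        0.0,       // EpsG: disabled
        0.0,       // EpsF: disabled
        1.0e-9,    // EpsX: stop on small scaled step
        0,         // MaxOuterIts: do not limit outer iterations
        true);     // UseNewton: enable Newton phase (convex dense problem)
*************************************************************************/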
5692 
5693 
5694 /*************************************************************************
5695 This function sets box constraints for QP solver
5696 
5697 Box constraints are inactive by default (after  initial  creation).  After
5698 being  set,  they are  preserved until explicitly overwritten with another
5699 minqpsetbc()  or  minqpsetbcall()  call,  or  partially  overwritten  with
5700 minqpsetbci() call.
5701 
5702 Following types of constraints are supported:
5703 
5704     DESCRIPTION         CONSTRAINT              HOW TO SPECIFY
5705     fixed variable      x[i]=Bnd[i]             BndL[i]=BndU[i]
5706     lower bound         BndL[i]<=x[i]           BndU[i]=+INF
5707     upper bound         x[i]<=BndU[i]           BndL[i]=-INF
5708     range               BndL[i]<=x[i]<=BndU[i]  ...
5709     free variable       -                       BndL[I]=-INF, BndU[I]=+INF
5710 
5711 INPUT PARAMETERS:
5712     State   -   structure stores algorithm state
5713     BndL    -   lower bounds, array[N].
5714                 If some (all) variables are unbounded, you may specify
5715                 very small number or -INF (latter is recommended because
5716                 it will allow solver to use better algorithm).
5717     BndU    -   upper bounds, array[N].
5718                 If some (all) variables are unbounded, you may specify
5719                 very large number or +INF (latter is recommended because
5720                 it will allow solver to use better algorithm).
5721 
5722 NOTE: infinite values can be specified by means of Double.PositiveInfinity
5723       and  Double.NegativeInfinity  (in  C#)  and  alglib::fp_posinf   and
5724       alglib::fp_neginf (in C++).
5725 
5726 NOTE: you may replace infinities by very small/very large values,  but  it
5727       is not recommended because large numbers may introduce large numerical
5728       errors in the algorithm.
5729 
5730 NOTE: if constraints for all variables are the same, you may use minqpsetbcall()
5731       which allows you to specify constraints without using arrays.
5732 
5733 NOTE: BndL>BndU will result in QP problem being recognized as infeasible.
5734 
5735   -- ALGLIB --
5736      Copyright 11.01.2011 by Bochkanov Sergey
5737 *************************************************************************/
5738 void minqpsetbc(const minqpstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
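/*************************************************************************
A short sketch of minqpsetbc() for a hypothetical 3-variable problem where
x0 is fixed at 2, x1 lies in [0,1] and x2 is free ('state' is  assumed  to
be a minqpstate prepared earlier with minqpcreate()):

    alglib::real_1d_array bndl = "[2.0, 0.0, 0.0]";
    alglib::real_1d_array bndu = "[2.0, 1.0, 0.0]";
    bndl[2] = alglib::fp_neginf;       // x2 has no lower bound
    bndu[2] = alglib::fp_posinf;       // x2 has no upper bound
    alglib::minqpsetbc(state, bndl, bndu);
*************************************************************************/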
5739 
5740 
5741 /*************************************************************************
5742 This function sets box constraints for QP solver (all variables  at  once,
5743 same constraints for all variables)
5744 
5745 Box constraints are inactive by default (after  initial  creation).  After
5746 being  set,  they are  preserved until explicitly overwritten with another
5747 minqpsetbc()  or  minqpsetbcall()  call,  or  partially  overwritten  with
5748 minqpsetbci() call.
5749 
5750 Following types of constraints are supported:
5751 
5752     DESCRIPTION         CONSTRAINT              HOW TO SPECIFY
5753     fixed variable      x[i]=Bnd                BndL=BndU
5754     lower bound         BndL<=x[i]              BndU=+INF
5755     upper bound         x[i]<=BndU              BndL=-INF
5756     range               BndL<=x[i]<=BndU        ...
5757     free variable       -                       BndL=-INF, BndU=+INF
5758 
5759 INPUT PARAMETERS:
5760     State   -   structure stores algorithm state
5761     BndL    -   lower bound, same for all variables
5762     BndU    -   upper bound, same for all variables
5763 
5764 NOTE: infinite values can be specified by means of Double.PositiveInfinity
5765       and  Double.NegativeInfinity  (in  C#)  and  alglib::fp_posinf   and
5766       alglib::fp_neginf (in C++).
5767 
5768 NOTE: you may replace infinities by very small/very large values,  but  it
5769       is not recommended because large numbers may introduce large numerical
5770       errors in the algorithm.
5771 
5772 NOTE: BndL>BndU will result in QP problem being recognized as infeasible.
5773 
5774   -- ALGLIB --
5775      Copyright 11.01.2011 by Bochkanov Sergey
5776 *************************************************************************/
5777 void minqpsetbcall(const minqpstate &state, const double bndl, const double bndu, const xparams _xparams = alglib::xdefault);
5778 
5779 
5780 /*************************************************************************
5781 This function sets box constraints for I-th variable (other variables are
5782 not modified).
5783 
5784 Following types of constraints are supported:
5785 
5786     DESCRIPTION         CONSTRAINT              HOW TO SPECIFY
5787     fixed variable      x[i]=Bnd                BndL=BndU
5788     lower bound         BndL<=x[i]              BndU=+INF
5789     upper bound         x[i]<=BndU              BndL=-INF
5790     range               BndL<=x[i]<=BndU        ...
5791     free variable       -                       BndL=-INF, BndU=+INF
5792 
5793 INPUT PARAMETERS:
5794     State   -   structure stores algorithm state
5795     BndL    -   lower bound
5796     BndU    -   upper bound
5797 
5798 NOTE: infinite values can be specified by means of Double.PositiveInfinity
5799       and  Double.NegativeInfinity  (in  C#)  and  alglib::fp_posinf   and
5800       alglib::fp_neginf (in C++).
5801 
5802 NOTE: you may replace infinities by very small/very large values,  but  it
5803       is not recommended because large numbers may introduce large numerical
5804       errors in the algorithm.
5805 
5806 NOTE: BndL>BndU will result in QP problem being recognized as infeasible.
5807 
5808   -- ALGLIB --
5809      Copyright 11.01.2011 by Bochkanov Sergey
5810 *************************************************************************/
5811 void minqpsetbci(const minqpstate &state, const ae_int_t i, const double bndl, const double bndu, const xparams _xparams = alglib::xdefault);
5812 
5813 
5814 /*************************************************************************
5815 This function sets dense linear constraints for QP optimizer.
5816 
5817 This  function  overrides  results  of  previous  calls  to  minqpsetlc(),
5818 minqpsetlcsparse() and minqpsetlcmixed().  After  call  to  this  function
5819 all non-box constraints are dropped, and you have only  those  constraints
5820 which were specified in the present call.
5821 
5822 If you want  to  specify  mixed  (with  dense  and  sparse  terms)  linear
5823 constraints, you should call minqpsetlcmixed().
5824 
5825 INPUT PARAMETERS:
5826     State   -   structure previously allocated with MinQPCreate call.
5827     C       -   linear constraints, array[K,N+1].
5828                 Each row of C represents one constraint, either equality
5829                 or inequality (see below):
5830                 * first N elements correspond to coefficients,
5831                 * last element corresponds to the right part.
5832                 All elements of C (including right part) must be finite.
5833     CT      -   type of constraints, array[K]:
5834                 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
5835                 * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n+1]
5836                 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
5837     K       -   number of equality/inequality constraints, K>=0:
5838                 * if given, only leading K elements of C/CT are used
5839                 * if not given, automatically determined from sizes of C/CT
5840 
5841 NOTE 1: linear (non-bound) constraints are satisfied only approximately  -
5842         there always exists some violation due  to  numerical  errors  and
5843         algorithmic limitations (BLEIC-QP solver is most  precise,  AUL-QP
5844         solver is less precise).
5845 
5846   -- ALGLIB --
5847      Copyright 19.06.2012 by Bochkanov Sergey
5848 *************************************************************************/
5849 void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
5850 void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
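/*************************************************************************
A minimal sketch of minqpsetlc() for a hypothetical  2-variable  problem
with one inequality and one equality constraint, x0+x1<=2  and  x0-x1=0
(the last column of C holds the right parts):

    alglib::real_2d_array    c  = "[[1, 1, 2],[1, -1, 0]]";
    alglib::integer_1d_array ct = "[-1, 0]";  // -1 => "<=",  0 => "="
    alglib::minqpsetlc(state, c, ct);         // K inferred from sizes of C/CT
*************************************************************************/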
5851 
5852 
5853 /*************************************************************************
5854 This function sets sparse linear constraints for QP optimizer.
5855 
5856 This  function  overrides  results  of  previous  calls  to  minqpsetlc(),
5857 minqpsetlcsparse() and minqpsetlcmixed().  After  call  to  this  function
5858 all non-box constraints are dropped, and you have only  those  constraints
5859 which were specified in the present call.
5860 
5861 If you want  to  specify  mixed  (with  dense  and  sparse  terms)  linear
5862 constraints, you should call minqpsetlcmixed().
5863 
5864 INPUT PARAMETERS:
5865     State   -   structure previously allocated with MinQPCreate call.
5866     C       -   linear  constraints,  sparse  matrix  with  dimensions  at
5867                 least [K,N+1]. If matrix has  larger  size,  only  leading
5868                 Kx(N+1) rectangle is used.
5869                 Each row of C represents one constraint, either equality
5870                 or inequality (see below):
5871                 * first N elements correspond to coefficients,
5872                 * last element corresponds to the right part.
5873                 All elements of C (including right part) must be finite.
5874     CT      -   type of constraints, array[K]:
5875                 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
5876                 * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n+1]
5877                 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
5878     K       -   number of equality/inequality constraints, K>=0
5879 
5880 NOTE 1: linear (non-bound) constraints are satisfied only approximately  -
5881         there always exists some violation due  to  numerical  errors  and
5882         algorithmic limitations (BLEIC-QP solver is most  precise,  AUL-QP
5883         solver is less precise).
5884 
5885   -- ALGLIB --
5886      Copyright 22.08.2016 by Bochkanov Sergey
5887 *************************************************************************/
5888 void minqpsetlcsparse(const minqpstate &state, const sparsematrix &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
5889 
5890 
5891 /*************************************************************************
5892 This function sets mixed linear constraints, which include a set of  dense
5893 rows, and a set of sparse rows.
5894 
5895 This  function  overrides  results  of  previous  calls  to  minqpsetlc(),
5896 minqpsetlcsparse() and minqpsetlcmixed().
5897 
5898 This function may be useful if constraint matrix includes large number  of
5899 both types of rows - dense and sparse. If you have just a few sparse rows,
5900 you  may  represent  them  in  dense  format  without losing  performance.
5901 Similarly, if you have just a few dense rows, you may store them in sparse
5902 format with almost same performance.
5903 
5904 INPUT PARAMETERS:
5905     State   -   structure previously allocated with MinQPCreate call.
5906     SparseC -   linear constraints, sparse  matrix with dimensions EXACTLY
5907                 EQUAL TO [SparseK,N+1].  Each  row  of  C  represents  one
5908                 constraint, either equality or inequality (see below):
5909                 * first N elements correspond to coefficients,
5910                 * last element corresponds to the right part.
5911                 All elements of C (including right part) must be finite.
5912     SparseCT-   type of sparse constraints, array[SparseK]:
5913                 * if SparseCT[i]>0, then I-th constraint is SparseC[i,*]*x >= SparseC[i,n+1]
5914                 * if SparseCT[i]=0, then I-th constraint is SparseC[i,*]*x  = SparseC[i,n+1]
5915                 * if SparseCT[i]<0, then I-th constraint is SparseC[i,*]*x <= SparseC[i,n+1]
5916     SparseK -   number of sparse equality/inequality constraints, SparseK>=0
5917     DenseC  -   dense linear constraints, array[DenseK,N+1].
5918                 Each row of DenseC represents one constraint, either equality
5919                 or inequality (see below):
5920                 * first N elements correspond to coefficients,
5921                 * last element corresponds to the right part.
5922                 All elements of DenseC (including right part) must be finite.
5923     DenseCT -   type of constraints, array[DenseK]:
5924                 * if DenseCT[i]>0, then I-th constraint is DenseC[i,*]*x >= DenseC[i,n+1]
5925                 * if DenseCT[i]=0, then I-th constraint is DenseC[i,*]*x  = DenseC[i,n+1]
5926                 * if DenseCT[i]<0, then I-th constraint is DenseC[i,*]*x <= DenseC[i,n+1]
5927     DenseK  -   number of equality/inequality constraints, DenseK>=0
5928 
5929 NOTE 1: linear (non-box) constraints  are  satisfied only approximately  -
5930         there always exists some violation due  to  numerical  errors  and
5931         algorithmic limitations (BLEIC-QP solver is most  precise,  AUL-QP
5932         solver is less precise).
5933 
5934 NOTE 2: due to backward compatibility reasons SparseC can be  larger  than
5935         [SparseK,N+1]. In this case only leading  [SparseK,N+1]  submatrix
5936         will be  used.  However,  the  rest  of  ALGLIB  has  more  strict
5937         requirements on the input size, so we recommend you to pass sparse
5938         term whose size exactly matches algorithm expectations.
5939 
5940   -- ALGLIB --
5941      Copyright 22.08.2016 by Bochkanov Sergey
5942 *************************************************************************/
5943 void minqpsetlcmixed(const minqpstate &state, const sparsematrix &sparsec, const integer_1d_array &sparsect, const ae_int_t sparsek, const real_2d_array &densec, const integer_1d_array &densect, const ae_int_t densek, const xparams _xparams = alglib::xdefault);
5944 
5945 
5946 /*************************************************************************
5947 This function provides legacy API for specification of mixed  dense/sparse
5948 linear constraints.
5949 
5950 New conventions used by ALGLIB since release  3.16.0  state  that  set  of
5951 sparse constraints comes first,  followed  by  set  of  dense  ones.  This
5952 convention is essential when you talk about things like order of  Lagrange
5953 multipliers.
5954 
5955 However, legacy API accepted mixed  constraints  in  reverse  order.  This
5956 function is here to simplify situation with code relying on legacy API. It
5957 simply accepts constraints in one order (old) and passes them to new  API,
5958 now in correct order.
5959 
5960   -- ALGLIB --
5961      Copyright 01.11.2019 by Bochkanov Sergey
5962 *************************************************************************/
5963 void minqpsetlcmixedlegacy(const minqpstate &state, const real_2d_array &densec, const integer_1d_array &densect, const ae_int_t densek, const sparsematrix &sparsec, const integer_1d_array &sparsect, const ae_int_t sparsek, const xparams _xparams = alglib::xdefault);
5964 
5965 
5966 /*************************************************************************
5967 This function sets two-sided linear constraints AL <= A*x <= AU with dense
5968 constraint matrix A.
5969 
5970 NOTE: knowing  that  constraint  matrix  is  dense  helps  some QP solvers
5971       (especially modern IPM method) to utilize efficient  dense  Level  3
5972       BLAS for dense parts of the problem. If your problem has both  dense
5973       and sparse constraints, you  can  use  minqpsetlc2mixed()  function,
5974       which will result in dense algebra being applied to dense terms, and
5975       sparse linear algebra being applied to sparse terms.
5976 
5977 INPUT PARAMETERS:
5978     State   -   structure previously allocated with minqpcreate() call.
5979     A       -   linear constraints, array[K,N]. Each row of  A  represents
5980                 one  constraint. One-sided  inequality   constraints, two-
5981                 sided inequality  constraints,  equality  constraints  are
5982                 supported (see below)
5983     AL, AU  -   lower and upper bounds, array[K];
5984                 * AL[i]=AU[i] => equality constraint Ai*x=AL[i]
5985                 * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i]
5986                 * AL[i]=-INF  => one-sided constraint Ai*x<=AU[i]
5987                 * AU[i]=+INF  => one-sided constraint AL[i]<=Ai*x
5988                 * AL[i]=-INF, AU[i]=+INF => constraint is ignored
5989     K       -   number of equality/inequality constraints,  K>=0;  if  not
5990                 given, inferred from sizes of A, AL, AU.
5991 
5992   -- ALGLIB --
5993      Copyright 01.11.2019 by Bochkanov Sergey
5994 *************************************************************************/
5995 void minqpsetlc2dense(const minqpstate &state, const real_2d_array &a, const real_1d_array &al, const real_1d_array &au, const ae_int_t k, const xparams _xparams = alglib::xdefault);
5996 void minqpsetlc2dense(const minqpstate &state, const real_2d_array &a, const real_1d_array &al, const real_1d_array &au, const xparams _xparams = alglib::xdefault);
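/*************************************************************************
A short sketch of minqpsetlc2dense() for a hypothetical 2-variable problem
with the two-sided constraint 0<=x0+x1<=1 and the equality x0-x1=0.5  (the
equality is encoded by AL[i]=AU[i]):

    alglib::real_2d_array a  = "[[1, 1],[1, -1]]";
    alglib::real_1d_array al = "[0.0, 0.5]";
    alglib::real_1d_array au = "[1.0, 0.5]";
    alglib::minqpsetlc2dense(state, a, al, au);   // K inferred from sizes
*************************************************************************/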
5997 
5998 
5999 /*************************************************************************
6000 This  function  sets  two-sided linear  constraints  AL <= A*x <= AU  with
6001 sparse constraining matrix A. Recommended for large-scale problems.
6002 
6003 This  function  overwrites  linear  (non-box)  constraints set by previous
6004 calls (if such calls were made).
6005 
6006 INPUT PARAMETERS:
6007     State   -   structure previously allocated with minqpcreate() call.
6008     A       -   sparse matrix with size [K,N] (exactly!).
6009                 Each row of A represents one general linear constraint.
6010                 A can be stored in any sparse storage format.
6011     AL, AU  -   lower and upper bounds, array[K];
6012                 * AL[i]=AU[i] => equality constraint Ai*x=AL[i]
6013                 * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i]
6014                 * AL[i]=-INF  => one-sided constraint Ai*x<=AU[i]
6015                 * AU[i]=+INF  => one-sided constraint AL[i]<=Ai*x
6016                 * AL[i]=-INF, AU[i]=+INF => constraint is ignored
6017     K       -   number  of equality/inequality constraints, K>=0.  If  K=0
6018                 is specified, A, AL, AU are ignored.
6019 
6020   -- ALGLIB --
6021      Copyright 01.11.2019 by Bochkanov Sergey
6022 *************************************************************************/
6023 void minqpsetlc2(const minqpstate &state, const sparsematrix &a, const real_1d_array &al, const real_1d_array &au, const ae_int_t k, const xparams _xparams = alglib::xdefault);
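/*************************************************************************
A minimal sketch of minqpsetlc2() with one sparse row for an N=100 problem,
encoding the one-sided constraint x[0]+x[99]>=1. The matrix is built  with
sparsecreate()/sparseset() from the linalg subpackage (any storage  format
is accepted, see above):

    alglib::sparsematrix a;
    alglib::sparsecreate(1, 100, a);
    alglib::sparseset(a, 0,  0, 1.0);
    alglib::sparseset(a, 0, 99, 1.0);
    alglib::real_1d_array al = "[1.0]";
    alglib::real_1d_array au = "[0.0]";
    au[0] = alglib::fp_posinf;           // no upper bound => one-sided row
    alglib::minqpsetlc2(state, a, al, au, 1);
*************************************************************************/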
6024 
6025 
6026 /*************************************************************************
6027 This  function  sets  two-sided linear  constraints  AL <= A*x <= AU  with
6028 mixed constraining matrix A including sparse part (first SparseK rows) and
6029 dense part (last DenseK rows). Recommended for large-scale problems.
6030 
6031 This  function  overwrites  linear  (non-box)  constraints set by previous
6032 calls (if such calls were made).
6033 
6034 This function may be useful if constraint matrix includes large number  of
6035 both types of rows - dense and sparse. If you have just a few sparse rows,
6036 you  may  represent  them  in  dense  format  without losing  performance.
6037 Similarly, if you have just a few dense rows, you may store them in sparse
6038 format with almost same performance.
6039 
6040 INPUT PARAMETERS:
6041     State   -   structure previously allocated with minqpcreate() call.
6042     SparseA -   sparse matrix with size [SparseK,N] (exactly!).
6043                 Each row of A represents one general linear constraint.
6044                 A can be stored in any sparse storage format.
6045     SparseK -   number of sparse constraints, SparseK>=0
6046     DenseA  -   linear constraints, array[DenseK,N], set of dense constraints.
6047                 Each row of A represents one general linear constraint.
6048     DenseK  -   number of dense constraints, DenseK>=0
6049     AL, AU  -   lower and upper bounds, array[SparseK+DenseK], with former
6050                 SparseK elements corresponding to sparse constraints,  and
6051                 latter DenseK elements corresponding to dense constraints;
6052                 * AL[i]=AU[i] => equality constraint Ai*x=AL[i]
6053                 * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i]
6054                 * AL[i]=-INF  => one-sided constraint Ai*x<=AU[i]
6055                 * AU[i]=+INF  => one-sided constraint AL[i]<=Ai*x
6056                 * AL[i]=-INF, AU[i]=+INF => constraint is ignored
6057     Note:       the total number of constraints is SparseK+DenseK;  if  it
6058                 is zero, the constraint matrices and AL, AU are ignored.
6059 
6060   -- ALGLIB --
6061      Copyright 01.11.2019 by Bochkanov Sergey
6062 *************************************************************************/
6063 void minqpsetlc2mixed(const minqpstate &state, const sparsematrix &sparsea, const ae_int_t ksparse, const real_2d_array &densea, const ae_int_t kdense, const real_1d_array &al, const real_1d_array &au, const xparams _xparams = alglib::xdefault);
6064 
6065 
6066 /*************************************************************************
6067 This function appends two-sided linear constraint  AL <= A*x <= AU  to the
6068 matrix of currently present dense constraints.
6069 
6070 INPUT PARAMETERS:
6071     State   -   structure previously allocated with minqpcreate() call.
6072     A       -   linear constraint coefficients, array[N]; the  right  side
6073                 is NOT included.
6074     AL, AU  -   lower and upper bounds;
6075                 * AL=AU    => equality constraint A*x=AL
6076                 * AL<AU    => two-sided constraint AL<=A*x<=AU
6077                 * AL=-INF  => one-sided constraint A*x<=AU
6078                 * AU=+INF  => one-sided constraint AL<=A*x
6079                 * AL=-INF, AU=+INF => constraint is ignored
6080 
6081   -- ALGLIB --
6082      Copyright 19.07.2018 by Bochkanov Sergey
6083 *************************************************************************/
6084 void minqpaddlc2dense(const minqpstate &state, const real_1d_array &a, const double al, const double au, const xparams _xparams = alglib::xdefault);
6085 
6086 
6087 /*************************************************************************
6088 This function appends two-sided linear constraint  AL <= A*x <= AU  to the
6089 list of currently present sparse constraints.
6090 
6091 Constraint is passed in compressed format: as list of non-zero entries  of
6092 coefficient vector A. Such approach is more efficient than  dense  storage
6093 for highly sparse constraint vectors.
6094 
6095 INPUT PARAMETERS:
6096     State   -   structure previously allocated with minqpcreate() call.
6097     IdxA    -   array[NNZ], indexes of non-zero elements of A:
6098                 * can be unsorted
6099                 * can include duplicate indexes (corresponding entries  of
6100                   ValA[] will be summed)
6101     ValA    -   array[NNZ], values of non-zero elements of A
6102     NNZ     -   number of non-zero coefficients in A
6103     AL, AU  -   lower and upper bounds;
6104                 * AL=AU    => equality constraint A*x=AL
6105                 * AL<AU    => two-sided constraint AL<=A*x<=AU
6106                 * AL=-INF  => one-sided constraint A*x<=AU
6107                 * AU=+INF  => one-sided constraint AL<=A*x
6108                 * AL=-INF, AU=+INF => constraint is ignored
6109 
6110   -- ALGLIB --
6111      Copyright 19.07.2018 by Bochkanov Sergey
6112 *************************************************************************/
6113 void minqpaddlc2(const minqpstate &state, const integer_1d_array &idxa, const real_1d_array &vala, const ae_int_t nnz, const double al, const double au, const xparams _xparams = alglib::xdefault);
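/*************************************************************************
A short sketch of minqpaddlc2(), appending the sparse one-sided constraint
x[2]+2*x[7]<=3 in compressed format (only non-zero entries of A are given):

    alglib::integer_1d_array idxa = "[2, 7]";
    alglib::real_1d_array    vala = "[1.0, 2.0]";
    alglib::minqpaddlc2(state, idxa, vala, 2, alglib::fp_neginf, 3.0);
*************************************************************************/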
6114 
6115 
6116 /*************************************************************************
6117 This function appends two-sided linear constraint  AL <= A*x <= AU  to the
6118 list of currently present sparse constraints.
6119 
6120 Constraint vector A is  passed  as  a  dense  array  which  is  internally
6121 sparsified by this function.
6122 
6123 INPUT PARAMETERS:
6124     State   -   structure previously allocated with minqpcreate() call.
6125     DA      -   array[N], constraint vector
6126     AL, AU  -   lower and upper bounds;
6127                 * AL=AU    => equality constraint A*x=AL
6128                 * AL<AU    => two-sided constraint AL<=A*x<=AU
6129                 * AL=-INF  => one-sided constraint A*x<=AU
6130                 * AU=+INF  => one-sided constraint AL<=A*x
6131                 * AL=-INF, AU=+INF => constraint is ignored
6132 
6133   -- ALGLIB --
6134      Copyright 19.07.2018 by Bochkanov Sergey
6135 *************************************************************************/
6136 void minqpaddlc2sparsefromdense(const minqpstate &state, const real_1d_array &da, const double al, const double au, const xparams _xparams = alglib::xdefault);
6137 
6138 
6139 /*************************************************************************
6140 This function solves quadratic programming problem.
6141 
6142 Prior to calling this function you should choose solver by means of one of
6143 the following functions:
6144 
6145 * minqpsetalgoquickqp()     - for QuickQP solver
6146 * minqpsetalgobleic()       - for BLEIC-QP solver
6147 * minqpsetalgodenseaul()    - for Dense-AUL-QP solver
6148 * minqpsetalgodenseipm()    - for Dense-IPM-QP solver
6149 
6150 These functions also allow you to control stopping criteria of the solver.
6151 If you did not set solver,  MinQP  subpackage  will  automatically  select
6152 solver for your problem and will run it with default stopping criteria.
6153 
6154 However, it is better to explicitly set the solver and its stopping criteria.
6155 
6156 INPUT PARAMETERS:
6157     State   -   algorithm state
6158 
6159 You should use MinQPResults() function to access results after calls
6160 to this function.
6161 
6162   -- ALGLIB --
6163      Copyright 11.01.2011 by Bochkanov Sergey.
6164      Special thanks to Elvira Illarionova  for  important  suggestions  on
6165      the linearly constrained QP algorithm.
6166 *************************************************************************/
6167 void minqpoptimize(const minqpstate &state, const xparams _xparams = alglib::xdefault);
6168 
6169 
6170 /*************************************************************************
6171 QP solver results
6172 
6173 INPUT PARAMETERS:
6174     State   -   algorithm state
6175 
6176 OUTPUT PARAMETERS:
6177     X       -   array[0..N-1], solution (on failure - the best point found
6178                 so far).
6179     Rep     -   optimization report, contains:
6180                 * completion code in Rep.TerminationType (positive  values
6181                   denote some kind of success, negative - failures)
6182                 * Lagrange multipliers - for QP solvers which support them
6183                 * other statistics
6184                 See comments on minqpreport structure for more information
6185 
6186 Following completion codes are returned in Rep.TerminationType:
6187 * -9    failure of the automatic scale evaluation:  one  of  the  diagonal
6188         elements of the quadratic term is non-positive.  Specify  variable
6189         scales manually!
6190 * -5    inappropriate solver was used:
6191         * QuickQP solver for problem with general linear constraints
6192 * -4    the function is unbounded from below even under constraints,
6193         no meaningful minimum can be found.
6194 * -3    inconsistent constraints (or, maybe, feasible point is too hard to
6195         find).
6196 * -2    IPM solver has difficulty finding primal/dual feasible point.
6197         It is likely that the problem is either infeasible or unbounded,
6198         but it is difficult to determine exact reason for termination.
6199         X contains best point found so far.
6200 *  >0   success
6201 *  7    stopping conditions are too stringent,
6202         further improvement is impossible,
6203         X contains best point found so far.
6204 
6205   -- ALGLIB --
6206      Copyright 11.01.2011 by Bochkanov Sergey
6207 *************************************************************************/
6208 void minqpresults(const minqpstate &state, real_1d_array &x, minqpreport &rep, const xparams _xparams = alglib::xdefault);
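/*************************************************************************
An end-to-end sketch of a small QP session. It solves the convex 2-variable
problem  min 0.5*x'*A*x + b'*x  subject to  0<=x<=1;  minqpcreate() and the
term setters used below are declared elsewhere in this header, and for this
particular data the expected solution is approximately x=[1,1]:

    alglib::real_2d_array a = "[[2, 0],[0, 2]]";
    alglib::real_1d_array b = "[-2, -3]";
    alglib::real_1d_array s = "[1, 1]";
    alglib::real_1d_array x;
    alglib::minqpstate  state;
    alglib::minqpreport rep;

    alglib::minqpcreate(2, state);
    alglib::minqpsetquadraticterm(state, a);
    alglib::minqpsetlinearterm(state, b);
    alglib::minqpsetbcall(state, 0.0, 1.0);
    alglib::minqpsetscale(state, s);
    alglib::minqpsetalgoquickqp(state, 0.0, 0.0, 1.0e-9, 0, true);
    alglib::minqpoptimize(state);
    alglib::minqpresults(state, x, rep);
    if( rep.terminationtype>0 )
    {
        // success: x holds the solution
    }
*************************************************************************/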
6209 
6210 
6211 /*************************************************************************
6212 QP results
6213 
6214 Buffered implementation of MinQPResults() which uses pre-allocated  buffer
6215 to store X[]. If buffer size is  too  small,  it  resizes  buffer.  It  is
6216 intended to be used in the inner cycles of performance critical algorithms
6217 where array reallocation penalty is too large to be ignored.
6218 
6219   -- ALGLIB --
6220      Copyright 11.01.2011 by Bochkanov Sergey
6221 *************************************************************************/
6222 void minqpresultsbuf(const minqpstate &state, real_1d_array &x, minqpreport &rep, const xparams _xparams = alglib::xdefault);
6223 #endif
6224 
6225 #if defined(AE_COMPILE_MINLM) || !defined(AE_PARTIAL_BUILD)
6226 /*************************************************************************
6227                 IMPROVED LEVENBERG-MARQUARDT METHOD FOR
6228                  NON-LINEAR LEAST SQUARES OPTIMIZATION
6229 
6230 DESCRIPTION:
6231 This function is used to find minimum of function which is represented  as
6232 sum of squares:
6233     F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
6234 using value of function vector f[] and Jacobian of f[].
6235 
6236 
6237 REQUIREMENTS:
6238 This algorithm will request following information during its operation:
6239 
6240 * function vector f[] at given point X
6241 * function vector f[] and Jacobian of f[] (simultaneously) at given point
6242 
6243 There are several overloaded versions of  MinLMOptimize()  function  which
6244 correspond  to  different LM-like optimization algorithms provided by this
6245 unit. You should choose version which accepts fvec()  and jac() callbacks.
6246 First  one  is used to calculate f[] at given point, second one calculates
6247 f[] and Jacobian df[i]/dx[j].
6248 
6249 You can try to initialize MinLMState structure with VJ  function and  then
6250 use incorrect version  of  MinLMOptimize()  (for  example,  version  which
6251 works  with  general  form function and does not provide Jacobian), but it
6252 will  lead  to  exception  being  thrown  after first attempt to calculate
6253 Jacobian.
6254 
6255 
6256 USAGE:
6257 1. User initializes algorithm state with MinLMCreateVJ() call
6258 2. User tunes solver parameters with MinLMSetCond(),  MinLMSetStpMax() and
6259    other functions
6260 3. User calls MinLMOptimize() function which  takes algorithm  state   and
6261    callback functions.
6262 4. User calls MinLMResults() to get solution
6263 5. Optionally, user may call MinLMRestartFrom() to solve  another  problem
6264    with same N/M but another starting point and/or another function.
6265    MinLMRestartFrom() allows to reuse already initialized structure.
6266 
6267 
6268 INPUT PARAMETERS:
6269     N       -   dimension, N>1
6270                 * if given, only leading N elements of X are used
6271                 * if not given, automatically determined from size of X
6272     M       -   number of functions f[i]
6273     X       -   initial solution, array[0..N-1]
6274 
6275 OUTPUT PARAMETERS:
6276     State   -   structure which stores algorithm state
6277 
6278 NOTES:
6279 1. you may tune stopping conditions with MinLMSetCond() function
6280 2. if target function contains exp() or other fast growing functions,  and
6281    optimization algorithm makes too large steps which leads  to  overflow,
6282    use MinLMSetStpMax() function to bound algorithm's steps.
6283 
6284   -- ALGLIB --
6285      Copyright 30.03.2009 by Bochkanov Sergey
6286 *************************************************************************/
6287 void minlmcreatevj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
6288 void minlmcreatevj(const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
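/*************************************************************************
A sketch of the VJ protocol for the 2-dimensional Rosenbrock-type residual
vector f0=10*(x1-x0^2), f1=1-x0. The callback signatures below  match  the
fvec/jac parameters of minlmoptimize() declared further in this header:

    void rosenbrock_fvec(const alglib::real_1d_array &x, alglib::real_1d_array &fi, void *ptr)
    {
        fi[0] = 10*(x[1]-x[0]*x[0]);
        fi[1] = 1-x[0];
    }
    void rosenbrock_jac(const alglib::real_1d_array &x, alglib::real_1d_array &fi, alglib::real_2d_array &jac, void *ptr)
    {
        fi[0] = 10*(x[1]-x[0]*x[0]);
        fi[1] = 1-x[0];
        jac[0][0] = -20*x[0];   jac[0][1] = 10.0;
        jac[1][0] = -1.0;       jac[1][1] =  0.0;
    }

    // ...later, inside the calling code:
    alglib::real_1d_array x0 = "[-1.2, 1.0]";
    alglib::minlmstate state;
    alglib::minlmcreatevj(2, x0, state);   // M=2 functions, N taken from x0
*************************************************************************/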
6289 
6290 
6291 /*************************************************************************
6292                 IMPROVED LEVENBERG-MARQUARDT METHOD FOR
6293                  NON-LINEAR LEAST SQUARES OPTIMIZATION
6294 
6295 DESCRIPTION:
6296 This function is used to find minimum of function which is represented  as
6297 sum of squares:
6298     F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
6299 using value of function vector f[] only. Finite differences  are  used  to
6300 calculate Jacobian.
6301 
6302 
6303 REQUIREMENTS:
6304 This algorithm will request following information during its operation:
6305 * function vector f[] at given point X
6306 
6307 There are several overloaded versions of  MinLMOptimize()  function  which
6308 correspond  to  different LM-like optimization algorithms provided by this
6309 unit. You should choose version which accepts fvec() callback.
6310 
6311 You can try to initialize MinLMState structure with VJ  function and  then
6312 use incorrect version  of  MinLMOptimize()  (for  example,  version  which
6313 works with general form function and does not accept function vector), but
6314 it will  lead  to  exception being thrown after first attempt to calculate
6315 Jacobian.
6316 
6317 
6318 USAGE:
6319 1. User initializes algorithm state with MinLMCreateV() call
6320 2. User tunes solver parameters with MinLMSetCond(),  MinLMSetStpMax() and
6321    other functions
6322 3. User calls MinLMOptimize() function which  takes algorithm  state   and
6323    callback functions.
6324 4. User calls MinLMResults() to get solution
6325 5. Optionally, user may call MinLMRestartFrom() to solve  another  problem
6326    with same N/M but another starting point and/or another function.
6327    MinLMRestartFrom() allows to reuse already initialized structure.
6328 
6329 
6330 INPUT PARAMETERS:
6331     N       -   dimension, N>1
6332                 * if given, only leading N elements of X are used
6333                 * if not given, automatically determined from size of X
6334     M       -   number of functions f[i]
6335     X       -   initial solution, array[0..N-1]
6336     DiffStep-   differentiation step, >0
6337 
6338 OUTPUT PARAMETERS:
6339     State   -   structure which stores algorithm state
6340 
6341 See also MinLMIteration, MinLMResults.
6342 
6343 NOTES:
6344 1. you may tune stopping conditions with MinLMSetCond() function
6345 2. if target function contains exp() or other fast growing functions,  and
6346    optimization algorithm makes too large steps which leads  to  overflow,
6347    use MinLMSetStpMax() function to bound algorithm's steps.
6348 
6349   -- ALGLIB --
6350      Copyright 30.03.2009 by Bochkanov Sergey
6351 *************************************************************************/
6352 void minlmcreatev(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlmstate &state, const xparams _xparams = alglib::xdefault);
6353 void minlmcreatev(const ae_int_t m, const real_1d_array &x, const double diffstep, minlmstate &state, const xparams _xparams = alglib::xdefault);
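/*************************************************************************
A minimal end-to-end sketch of the V protocol (Jacobian approximated  with
finite differences). It solves the tiny system  x0^2+x1^2=1, x0=x1  in the
least-squares sense; minlmresults() and the minlmreport type are  declared
elsewhere in this header:

    void my_fvec(const alglib::real_1d_array &x, alglib::real_1d_array &fi, void *ptr)
    {
        fi[0] = x[0]*x[0] + x[1]*x[1] - 1.0;
        fi[1] = x[0] - x[1];
    }

    // ...later, inside the calling code:
    alglib::real_1d_array x = "[0.5, 0.1]";
    alglib::minlmstate  state;
    alglib::minlmreport rep;
    alglib::minlmcreatev(2, x, 1.0e-6, state);   // M=2, DiffStep=1e-6
    alglib::minlmsetcond(state, 1.0e-10, 0);     // EpsX=1e-10, unlimited iterations
    alglib::minlmoptimize(state, my_fvec);
    alglib::minlmresults(state, x, rep);         // x ~ [0.707, 0.707]
*************************************************************************/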
6354 
6355 
6356 /*************************************************************************
6357     LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION
6358 
6359 DESCRIPTION:
6360 This  function  is  used  to  find  minimum  of general form (not "sum-of-
6361 -squares") function
6362     F = F(x[0], ..., x[n-1])
6363 using  its  gradient  and  Hessian.  Levenberg-Marquardt modification with
6364 L-BFGS pre-optimization and internal pre-conditioned  L-BFGS  optimization
6365 after each Levenberg-Marquardt step is used.
6366 
6367 
6368 REQUIREMENTS:
6369 This algorithm will request following information during its operation:
6370 
6371 * function value F at given point X
6372 * F and gradient G (simultaneously) at given point X
6373 * F, G and Hessian H (simultaneously) at given point X
6374 
6375 There are several overloaded versions of  MinLMOptimize()  function  which
6376 correspond  to  different LM-like optimization algorithms provided by this
6377 unit. You should choose version which accepts func(),  grad()  and  hess()
6378 function pointers. First pointer is used to calculate F  at  given  point,
6379 second  one  calculates  F(x)  and  grad F(x),  third one calculates F(x),
6380 grad F(x), hess F(x).
6381 
6382 You can try to initialize MinLMState structure with FGH-function and  then
6383 use incorrect version of MinLMOptimize() (for example, version which  does
6384 not provide Hessian matrix), but it will lead to  exception  being  thrown
6385 after first attempt to calculate Hessian.
6386 
6387 
6388 USAGE:
6389 1. User initializes algorithm state with MinLMCreateFGH() call
6390 2. User tunes solver parameters with MinLMSetCond(),  MinLMSetStpMax() and
6391    other functions
6392 3. User calls MinLMOptimize() function which  takes algorithm  state   and
6393    pointers (delegates, etc.) to callback functions.
6394 4. User calls MinLMResults() to get solution
6395 5. Optionally, user may call MinLMRestartFrom() to solve  another  problem
6396    with same N but another starting point and/or another function.
6397    MinLMRestartFrom() allows to reuse already initialized structure.
6398 
6399 
6400 INPUT PARAMETERS:
6401     N       -   dimension, N>1
6402                 * if given, only leading N elements of X are used
6403                 * if not given, automatically determined from size of X
6404     X       -   initial solution, array[0..N-1]
6405 
6406 OUTPUT PARAMETERS:
6407     State   -   structure which stores algorithm state
6408 
6409 NOTES:
6410 1. you may tune stopping conditions with MinLMSetCond() function
6411 2. if target function contains exp() or other fast growing functions,  and
6412    optimization algorithm makes too large steps which leads  to  overflow,
6413    use MinLMSetStpMax() function to bound algorithm's steps.
6414 
6415   -- ALGLIB --
6416      Copyright 30.03.2009 by Bochkanov Sergey
6417 *************************************************************************/
6418 void minlmcreatefgh(const ae_int_t n, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
6419 void minlmcreatefgh(const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
6420 
6421 
6422 /*************************************************************************
6423 This function sets stopping conditions for Levenberg-Marquardt optimization
6424 algorithm.
6425 
6426 INPUT PARAMETERS:
6427     State   -   structure which stores algorithm state
6428     EpsX    -   >=0
6429                 The subroutine finishes its work if  on  k+1-th  iteration
6430                 the condition |v|<=EpsX is fulfilled, where:
6431                 * |.| means Euclidean norm
6432                 * v - scaled step vector, v[i]=dx[i]/s[i]
6433                 * dx - step vector, dx=X(k+1)-X(k)
6434                 * s - scaling coefficients set by MinLMSetScale()
6435                 Recommended values: 1E-9 ... 1E-12.
6436     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
6437                 iterations   is    unlimited.   Only   Levenberg-Marquardt
6438                 iterations  are  counted  (L-BFGS/CG  iterations  are  NOT
6439                 counted because their cost is very low compared to that of
6440                 LM).
6441 
6442 Passing  EpsX=0  and  MaxIts=0  (simultaneously)  will  lead  to automatic
6443 stopping criterion selection (small EpsX).
6444 
6445 NOTE: it is not recommended to set large EpsX (say, 0.001). Because LM  is
6446       a second-order method, it performs very precise steps anyway.
6447 
6448   -- ALGLIB --
6449      Copyright 02.04.2010 by Bochkanov Sergey
6450 *************************************************************************/
6451 void minlmsetcond(const minlmstate &state, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
6452 
6453 
6454 /*************************************************************************
6455 This function turns on/off reporting.
6456 
6457 INPUT PARAMETERS:
6458     State   -   structure which stores algorithm state
6459     NeedXRep-   whether iteration reports are needed or not
6460 
6461 If NeedXRep is True, algorithm will call rep() callback function if  it is
6462 provided to MinLMOptimize(). Both Levenberg-Marquardt and internal  L-BFGS
6463 iterations are reported.
6464 
6465   -- ALGLIB --
6466      Copyright 02.04.2010 by Bochkanov Sergey
6467 *************************************************************************/
6468 void minlmsetxrep(const minlmstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
6469 
6470 
6471 /*************************************************************************
6472 This function sets maximum step length
6473 
6474 INPUT PARAMETERS:
6475     State   -   structure which stores algorithm state
6476     StpMax  -   maximum step length, >=0. Set StpMax to 0.0,  if you don't
6477                 want to limit step length.
6478 
6479 Use this subroutine when you optimize target function which contains exp()
6480 or  other  fast  growing  functions,  and optimization algorithm makes too
6481 large  steps  which  lead  to  overflow. This function allows us to reject
6482 steps  that  are  too  large  (and  therefore  expose  us   to   possible
6483 overflow) without actually calculating function value at x+stp*d.
6484 
6485 NOTE: non-zero StpMax leads to moderate  performance  degradation  because
6486 intermediate  step  of  preconditioned L-BFGS optimization is incompatible
6487 with limits on step size.
6488 
6489   -- ALGLIB --
6490      Copyright 02.04.2010 by Bochkanov Sergey
6491 *************************************************************************/
6492 void minlmsetstpmax(const minlmstate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
6493 
6494 
6495 /*************************************************************************
6496 This function sets scaling coefficients for LM optimizer.
6497 
6498 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
6499 size and gradient are scaled before comparison with tolerances).  Scale of
6500 the I-th variable is a translation invariant measure of:
6501 a) "how large" the variable is
6502 b) how large the step should be to make significant changes in the function
6503 
6504 Generally, scale is NOT considered to be a form of preconditioner.  But LM
6505 optimizer is unique in that it uses scaling matrix both  in  the  stopping
6506 condition tests and as Marquardt damping factor.
6507 
6508 Proper scaling is very important for the algorithm performance. It is less
6509 important for the quality of results, but still has some influence (it  is
6510 easier  to  converge  when  variables  are  properly  scaled, so premature
6511 stopping is possible when very badly scaled variables  are  combined  with
6512 relaxed stopping conditions).
6513 
6514 INPUT PARAMETERS:
6515     State   -   structure stores algorithm state
6516     S       -   array[N], non-zero scaling coefficients
6517                 S[i] may be negative, sign doesn't matter.
6518 
6519   -- ALGLIB --
6520      Copyright 14.01.2011 by Bochkanov Sergey
6521 *************************************************************************/
6522 void minlmsetscale(const minlmstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
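/*************************************************************************
A short sketch of minlmsetscale() for a hypothetical problem  whose  first
variable varies on the order of 1E+6 and whose second varies on the  order
of 1E-4 ('state' is an already created minlmstate):

    alglib::real_1d_array s = "[1.0e+6, 1.0e-4]";
    alglib::minlmsetscale(state, s);
*************************************************************************/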
6523 
6524 
6525 /*************************************************************************
6526 This function sets boundary constraints for LM optimizer
6527 
6528 Boundary constraints are inactive by default (after initial creation).
6529 They are preserved until explicitly turned off with another SetBC() call.
6530 
6531 INPUT PARAMETERS:
6532     State   -   structure stores algorithm state
6533     BndL    -   lower bounds, array[N].
6534                 If some (all) variables are unbounded, you may specify
6535                 very small number or -INF (latter is recommended because
6536                 it will allow solver to use better algorithm).
6537     BndU    -   upper bounds, array[N].
6538                 If some (all) variables are unbounded, you may specify
6539                 very large number or +INF (latter is recommended because
6540                 it will allow solver to use better algorithm).
6541 
6542 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
6543 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
6544 
6545 NOTE 2: this solver has following useful properties:
6546 * bound constraints are always satisfied exactly
6547 * function is evaluated only INSIDE area specified by bound constraints
6548   or at its boundary
6549 
6550   -- ALGLIB --
6551      Copyright 14.01.2011 by Bochkanov Sergey
6552 *************************************************************************/
6553 void minlmsetbc(const minlmstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
6554 
6555 
6556 /*************************************************************************
6557 This function sets general linear constraints for LM optimizer
6558 
6559 Linear constraints are inactive by default (after initial creation).  They
6560 are preserved until explicitly turned off with another minlmsetlc() call.
6561 
6562 INPUT PARAMETERS:
6563     State   -   structure stores algorithm state
6564     C       -   linear constraints, array[K,N+1].
6565                 Each row of C represents one constraint, either equality
6566                 or inequality (see below):
6567                 * first N elements correspond to coefficients,
6568                 * last element corresponds to the right part.
6569                 All elements of C (including right part) must be finite.
6570     CT      -   type of constraints, array[K]:
6571                 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
6572                 * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n+1]
6573                 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
6574     K       -   number of equality/inequality constraints, K>=0:
6575                 * if given, only leading K elements of C/CT are used
6576                 * if not given, automatically determined from sizes of C/CT
6577 
6578 IMPORTANT: if you have linear constraints, it is strongly  recommended  to
6579            set scale of variables with minlmsetscale(). QP solver which is
6580            used to calculate linearly constrained steps heavily relies  on
6581            good scaling of input problems.
6582 
6583 IMPORTANT: solvers created with minlmcreatefgh()  do  not  support  linear
6584            constraints.
6585 
6586 NOTE: linear  (non-bound)  constraints are satisfied only approximately  -
6587       there  always  exists some violation due  to  numerical  errors  and
6588       algorithmic limitations.
6589 
6590 NOTE: general linear constraints  add  significant  overhead  to  solution
6591       process. Although solver performs roughly same amount of  iterations
6592       (when compared  with  similar  box-only  constrained  problem), each
6593       iteration   now    involves  solution  of  linearly  constrained  QP
6594       subproblem, which requires ~3-5 times more Cholesky  decompositions.
6595       Thus, if you can reformulate your problem in such a way that it  has
6596       only box constraints, it may be beneficial to do so.
6597 
6598   -- ALGLIB --
6599      Copyright 14.01.2011 by Bochkanov Sergey
6600 *************************************************************************/
6601 void minlmsetlc(const minlmstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
6602 void minlmsetlc(const minlmstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
6603 
6604 
6605 /*************************************************************************
6606 This function is used to change acceleration settings
6607 
6608 You can choose between three acceleration strategies:
6609 * AccType=0, no acceleration.
6610 * AccType=1, secant updates are used to update quadratic model after  each
6611   iteration. After fixed number of iterations (or after  model  breakdown)
6612   we  recalculate  quadratic  model  using  analytic  Jacobian  or  finite
6613   differences. Number of secant-based iterations depends  on  optimization
6614   settings: about 3 iterations - when we have analytic Jacobian, up to 2*N
6615   iterations - when we use finite differences to calculate Jacobian.
6616 
6617 AccType=1 is recommended when Jacobian  calculation  cost is prohibitively
6618 high (several Mx1 function vector calculations  followed  by  several  NxN
6619 Cholesky factorizations are faster than calculation of one M*N  Jacobian).
6620 It should also be used when we have no Jacobian, because finite difference
6621 approximation takes too much time to compute.
6622 
6623 Table below list  optimization  protocols  (XYZ  protocol  corresponds  to
6624 MinLMCreateXYZ) and acceleration types they support (and use by  default).
6625 
6626 ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS:
6627 
6628 protocol    0   1   comment
6629 V           +   +
6630 VJ          +   +
6631 FGH         +
6632 
6633 DEFAULT VALUES:
6634 
6635 protocol    0   1   comment
6636 V               x   without acceleration it is so slooooooooow
6637 VJ          x
6638 FGH         x
6639 
6640 NOTE: this  function should be called before optimization. Attempt to call
6641 it during algorithm iterations may result in unexpected behavior.
6642 
6643 NOTE: attempt to call this function with unsupported protocol/acceleration
6644 combination will result in exception being thrown.
6645 
6646   -- ALGLIB --
6647      Copyright 14.10.2010 by Bochkanov Sergey
6648 *************************************************************************/
6649 void minlmsetacctype(const minlmstate &state, const ae_int_t acctype, const xparams _xparams = alglib::xdefault);
6650 
6651 
6652 /*************************************************************************
6653 This function provides reverse communication interface
6654 Reverse communication interface is not documented or recommended to use.
6655 See below for functions which provide better documented API
6656 *************************************************************************/
6657 bool minlmiteration(const minlmstate &state, const xparams _xparams = alglib::xdefault);
6658 
6659 
6660 /*************************************************************************
6661 This family of functions is used to launch iterations of nonlinear optimizer
6662 
6663 These functions accept following parameters:
6664     state   -   algorithm state
6665     func    -   callback which calculates function (or merit function)
6666                 value func at given point x
6667     grad    -   callback which calculates function (or merit function)
6668                 value func and gradient grad at given point x
6669     hess    -   callback which calculates function (or merit function)
6670                 value func, gradient grad and Hessian hess at given point x
6671     fvec    -   callback which calculates function vector fi[]
6672                 at given point x
6673     jac     -   callback which calculates function vector fi[]
6674                 and Jacobian jac at given point x
6675     rep     -   optional callback which is called after each iteration
6676                 can be NULL
6677     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
6678                 can be NULL
6679 
6680 NOTES:
6681 
6682 1. Depending on the function used to create the state  structure,  this
6683    algorithm may accept Jacobian and/or Hessian and/or gradient. As  said
6684    above, there are several versions of this  function,  which  accept
6685    different sets of callbacks.
6686 
6687    This flexibility opens way to subtle errors - you may create state with
6688    MinLMCreateFGH() (optimization using Hessian), but call function  which
6689    does not accept Hessian. So when the algorithm requests  the  Hessian,
6690    there will be no callback to call. In this case an exception will be thrown.
6691 
6692    Be careful to avoid such errors because there is no way to find them at
6693    compile time - you can see them at runtime only.
6694 
6695   -- ALGLIB --
6696      Copyright 10.03.2009 by Bochkanov Sergey
6697 
6698 *************************************************************************/
6699 void minlmoptimize(minlmstate &state,
6700     void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
6701     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6702     void *ptr = NULL,
6703     const xparams _xparams = alglib::xdefault);
6704 void minlmoptimize(minlmstate &state,
6705     void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
6706     void  (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
6707     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6708     void *ptr = NULL,
6709     const xparams _xparams = alglib::xdefault);
6710 void minlmoptimize(minlmstate &state,
6711     void (*func)(const real_1d_array &x, double &func, void *ptr),
6712     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
6713     void (*hess)(const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr),
6714     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6715     void *ptr = NULL,
6716     const xparams _xparams = alglib::xdefault);
6717 void minlmoptimize(minlmstate &state,
6718     void (*func)(const real_1d_array &x, double &func, void *ptr),
6719     void  (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
6720     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6721     void *ptr = NULL,
6722     const xparams _xparams = alglib::xdefault);
6723 void minlmoptimize(minlmstate &state,
6724     void (*func)(const real_1d_array &x, double &func, void *ptr),
6725     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
6726     void  (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
6727     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6728     void *ptr = NULL,
6729     const xparams _xparams = alglib::xdefault);
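

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
A minimal sketch of the VJ protocol. The callbacks below are hypothetical;
they minimize f0^2+f1^2 with f0=10*(x0+3)^2 and f1=(x1-3)^2:

    void demo_fvec(const alglib::real_1d_array &x, alglib::real_1d_array &fi, void *ptr)
    {
        fi[0] = 10*(x[0]+3)*(x[0]+3);
        fi[1] = (x[1]-3)*(x[1]-3);
    }
    void demo_jac(const alglib::real_1d_array &x, alglib::real_1d_array &fi, alglib::real_2d_array &jac, void *ptr)
    {
        fi[0] = 10*(x[0]+3)*(x[0]+3);
        fi[1] = (x[1]-3)*(x[1]-3);
        jac[0][0] = 20*(x[0]+3);  jac[0][1] = 0;
        jac[1][0] = 0;            jac[1][1] = 2*(x[1]-3);
    }

    // ... with state created by minlmcreatevj(2, x, state):
    alglib::minlmoptimize(state, demo_fvec, demo_jac);
*************************************************************************/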
6730 
6731 
6732 /*************************************************************************
6733 This  function  activates/deactivates verification  of  the  user-supplied
6734 analytic Jacobian.
6735 
6736 Upon  activation  of  this  option  OptGuard  integrity  checker  performs
6737 numerical differentiation of your target function vector  at  the  initial
6738 point (note: future versions may also perform check  at  the final  point)
6739 and compares numerical Jacobian with analytic one provided by you.
6740 
6741 If difference is too large, an error flag is set and optimization  session
6742 continues. After optimization session is over, you can retrieve the report
6743 which stores  both  Jacobians,  and  specific  components  highlighted  as
6744 suspicious by the OptGuard.
6745 
6746 The OptGuard report can be retrieved with minlmoptguardresults().
6747 
6748 IMPORTANT: gradient check is a high-overhead option which  will  cost  you
6749            about 3*N additional function evaluations. In many cases it may
6750            cost as much as the rest of the optimization session.
6751 
6752            YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO
6753            CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY.
6754 
6755 NOTE: unlike previous incarnation of the gradient checking code,  OptGuard
6756       does NOT interrupt optimization even if it discovers bad gradient.
6757 
6758 INPUT PARAMETERS:
6759     State       -   structure used to store algorithm state
6760     TestStep    -   verification step used for numerical differentiation:
6761                     * TestStep=0 turns verification off
6762                     * TestStep>0 activates verification
                    You should choose TestStep carefully. A value which is
                    too large (so large that the function behavior is non-
                    cubic at this scale) will lead to false alarms. Too
                    short a step will result in rounding errors dominating
                    the numerical derivative.
6768 
6769                     You may use different step for different parameters by
6770                     means of setting scale with minlmsetscale().
6771 
6772 === EXPLANATION ==========================================================
6773 
In order to verify the gradient, the algorithm performs the following steps:
6775   * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
6776     where X[i] is i-th component of the initial point and S[i] is a  scale
6777     of i-th parameter
6778   * F(X) is evaluated at these trial points
6779   * we perform one more evaluation in the middle point of the interval
6780   * we  build  cubic  model using function values and derivatives at trial
6781     points and we compare its prediction with actual value in  the  middle
6782     point
6783 
6784   -- ALGLIB --
6785      Copyright 15.06.2014 by Bochkanov Sergey
6786 *************************************************************************/
6787 void minlmoptguardgradient(const minlmstate &state, const double teststep, const xparams _xparams = alglib::xdefault);
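

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
Assuming the analytic Jacobian is supplied via the hypothetical demo_jac
callback sketched above, a check with TestStep=1.0E-3 could be requested
before starting the optimization:

    alglib::minlmoptguardgradient(state, 0.001);
    alglib::minlmoptimize(state, demo_fvec, demo_jac);
*************************************************************************/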
6788 
6789 
6790 /*************************************************************************
6791 Results of OptGuard integrity check, should be called  after  optimization
6792 session is over.
6793 
6794 OptGuard checks analytic Jacobian  against  reference  value  obtained  by
6795 numerical differentiation with user-specified step.
6796 
6797 NOTE: other optimizers perform additional OptGuard checks for things  like
6798       C0/C1-continuity violations. However, LM optimizer  can  check  only
6799       for incorrect Jacobian.
6800 
6801       The reason is that unlike line search methods LM optimizer does  not
6802       perform extensive evaluations along the line. Thus, we simply do not
6803       have enough data to catch C0/C1-violations.
6804 
6805 This check is activated with  minlmoptguardgradient() function.
6806 
6807 Following flags are set when these errors are suspected:
6808 * rep.badgradsuspected, and additionally:
6809   * rep.badgradfidx for specific function (Jacobian row) suspected
6810   * rep.badgradvidx for specific variable (Jacobian column) suspected
6811   * rep.badgradxbase, a point where gradient/Jacobian is tested
6812   * rep.badgraduser, user-provided gradient/Jacobian
6813   * rep.badgradnum, reference gradient/Jacobian obtained via numerical
6814     differentiation
6815 
6816 INPUT PARAMETERS:
6817     state   -   algorithm state
6818 
6819 OUTPUT PARAMETERS:
6820     rep     -   OptGuard report
6821 
6822   -- ALGLIB --
6823      Copyright 21.11.2018 by Bochkanov Sergey
6824 *************************************************************************/
6825 void minlmoptguardresults(const minlmstate &state, optguardreport &rep, const xparams _xparams = alglib::xdefault);
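

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
After the optimization session the report could be inspected as follows
(printf assumes <cstdio>):

    alglib::optguardreport ogrep;
    alglib::minlmoptguardresults(state, ogrep);
    if( ogrep.badgradsuspected )
        printf("suspicious Jacobian entry: row=%d, col=%d\n",
               (int)ogrep.badgradfidx, (int)ogrep.badgradvidx);
*************************************************************************/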
6826 
6827 
6828 /*************************************************************************
6829 Levenberg-Marquardt algorithm results
6830 
6831 NOTE: if you activated OptGuard integrity checking functionality and  want
6832       to get OptGuard report,  it  can  be  retrieved  with  the  help  of
6833       minlmoptguardresults() function.
6834 
6835 INPUT PARAMETERS:
6836     State   -   algorithm state
6837 
6838 OUTPUT PARAMETERS:
6839     X       -   array[0..N-1], solution
6840     Rep     -   optimization  report;  includes  termination   codes   and
6841                 additional information. Termination codes are listed below,
6842                 see comments for this structure for more info.
6843                 Termination code is stored in rep.terminationtype field:
6844                 * -8    optimizer detected NAN/INF values either in the
6845                         function itself, or in its Jacobian
6846                 * -3    constraints are inconsistent
6847                 *  2    relative step is no more than EpsX.
                *  5    MaxIts steps were taken
6849                 *  7    stopping conditions are too stringent,
6850                         further improvement is impossible
6851                 *  8    terminated by user who called minlmrequesttermination().
6852                         X contains point which was "current accepted" when
6853                         termination request was submitted.
6854 
6855   -- ALGLIB --
6856      Copyright 10.03.2009 by Bochkanov Sergey
6857 *************************************************************************/
6858 void minlmresults(const minlmstate &state, real_1d_array &x, minlmreport &rep, const xparams _xparams = alglib::xdefault);
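

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
A minimal sketch of retrieving the solution and checking the termination
code:

    alglib::real_1d_array xsol;
    alglib::minlmreport rep;
    alglib::minlmresults(state, xsol, rep);
    if( rep.terminationtype>0 )
        printf("solution: %s\n", xsol.tostring(6).c_str());
    else
        printf("solver failed, code=%d\n", (int)rep.terminationtype);
*************************************************************************/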
6859 
6860 
6861 /*************************************************************************
6862 Levenberg-Marquardt algorithm results
6863 
6864 Buffered implementation of MinLMResults(), which uses pre-allocated buffer
6865 to store X[]. If buffer size is  too  small,  it  resizes  buffer.  It  is
6866 intended to be used in the inner cycles of performance critical algorithms
6867 where array reallocation penalty is too large to be ignored.
6868 
6869   -- ALGLIB --
6870      Copyright 10.03.2009 by Bochkanov Sergey
6871 *************************************************************************/
6872 void minlmresultsbuf(const minlmstate &state, real_1d_array &x, minlmreport &rep, const xparams _xparams = alglib::xdefault);
6873 
6874 
6875 /*************************************************************************
6876 This  subroutine  restarts  LM  algorithm from new point. All optimization
6877 parameters are left unchanged.
6878 
This function allows you to solve multiple optimization problems (which
must have the same number of dimensions) without the object reallocation
penalty.
6881 
6882 INPUT PARAMETERS:
6883     State   -   structure used for reverse communication previously
6884                 allocated with MinLMCreateXXX call.
6885     X       -   new starting point.
6886 
6887   -- ALGLIB --
6888      Copyright 30.07.2010 by Bochkanov Sergey
6889 *************************************************************************/
6890 void minlmrestartfrom(const minlmstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
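

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
Reusing the same state object for a second problem of the same size,
continuing the hypothetical sketch above (demo_fvec/demo_jac, rep):

    alglib::real_1d_array x2 = "[5,-5]";
    alglib::minlmrestartfrom(state, x2);
    alglib::minlmoptimize(state, demo_fvec, demo_jac);
    alglib::minlmresults(state, x2, rep);
*************************************************************************/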
6891 
6892 
6893 /*************************************************************************
6894 This subroutine submits request for termination of running  optimizer.  It
6895 should be called from user-supplied callback when user decides that it  is
6896 time to "smoothly" terminate optimization process.  As  result,  optimizer
6897 stops at point which was "current accepted" when termination  request  was
6898 submitted and returns error code 8 (successful termination).
6899 
6900 INPUT PARAMETERS:
6901     State   -   optimizer structure
6902 
6903 NOTE: after  request  for  termination  optimizer  may   perform   several
6904       additional calls to user-supplied callbacks. It does  NOT  guarantee
6905       to stop immediately - it just guarantees that these additional calls
6906       will be discarded later.
6907 
6908 NOTE: calling this function on optimizer which is NOT running will have no
6909       effect.
6910 
6911 NOTE: multiple calls to this function are possible. First call is counted,
6912       subsequent calls are silently ignored.
6913 
6914   -- ALGLIB --
6915      Copyright 08.10.2014 by Bochkanov Sergey
6916 *************************************************************************/
6917 void minlmrequesttermination(const minlmstate &state, const xparams _xparams = alglib::xdefault);
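

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
Termination is typically requested from the rep callback; here the state
object is passed through the user pointer, and the stopping rule is
hypothetical:

    void demo_rep(const alglib::real_1d_array &x, double func, void *ptr)
    {
        alglib::minlmstate *s = reinterpret_cast<alglib::minlmstate*>(ptr);
        if( func<1.0E-6 )
            alglib::minlmrequesttermination(*s);
    }

    // ... pass &state as the user pointer:
    alglib::minlmoptimize(state, demo_fvec, demo_jac, demo_rep, &state);
*************************************************************************/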
6918 
6919 
6920 /*************************************************************************
6921 This is obsolete function.
6922 
6923 Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ().
6924 
6925   -- ALGLIB --
6926      Copyright 30.03.2009 by Bochkanov Sergey
6927 *************************************************************************/
6928 void minlmcreatevgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
6929 void minlmcreatevgj(const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
6930 
6931 
6932 /*************************************************************************
6933 This is obsolete function.
6934 
6935 Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ().
6936 
6937   -- ALGLIB --
6938      Copyright 30.03.2009 by Bochkanov Sergey
6939 *************************************************************************/
6940 void minlmcreatefgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
6941 void minlmcreatefgj(const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
6942 
6943 
6944 /*************************************************************************
This function is considered obsolete since ALGLIB 3.1.0 and is present for
backward compatibility only. We recommend using MinLMCreateVJ, which
provides a similar but more consistent and feature-rich interface.
6948 
6949   -- ALGLIB --
6950      Copyright 30.03.2009 by Bochkanov Sergey
6951 *************************************************************************/
6952 void minlmcreatefj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
6953 void minlmcreatefj(const ae_int_t m, const real_1d_array &x, minlmstate &state, const xparams _xparams = alglib::xdefault);
6954 #endif
6955 
6956 #if defined(AE_COMPILE_MINCG) || !defined(AE_PARTIAL_BUILD)
6957 /*************************************************************************
6958         NONLINEAR CONJUGATE GRADIENT METHOD
6959 
6960 DESCRIPTION:
6961 The subroutine minimizes function F(x) of N arguments by using one of  the
6962 nonlinear conjugate gradient methods.
6963 
These CG methods are globally convergent (even on non-convex functions) as
long as grad(f) is Lipschitz continuous in some neighborhood of the level
set L = { x : f(x)<=f(x0) }.
6967 
6968 
6969 REQUIREMENTS:
6970 Algorithm will request following information during its operation:
6971 * function value F and its gradient G (simultaneously) at given point X
6972 
6973 
6974 USAGE:
6975 1. User initializes algorithm state with MinCGCreate() call
6976 2. User tunes solver parameters with MinCGSetCond(), MinCGSetStpMax() and
6977    other functions
6978 3. User calls MinCGOptimize() function which takes algorithm  state   and
6979    pointer (delegate, etc.) to callback function which calculates F/G.
6980 4. User calls MinCGResults() to get solution
5. Optionally, the user may call MinCGRestartFrom() to solve another
   problem with the same N but another starting point and/or another
   function. MinCGRestartFrom() allows reusing an already initialized
   structure.
6984 
6985 
6986 INPUT PARAMETERS:
6987     N       -   problem dimension, N>0:
6988                 * if given, only leading N elements of X are used
6989                 * if not given, automatically determined from size of X
6990     X       -   starting point, array[0..N-1].
6991 
6992 OUTPUT PARAMETERS:
6993     State   -   structure which stores algorithm state
6994 
6995   -- ALGLIB --
6996      Copyright 25.03.2010 by Bochkanov Sergey
6997 *************************************************************************/
6998 void mincgcreate(const ae_int_t n, const real_1d_array &x, mincgstate &state, const xparams _xparams = alglib::xdefault);
6999 void mincgcreate(const real_1d_array &x, mincgstate &state, const xparams _xparams = alglib::xdefault);
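

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
The full workflow described above, on a hypothetical quadratic target
f(x) = (x0-1)^2 + (x1+2)^2 with an analytic gradient:

    void demo_grad(const alglib::real_1d_array &x, double &func, alglib::real_1d_array &grad, void *ptr)
    {
        func    = (x[0]-1)*(x[0]-1) + (x[1]+2)*(x[1]+2);
        grad[0] = 2*(x[0]-1);
        grad[1] = 2*(x[1]+2);
    }

    alglib::real_1d_array x = "[0,0]";
    alglib::mincgstate state;
    alglib::mincgreport rep;
    alglib::mincgcreate(x, state);
    alglib::mincgsetcond(state, 0, 0, 1.0E-10, 0);  // stop on small scaled step
    alglib::mincgoptimize(state, demo_grad);
    alglib::mincgresults(state, x, rep);
*************************************************************************/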
7000 
7001 
7002 /*************************************************************************
The subroutine is a finite difference variant of MinCGCreate(). It uses
finite differences in order to differentiate the target function.

The description below contains information which is specific to this
function only. We recommend reading the comments on MinCGCreate() in order
to get more information about creation of the CG optimizer.
7009 
7010 INPUT PARAMETERS:
7011     N       -   problem dimension, N>0:
7012                 * if given, only leading N elements of X are used
7013                 * if not given, automatically determined from size of X
7014     X       -   starting point, array[0..N-1].
7015     DiffStep-   differentiation step, >0
7016 
7017 OUTPUT PARAMETERS:
7018     State   -   structure which stores algorithm state
7019 
7020 NOTES:
7021 1. algorithm uses 4-point central formula for differentiation.
7022 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
7023    S[] is scaling vector which can be set by MinCGSetScale() call.
3. we recommend you to use moderate values of the differentiation step. Too
   large a step will result in large truncation errors, while too small a
   step will result in large rounding errors. 1.0E-6 can be a good value to
   start with.
4. Numerical differentiation is very inefficient - one gradient calculation
   needs 4*N function evaluations. This function will work for any N -
   either small (1...10), moderate (10...100) or large (100...). However,
   the performance penalty will be too severe for all but small N.
   We should also say that code which relies on numerical differentiation
   is less robust and precise. CG needs exact gradient values; an imprecise
   gradient may slow down convergence, especially on highly nonlinear
   problems.
   Thus we recommend using this function for fast prototyping on small-
   dimensional problems only, and implementing the analytical gradient as
   soon as possible.
7040 
7041   -- ALGLIB --
7042      Copyright 16.05.2011 by Bochkanov Sergey
7043 *************************************************************************/
7044 void mincgcreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, mincgstate &state, const xparams _xparams = alglib::xdefault);
7045 void mincgcreatef(const real_1d_array &x, const double diffstep, mincgstate &state, const xparams _xparams = alglib::xdefault);
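

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
The same hypothetical target as above, but with a value-only callback and
numerical differentiation (DiffStep=1.0E-6):

    void demo_func(const alglib::real_1d_array &x, double &func, void *ptr)
    {
        func = (x[0]-1)*(x[0]-1) + (x[1]+2)*(x[1]+2);
    }

    alglib::real_1d_array x = "[0,0]";
    alglib::mincgstate state;
    alglib::mincgcreatef(x, 1.0E-6, state);
    alglib::mincgoptimize(state, demo_func);  // function-only variant
*************************************************************************/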
7046 
7047 
7048 /*************************************************************************
7049 This function sets stopping conditions for CG optimization algorithm.
7050 
7051 INPUT PARAMETERS:
7052     State   -   structure which stores algorithm state
7053     EpsG    -   >=0
7054                 The  subroutine  finishes  its  work   if   the  condition
7055                 |v|<EpsG is satisfied, where:
7056                 * |.| means Euclidian norm
7057                 * v - scaled gradient vector, v[i]=g[i]*s[i]
7058                 * g - gradient
7059                 * s - scaling coefficients set by MinCGSetScale()
7060     EpsF    -   >=0
7061                 The  subroutine  finishes  its work if on k+1-th iteration
7062                 the  condition  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
7063                 is satisfied.
7064     EpsX    -   >=0
7065                 The subroutine finishes its work if  on  k+1-th  iteration
7066                 the condition |v|<=EpsX is fulfilled, where:
7067                 * |.| means Euclidian norm
7068                 * v - scaled step vector, v[i]=dx[i]/s[i]
                * dx - step vector, dx=X(k+1)-X(k)
7070                 * s - scaling coefficients set by MinCGSetScale()
7071     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
7072                 iterations is unlimited.
7073 
7074 Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
7075 automatic stopping criterion selection (small EpsX).
7076 
7077   -- ALGLIB --
7078      Copyright 02.04.2010 by Bochkanov Sergey
7079 *************************************************************************/
7080 void mincgsetcond(const mincgstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
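

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
A few representative settings on an already created optimizer; the
tolerance values are illustrative only:

    alglib::mincgsetcond(state, 1.0E-6, 0, 0, 0);   // stop on small scaled gradient
    alglib::mincgsetcond(state, 0, 0, 1.0E-8, 100); // small scaled step OR 100 iterations
    alglib::mincgsetcond(state, 0, 0, 0, 0);        // automatic criterion (small EpsX)
*************************************************************************/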
7081 
7082 
7083 /*************************************************************************
7084 This function sets scaling coefficients for CG optimizer.
7085 
7086 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
7087 size and gradient are scaled before comparison with tolerances).  Scale of
7088 the I-th variable is a translation invariant measure of:
7089 a) "how large" the variable is
7090 b) how large the step should be to make significant changes in the function
7091 
7092 Scaling is also used by finite difference variant of CG optimizer  -  step
7093 along I-th axis is equal to DiffStep*S[I].
7094 
7095 In   most   optimizers  (and  in  the  CG  too)  scaling is NOT a form  of
7096 preconditioning. It just  affects  stopping  conditions.  You  should  set
7097 preconditioner by separate call to one of the MinCGSetPrec...() functions.
7098 
There is a special preconditioning mode, however, which uses the scaling
coefficients to form a diagonal preconditioning matrix. You can turn this
mode on if you want, but you should understand that scaling is not the
same thing as preconditioning - these are two different, although related,
ways of tuning the solver.
7104 
7105 INPUT PARAMETERS:
7106     State   -   structure stores algorithm state
7107     S       -   array[N], non-zero scaling coefficients
7108                 S[i] may be negative, sign doesn't matter.
7109 
7110   -- ALGLIB --
7111      Copyright 14.01.2011 by Bochkanov Sergey
7112 *************************************************************************/
7113 void mincgsetscale(const mincgstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
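

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
A hypothetical two-variable problem where the second variable is roughly a
thousand times larger than the first one:

    alglib::real_1d_array s = "[1,1000]";
    alglib::mincgsetscale(state, s);
*************************************************************************/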
7114 
7115 
7116 /*************************************************************************
7117 This function turns on/off reporting.
7118 
7119 INPUT PARAMETERS:
7120     State   -   structure which stores algorithm state
7121     NeedXRep-   whether iteration reports are needed or not
7122 
7123 If NeedXRep is True, algorithm will call rep() callback function if  it is
7124 provided to MinCGOptimize().
7125 
7126   -- ALGLIB --
7127      Copyright 02.04.2010 by Bochkanov Sergey
7128 *************************************************************************/
7129 void mincgsetxrep(const mincgstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
7130 
7131 
7132 /*************************************************************************
7133 This function sets CG algorithm.
7134 
7135 INPUT PARAMETERS:
7136     State   -   structure which stores algorithm state
7137     CGType  -   algorithm type:
7138                 * -1    automatic selection of the best algorithm
7139                 * 0     DY (Dai and Yuan) algorithm
7140                 * 1     Hybrid DY-HS algorithm
7141 
7142   -- ALGLIB --
7143      Copyright 02.04.2010 by Bochkanov Sergey
7144 *************************************************************************/
7145 void mincgsetcgtype(const mincgstate &state, const ae_int_t cgtype, const xparams _xparams = alglib::xdefault);
7146 
7147 
7148 /*************************************************************************
7149 This function sets maximum step length
7150 
7151 INPUT PARAMETERS:
7152     State   -   structure which stores algorithm state
7153     StpMax  -   maximum step length, >=0. Set StpMax to 0.0,  if you don't
7154                 want to limit step length.
7155 
Use this subroutine when you optimize a target function which contains
exp() or other fast-growing functions, and the optimization algorithm makes
steps so large that they lead to overflow. This function allows us to
reject steps that are too large (and therefore expose us to possible
overflow) without actually calculating the function value at x+stp*d.
7161 
7162   -- ALGLIB --
7163      Copyright 02.04.2010 by Bochkanov Sergey
7164 *************************************************************************/
7165 void mincgsetstpmax(const mincgstate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
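

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
Limiting the step length to 10 units (the bound is illustrative only):

    alglib::mincgsetstpmax(state, 10.0);
*************************************************************************/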
7166 
7167 
7168 /*************************************************************************
This function allows you to suggest an initial step length to the CG
algorithm.
7170 
7171 Suggested  step  length  is used as starting point for the line search. It
7172 can be useful when you have  badly  scaled  problem,  i.e.  when  ||grad||
7173 (which is used as initial estimate for the first step) is many  orders  of
7174 magnitude different from the desired step.
7175 
Line search may fail on such problems without a good estimate of the
initial step length. Imagine, for example, a problem with ||grad||=10^50
and a desired step equal to 0.1. The line search function will use 10^50
as the initial step, then it will decrease the step length by a factor of
2 (up to 20 attempts) and will get 10^44, which is still too large.

This function allows us to tell the algorithm that the line search should
be started from some moderate step length, like 1.0, so it will be able to
detect the desired step length in several searches.
7185 
7186 Default behavior (when no step is suggested) is to use preconditioner,  if
7187 it is available, to generate initial estimate of step length.
7188 
7189 This function influences only first iteration of algorithm. It  should  be
7190 called between MinCGCreate/MinCGRestartFrom() call and MinCGOptimize call.
7191 Suggested step is ignored if you have preconditioner.
7192 
7193 INPUT PARAMETERS:
7194     State   -   structure used to store algorithm state.
7195     Stp     -   initial estimate of the step length.
7196                 Can be zero (no estimate).
7197 
7198   -- ALGLIB --
7199      Copyright 30.07.2010 by Bochkanov Sergey
7200 *************************************************************************/
7201 void mincgsuggeststep(const mincgstate &state, const double stp, const xparams _xparams = alglib::xdefault);
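

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
Suggesting a moderate first step right after creation/restart and before
the optimization call (demo_grad is the hypothetical gradient callback
sketched earlier):

    alglib::mincgcreate(x, state);
    alglib::mincgsuggeststep(state, 1.0);
    alglib::mincgoptimize(state, demo_grad);
*************************************************************************/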
7202 
7203 
7204 /*************************************************************************
7205 Modification of the preconditioner: preconditioning is turned off.
7206 
7207 INPUT PARAMETERS:
7208     State   -   structure which stores algorithm state
7209 
7210 NOTE:  you  can  change  preconditioner  "on  the  fly",  during algorithm
7211 iterations.
7212 
7213   -- ALGLIB --
7214      Copyright 13.10.2010 by Bochkanov Sergey
7215 *************************************************************************/
7216 void mincgsetprecdefault(const mincgstate &state, const xparams _xparams = alglib::xdefault);
7217 
7218 
7219 /*************************************************************************
7220 Modification  of  the  preconditioner:  diagonal of approximate Hessian is
7221 used.
7222 
7223 INPUT PARAMETERS:
7224     State   -   structure which stores algorithm state
7225     D       -   diagonal of the approximate Hessian, array[0..N-1],
7226                 (if larger, only leading N elements are used).
7227 
7228 NOTE:  you  can  change  preconditioner  "on  the  fly",  during algorithm
7229 iterations.
7230 
7231 NOTE 2: D[i] should be positive. Exception will be thrown otherwise.
7232 
7233 NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
7234 
7235   -- ALGLIB --
7236      Copyright 13.10.2010 by Bochkanov Sergey
7237 *************************************************************************/
7238 void mincgsetprecdiag(const mincgstate &state, const real_1d_array &d, const xparams _xparams = alglib::xdefault);
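

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
A hypothetical diagonal of the approximate Hessian (NOT its inverse):

    alglib::real_1d_array d = "[2,100]";
    alglib::mincgsetprecdiag(state, d);
*************************************************************************/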
7239 
7240 
7241 /*************************************************************************
7242 Modification of the preconditioner: scale-based diagonal preconditioning.
7243 
7244 This preconditioning mode can be useful when you  don't  have  approximate
7245 diagonal of Hessian, but you know that your  variables  are  badly  scaled
7246 (for  example,  one  variable is in [1,10], and another in [1000,100000]),
7247 and most part of the ill-conditioning comes from different scales of vars.
7248 
7249 In this case simple  scale-based  preconditioner,  with H[i] = 1/(s[i]^2),
7250 can greatly improve convergence.
7251 
IMPORTANT: you should set the scale of your variables with a MinCGSetScale()
call (before or after the MinCGSetPrecScale() call). Without knowledge of
the scale of your variables the scale-based preconditioner will be just a
unit matrix.
7255 
7256 INPUT PARAMETERS:
7257     State   -   structure which stores algorithm state
7258 
7259 NOTE:  you  can  change  preconditioner  "on  the  fly",  during algorithm
7260 iterations.
7261 
7262   -- ALGLIB --
7263      Copyright 13.10.2010 by Bochkanov Sergey
7264 *************************************************************************/
7265 void mincgsetprecscale(const mincgstate &state, const xparams _xparams = alglib::xdefault);
7266 
7267 
7268 /*************************************************************************
This function provides a reverse communication interface.
The reverse communication interface is not documented and is not
recommended for direct use.
See below for functions which provide a better documented API.
7272 *************************************************************************/
7273 bool mincgiteration(const mincgstate &state, const xparams _xparams = alglib::xdefault);
7274 
7275 
7276 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear
optimizer.

These functions accept the following parameters:
7280     state   -   algorithm state
7281     func    -   callback which calculates function (or merit function)
7282                 value func at given point x
7283     grad    -   callback which calculates function (or merit function)
7284                 value func and gradient grad at given point x
7285     rep     -   optional callback which is called after each iteration
7286                 can be NULL
7287     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
7288                 can be NULL
7289 
7290 NOTES:
7291 
7292 1. This function has two different implementations: one which  uses  exact
7293    (analytical) user-supplied  gradient, and one which uses function value
7294    only  and  numerically  differentiates  function  in  order  to  obtain
7295    gradient.
7296 
7297    Depending  on  the  specific  function  used to create optimizer object
7298    (either MinCGCreate()  for analytical gradient  or  MinCGCreateF()  for
7299    numerical differentiation) you should  choose  appropriate  variant  of
7300    MinCGOptimize() - one which accepts function AND gradient or one  which
7301    accepts function ONLY.
7302 
7303    Be careful to choose variant of MinCGOptimize()  which  corresponds  to
7304    your optimization scheme! Table below lists different  combinations  of
7305    callback (function/gradient) passed  to  MinCGOptimize()  and  specific
7306    function used to create optimizer.
7307 
7308 
7309                   |         USER PASSED TO MinCGOptimize()
7310    CREATED WITH   |  function only   |  function and gradient
7311    ------------------------------------------------------------
7312    MinCGCreateF() |     work                FAIL
7313    MinCGCreate()  |     FAIL                work
7314 
7315    Here "FAIL" denotes inappropriate combinations  of  optimizer  creation
7316    function and MinCGOptimize() version. Attemps to use  such  combination
7317    (for  example,  to create optimizer with  MinCGCreateF()  and  to  pass
7318    gradient information to MinCGOptimize()) will lead to  exception  being
7319    thrown. Either  you  did  not  pass  gradient when it WAS needed or you
7320    passed gradient when it was NOT needed.
7321 
7322   -- ALGLIB --
7323      Copyright 20.04.2009 by Bochkanov Sergey
7324 
7325 *************************************************************************/
7326 void mincgoptimize(mincgstate &state,
7327     void (*func)(const real_1d_array &x, double &func, void *ptr),
7328     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
7329     void *ptr = NULL,
7330     const xparams _xparams = alglib::xdefault);
7331 void mincgoptimize(mincgstate &state,
7332     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
7333     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
7334     void *ptr = NULL,
7335     const xparams _xparams = alglib::xdefault);
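

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
The two valid combinations from the table above; the callbacks demo_grad
and demo_func are the hypothetical ones sketched earlier, and state/statef
are hypothetical optimizers created with MinCGCreate()/MinCGCreateF():

    // optimizer created with MinCGCreate(): pass function AND gradient
    alglib::mincgoptimize(state, demo_grad);

    // optimizer created with MinCGCreateF(): pass function ONLY
    alglib::mincgoptimize(statef, demo_func);
*************************************************************************/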
7336 
7337 
7338 /*************************************************************************
7339 This  function  activates/deactivates verification  of  the  user-supplied
7340 analytic gradient.
7341 
7342 Upon  activation  of  this  option  OptGuard  integrity  checker  performs
7343 numerical differentiation of your target function  at  the  initial  point
7344 (note: future versions may also perform check  at  the  final  point)  and
7345 compares numerical gradient with analytic one provided by you.
7346 
7347 If difference is too large, an error flag is set and optimization  session
7348 continues. After optimization session is over, you can retrieve the report
7349 which  stores  both  gradients  and  specific  components  highlighted  as
7350 suspicious by the OptGuard.
7351 
7352 The primary OptGuard report can be retrieved with mincgoptguardresults().
7353 
7354 IMPORTANT: gradient check is a high-overhead option which  will  cost  you
7355            about 3*N additional function evaluations. In many cases it may
7356            cost as much as the rest of the optimization session.
7357 
7358            YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO
7359            CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY.
7360 
7361 NOTE: unlike previous incarnation of the gradient checking code,  OptGuard
7362       does NOT interrupt optimization even if it discovers bad gradient.
7363 
7364 INPUT PARAMETERS:
7365     State       -   structure used to store algorithm state
7366     TestStep    -   verification step used for numerical differentiation:
7367                     * TestStep=0 turns verification off
7368                     * TestStep>0 activates verification
                    You should choose TestStep carefully. A value which is
                    too large (so large that the function behavior is non-
                    cubic at this scale) will lead to false alarms. Too
                    short a step will result in rounding errors dominating
                    the numerical derivative.
7374 
7375                     You may use different step for different parameters by
7376                     means of setting scale with mincgsetscale().
7377 
7378 === EXPLANATION ==========================================================
7379 
In order to verify the gradient, the algorithm performs the following steps:
7381   * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
7382     where X[i] is i-th component of the initial point and S[i] is a  scale
7383     of i-th parameter
7384   * F(X) is evaluated at these trial points
7385   * we perform one more evaluation in the middle point of the interval
7386   * we  build  cubic  model using function values and derivatives at trial
7387     points and we compare its prediction with actual value in  the  middle
7388     point
7389 
7390   -- ALGLIB --
7391      Copyright 15.06.2014 by Bochkanov Sergey
7392 *************************************************************************/
7393 void mincgoptguardgradient(const mincgstate &state, const double teststep, const xparams _xparams = alglib::xdefault);
7394 
7395 
7396 /*************************************************************************
7397 This  function  activates/deactivates nonsmoothness monitoring  option  of
7398 the  OptGuard  integrity  checker. Smoothness  monitor  silently  observes
7399 solution process and tries to detect ill-posed problems, i.e. ones with:
7400 a) discontinuous target function (non-C0)
7401 b) nonsmooth     target function (non-C1)
7402 
7403 Smoothness monitoring does NOT interrupt optimization  even if it suspects
7404 that your problem is nonsmooth. It just sets corresponding  flags  in  the
7405 OptGuard report which can be retrieved after optimization is over.
7406 
7407 Smoothness monitoring is a moderate overhead option which often adds  less
7408 than 1% to the optimizer running time. Thus, you can use it even for large
7409 scale problems.
7410 
7411 NOTE: OptGuard does  NOT  guarantee  that  it  will  always  detect  C0/C1
7412       continuity violations.
7413 
7414       First, minor errors are hard to  catch - say, a 0.0001 difference in
7415       the model values at two sides of the gap may be due to discontinuity
7416       of the model - or simply because the model has changed.
7417 
7418       Second, C1-violations  are  especially  difficult  to  detect  in  a
7419       noninvasive way. The optimizer usually  performs  very  short  steps
7420       near the nonsmoothness, and differentiation  usually   introduces  a
7421       lot of numerical noise.  It  is  hard  to  tell  whether  some  tiny
7422       discontinuity in the slope is due to real nonsmoothness or just  due
7423       to numerical noise alone.
7424 
      Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases they can be
      spotted with a restart from a different initial point).
7428 
7429 INPUT PARAMETERS:
7430     state   -   algorithm state
7431     level   -   monitoring level:
7432                 * 0 - monitoring is disabled
7433                 * 1 - noninvasive low-overhead monitoring; function values
7434                       and/or gradients are recorded, but OptGuard does not
7435                       try to perform additional evaluations  in  order  to
7436                       get more information about suspicious locations.
7437 
7438 === EXPLANATION ==========================================================
7439 
7440 One major source of headache during optimization  is  the  possibility  of
7441 the coding errors in the target function/constraints (or their gradients).
7442 Such  errors   most   often   manifest   themselves  as  discontinuity  or
7443 nonsmoothness of the target/constraints.
7444 
7445 Another frequent situation is when you try to optimize something involving
7446 lots of min() and max() operations, i.e. nonsmooth target. Although not  a
7447 coding error, it is nonsmoothness anyway - and smooth  optimizers  usually
7448 stop right after encountering nonsmoothness, well before reaching solution.
7449 
The OptGuard integrity checker helps you to catch such situations: it
monitors function values/gradients being passed to the optimizer and tries
to detect errors. Upon discovering a suspicious pair of points it raises
the appropriate flag (and allows you to continue optimization). When
optimization is done, you can study the OptGuard result.
7455 
7456   -- ALGLIB --
7457      Copyright 21.11.2018 by Bochkanov Sergey
7458 *************************************************************************/
7459 void mincgoptguardsmoothness(const mincgstate &state, const ae_int_t level, const xparams _xparams = alglib::xdefault);
7460 void mincgoptguardsmoothness(const mincgstate &state, const xparams _xparams = alglib::xdefault);
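

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
Enabling the monitor before optimization and inspecting the flags after the
session is over (demo_grad is a hypothetical gradient callback):

    alglib::mincgoptguardsmoothness(state);      // level=1, noninvasive monitoring
    alglib::mincgoptimize(state, demo_grad);
    alglib::optguardreport ogrep;
    alglib::mincgoptguardresults(state, ogrep);
    if( ogrep.nonc0suspected || ogrep.nonc1suspected )
        printf("target may be discontinuous or nonsmooth\n");
*************************************************************************/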
7461 
7462 
7463 /*************************************************************************
7464 Results of OptGuard integrity check, should be called  after  optimization
7465 session is over.
7466 
7467 === PRIMARY REPORT =======================================================
7468 
7469 OptGuard performs several checks which are intended to catch common errors
7470 in the implementation of nonlinear function/gradient:
7471 * incorrect analytic gradient
7472 * discontinuous (non-C0) target functions (constraints)
7473 * nonsmooth     (non-C1) target functions (constraints)
7474 
7475 Each of these checks is activated with appropriate function:
7476 * mincgoptguardgradient() for gradient verification
7477 * mincgoptguardsmoothness() for C0/C1 checks
7478 
7479 Following flags are set when these errors are suspected:
7480 * rep.badgradsuspected, and additionally:
7481   * rep.badgradvidx for specific variable (gradient element) suspected
7482   * rep.badgradxbase, a point where gradient is tested
7483   * rep.badgraduser, user-provided gradient  (stored  as  2D  matrix  with
7484     single row in order to make  report  structure  compatible  with  more
7485     complex optimizers like MinNLC or MinLM)
7486   * rep.badgradnum,   reference    gradient    obtained    via   numerical
7487     differentiation (stored as  2D matrix with single row in order to make
7488     report structure compatible with more complex optimizers  like  MinNLC
7489     or MinLM)
7490 * rep.nonc0suspected
7491 * rep.nonc1suspected
7492 
7493 === ADDITIONAL REPORTS/LOGS ==============================================
7494 
7495 Several different tests are performed to catch C0/C1 errors, you can  find
7496 out specific test signaled error by looking to:
7497 * rep.nonc0test0positive, for non-C0 test #0
7498 * rep.nonc1test0positive, for non-C1 test #0
7499 * rep.nonc1test1positive, for non-C1 test #1
7500 
7501 Additional information (including line search logs)  can  be  obtained  by
7502 means of:
7503 * mincgoptguardnonc1test0results()
7504 * mincgoptguardnonc1test1results()
7505 which return detailed error reports, specific points where discontinuities
7506 were found, and so on.
7507 
7508 ==========================================================================
7509 
7510 INPUT PARAMETERS:
7511     state   -   algorithm state
7512 
7513 OUTPUT PARAMETERS:
7514     rep     -   generic OptGuard report;  more  detailed  reports  can  be
7515                 retrieved with other functions.
7516 
7517 NOTE: false negatives (nonsmooth problems are not identified as  nonsmooth
7518       ones) are possible although unlikely.
7519 
7520       The reason  is  that  you  need  to  make several evaluations around
7521       nonsmoothness  in  order  to  accumulate  enough  information  about
7522       function curvature. Say, if you start right from the nonsmooth point,
7523       optimizer simply won't get enough data to understand what  is  going
7524       wrong before it terminates due to abrupt changes in the  derivative.
7525       It is also  possible  that  "unlucky"  step  will  move  us  to  the
7526       termination too quickly.
7527 
7528       Our current approach is to have less than 0.1%  false  negatives  in
7529       our test examples  (measured  with  multiple  restarts  from  random
7530       points), and to have exactly 0% false positives.
7531 
7532   -- ALGLIB --
7533      Copyright 21.11.2018 by Bochkanov Sergey
7534 *************************************************************************/
7535 void mincgoptguardresults(const mincgstate &state, optguardreport &rep, const xparams _xparams = alglib::xdefault);
7536 
7537 
7538 /*************************************************************************
7539 Detailed results of the OptGuard integrity check for nonsmoothness test #0
7540 
7541 Nonsmoothness (non-C1) test #0 studies  function  values  (not  gradient!)
7542 obtained during line searches and monitors  behavior  of  the  directional
7543 derivative estimate.
7544 
7545 This test is less powerful than test #1, but it does  not  depend  on  the
7546 gradient values and thus it is more robust against artifacts introduced by
7547 numerical differentiation.
7548 
7549 Two reports are returned:
7550 * a "strongest" one, corresponding  to  line   search  which  had  highest
7551   value of the nonsmoothness indicator
7552 * a "longest" one, corresponding to line search which  had  more  function
7553   evaluations, and thus is more detailed
7554 
7555 In both cases following fields are returned:
7556 
7557 * positive - is TRUE  when test flagged suspicious point;  FALSE  if  test
7558   did not notice anything (in the latter cases fields below are empty).
7559 * x0[], d[] - arrays of length N which store initial point  and  direction
7560   for line search (d[] can be normalized, but does not have to)
7561 * stp[], f[] - arrays of length CNT which store step lengths and  function
7562   values at these points; f[i] is evaluated in x0+stp[i]*d.
7563 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
7564   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
7565   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
7567 
7568 ==========================================================================
7569 = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it -  you  will
7570 =                   see where C1 continuity is violated.
7571 ==========================================================================
7572 
7573 INPUT PARAMETERS:
7574     state   -   algorithm state
7575 
7576 OUTPUT PARAMETERS:
7577     strrep  -   C1 test #0 "strong" report
7578     lngrep  -   C1 test #0 "long" report
7579 
7580   -- ALGLIB --
7581      Copyright 21.11.2018 by Bochkanov Sergey
7582 *************************************************************************/
7583 void mincgoptguardnonc1test0results(const mincgstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams = alglib::xdefault);
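

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
Dumping the "long" report so that (stp,f) can be plotted:

    alglib::optguardnonc1test0report strrep, lngrep;
    alglib::mincgoptguardnonc1test0results(state, strrep, lngrep);
    if( lngrep.positive )
        for(alglib::ae_int_t i=0; i<lngrep.cnt; i++)
            printf("stp=%.6f f=%.6f\n", lngrep.stp[i], lngrep.f[i]);
*************************************************************************/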
7584 
7585 
7586 /*************************************************************************
7587 Detailed results of the OptGuard integrity check for nonsmoothness test #1
7588 
7589 Nonsmoothness (non-C1)  test  #1  studies  individual  components  of  the
7590 gradient computed during line search.
7591 
7592 When precise analytic gradient is provided this test is more powerful than
7593 test #0  which  works  with  function  values  and  ignores  user-provided
7594 gradient.  However,  test  #0  becomes  more   powerful   when   numerical
7595 differentiation is employed (in such cases test #1 detects  higher  levels
7596 of numerical noise and becomes too conservative).
7597 
7598 This test also tells specific components of the gradient which violate  C1
7599 continuity, which makes it more informative than #0, which just tells that
7600 continuity is violated.
7601 
7602 Two reports are returned:
7603 * a "strongest" one, corresponding  to  line   search  which  had  highest
7604   value of the nonsmoothness indicator
7605 * a "longest" one, corresponding to line search which  had  more  function
7606   evaluations, and thus is more detailed
7607 
7608 In both cases following fields are returned:
7609 
7610 * positive - is TRUE  when test flagged suspicious point;  FALSE  if  test
7611   did not notice anything (in the latter cases fields below are empty).
7612 * vidx - is an index of the variable in [0,N) with nonsmooth derivative
7613 * x0[], d[] - arrays of length N which store initial point  and  direction
7614   for line search (d[] can be normalized, but does not have to)
7615 * stp[], g[] - arrays of length CNT which store step lengths and  gradient
7616   values at these points; g[i] is evaluated in  x0+stp[i]*d  and  contains
7617   vidx-th component of the gradient.
7618 * stpidxa, stpidxb - we  suspect  that  function  violates  C1  continuity
7619   between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
7620   with  most  likely  position  of  the  violation  between  stpidxa+1 and
  stpidxa+2).
7622 
7623 ==========================================================================
= SHORTLY SPEAKING: build a 2D plot of (stp,g) and look at it -  you  will
7625 =                   see where C1 continuity is violated.
7626 ==========================================================================
7627 
7628 INPUT PARAMETERS:
7629     state   -   algorithm state
7630 
7631 OUTPUT PARAMETERS:
7632     strrep  -   C1 test #1 "strong" report
7633     lngrep  -   C1 test #1 "long" report
7634 
7635   -- ALGLIB --
7636      Copyright 21.11.2018 by Bochkanov Sergey
7637 *************************************************************************/
7638 void mincgoptguardnonc1test1results(const mincgstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams = alglib::xdefault);
7639 
7640 
7641 /*************************************************************************
7642 Conjugate gradient results
7643 
7644 INPUT PARAMETERS:
7645     State   -   algorithm state
7646 
7647 OUTPUT PARAMETERS:
7648     X       -   array[0..N-1], solution
7649     Rep     -   optimization report:
                * Rep.TerminationType completion code:
7651                     * -8    internal integrity control  detected  infinite
7652                             or NAN values in  function/gradient.  Abnormal
7653                             termination signalled.
7654                     * -7    gradient verification failed.
7655                             See MinCGSetGradientCheck() for more information.
7656                     *  1    relative function improvement is no more than
7657                             EpsF.
7658                     *  2    relative step is no more than EpsX.
7659                     *  4    gradient norm is no more than EpsG
                    *  5    MaxIts steps were taken
7661                     *  7    stopping conditions are too stringent,
7662                             further improvement is impossible,
7663                             we return best X found so far
7664                     *  8    terminated by user
7665                 * Rep.IterationsCount contains iterations count
                * NFEV contains the number of function calculations
7667 
7668   -- ALGLIB --
7669      Copyright 20.04.2009 by Bochkanov Sergey
7670 *************************************************************************/
7671 void mincgresults(const mincgstate &state, real_1d_array &x, mincgreport &rep, const xparams _xparams = alglib::xdefault);
7672 
7673 
7674 /*************************************************************************
7675 Conjugate gradient results
7676 
7677 Buffered implementation of MinCGResults(), which uses pre-allocated buffer
7678 to store X[]. If buffer size is  too  small,  it  resizes  buffer.  It  is
7679 intended to be used in the inner cycles of performance critical algorithms
7680 where array reallocation penalty is too large to be ignored.
7681 
7682   -- ALGLIB --
7683      Copyright 20.04.2009 by Bochkanov Sergey
7684 *************************************************************************/
7685 void mincgresultsbuf(const mincgstate &state, real_1d_array &x, mincgreport &rep, const xparams _xparams = alglib::xdefault);
7686 
7687 
7688 /*************************************************************************
7689 This  subroutine  restarts  CG  algorithm from new point. All optimization
7690 parameters are left unchanged.
7691 
This function allows you to solve multiple optimization problems (which
must have the same number of dimensions) without the object reallocation
penalty.
7694 
7695 INPUT PARAMETERS:
7696     State   -   structure used to store algorithm state.
7697     X       -   new starting point.
7698 
7699   -- ALGLIB --
7700      Copyright 30.07.2010 by Bochkanov Sergey
7701 *************************************************************************/
7702 void mincgrestartfrom(const mincgstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
7703 
7704 
7705 /*************************************************************************
7706 This subroutine submits request for termination of running  optimizer.  It
7707 should be called from user-supplied callback when user decides that it  is
7708 time to "smoothly" terminate optimization process.  As  result,  optimizer
7709 stops at point which was "current accepted" when termination  request  was
7710 submitted and returns error code 8 (successful termination).
7711 
7712 INPUT PARAMETERS:
7713     State   -   optimizer structure
7714 
7715 NOTE: after  request  for  termination  optimizer  may   perform   several
7716       additional calls to user-supplied callbacks. It does  NOT  guarantee
7717       to stop immediately - it just guarantees that these additional calls
7718       will be discarded later.
7719 
7720 NOTE: calling this function on optimizer which is NOT running will have no
7721       effect.
7722 
7723 NOTE: multiple calls to this function are possible. First call is counted,
7724       subsequent calls are silently ignored.
7725 
7726   -- ALGLIB --
7727      Copyright 08.10.2014 by Bochkanov Sergey
7728 *************************************************************************/
7729 void mincgrequesttermination(const mincgstate &state, const xparams _xparams = alglib::xdefault);
7730 #endif
7731 
7732 #if defined(AE_COMPILE_NLCSQP) || !defined(AE_PARTIAL_BUILD)
7733 
7734 #endif
7735 
7736 #if defined(AE_COMPILE_LPQPPRESOLVE) || !defined(AE_PARTIAL_BUILD)
7737 
7738 #endif
7739 
7740 #if defined(AE_COMPILE_REVISEDDUALSIMPLEX) || !defined(AE_PARTIAL_BUILD)
7741 
7742 #endif
7743 
7744 #if defined(AE_COMPILE_MINLP) || !defined(AE_PARTIAL_BUILD)
7745 /*************************************************************************
7746                             LINEAR PROGRAMMING
7747 
The subroutine creates an LP solver. After initial creation it contains a
default optimization problem with a zero cost vector, all variables fixed
to zero, and no constraints.
7751 
7752 In order to actually solve something you should:
7753 * set cost vector with minlpsetcost()
7754 * set variable bounds with minlpsetbc() or minlpsetbcall()
7755 * specify constraint matrix with one of the following functions:
7756   [*] minlpsetlc()        for dense one-sided constraints
7757   [*] minlpsetlc2dense()  for dense two-sided constraints
7758   [*] minlpsetlc2()       for sparse two-sided constraints
7759   [*] minlpaddlc2dense()  to add one dense row to constraint matrix
7760   [*] minlpaddlc2()       to add one row to constraint matrix (compressed format)
7761 * call minlpoptimize() to run the solver and  minlpresults()  to  get  the
7762   solution vector and additional information.
7763 
7764 By  default,  LP  solver uses best algorithm available. As of ALGLIB 3.17,
7765 sparse interior point (barrier) solver is used. Future releases of  ALGLIB
7766 may introduce other solvers.
7767 
7768 User may choose specific LP algorithm by calling:
* minlpsetalgodss() for the revised dual simplex method with DSE pricing
  and a bounds flipping ratio test (aka long dual step). A large-scale
  sparse LU solver with Forest-Tomlin updates is used internally as the
  linear algebra driver.
7773 * minlpsetalgoipm() for sparse interior point method
7774 
7775 INPUT PARAMETERS:
7776     N       -   problem size
7777 
7778 OUTPUT PARAMETERS:
7779     State   -   optimizer in the default state
7780 
7781   -- ALGLIB --
7782      Copyright 19.07.2018 by Bochkanov Sergey
7783 *************************************************************************/
7784 void minlpcreate(const ae_int_t n, minlpstate &state, const xparams _xparams = alglib::xdefault);
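

/*************************************************************************
USAGE SKETCH (editorial illustration, not part of the ALGLIB distribution).
A hypothetical problem: minimize -x0-x1 subject to 0<=x0,x1<=1 and
x0+x1<=1.5, using one dense two-sided constraint row:

    alglib::minlpstate state;
    alglib::minlpreport rep;
    alglib::real_1d_array xsol, c = "[-1,-1]";
    alglib::real_2d_array a = "[[1,1]]";
    alglib::real_1d_array al = "[0]", au = "[1.5]";
    al[0] = alglib::fp_neginf;                    // one-sided: -inf <= x0+x1 <= 1.5

    alglib::minlpcreate(2, state);
    alglib::minlpsetcost(state, c);
    alglib::minlpsetbcall(state, 0.0, 1.0);       // 0 <= x[i] <= 1 for all variables
    alglib::minlpsetlc2dense(state, a, al, au, 1);
    alglib::minlpoptimize(state);
    alglib::minlpresults(state, xsol, rep);
*************************************************************************/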
7785 
7786 
7787 /*************************************************************************
7788 This function sets LP algorithm to revised dual simplex method.
7789 
The ALGLIB implementation of the dual simplex method supports advanced
performance and stability improvements like DSE pricing, a bounds flipping
ratio test (aka long dual step), Forest-Tomlin updates, and shifting.
7793 
7794 INPUT PARAMETERS:
7795     State   -   optimizer
7796     Eps     -   stopping condition, Eps>=0:
7797                 * should be small number about 1E-6 or 1E-7.
7798                 * zero value means that solver automatically selects good
7799                   value (can be different in different ALGLIB versions)
7800                 * default value is zero
7801                 Algorithm stops when relative error is less than Eps.
7802 
7803 ===== TRACING DSS SOLVER =================================================
7804 
7805 DSS solver supports advanced tracing capabilities. You can trace algorithm
7806 output by specifying following trace symbols (case-insensitive)  by  means
7807 of trace_file() call:
7808 * 'DSS'         - for basic trace of algorithm  steps and decisions.  Only
7809                   short scalars (function values and deltas) are  printed.
7810                   N-dimensional quantities like search directions are  NOT
7811                   printed.
7812 * 'DSS.DETAILED'- for output of points being visited and search directions
7813                   This  symbol  also  implicitly  defines  'DSS'. You  can
7814                   control output format by additionally specifying:
7815                   * nothing     to output in  6-digit exponential format
7816                   * 'PREC.E15'  to output in 15-digit exponential format
7817                   * 'PREC.F6'   to output in  6-digit fixed-point format
7818 
7819 By default trace is disabled and adds  no  overhead  to  the  optimization
7820 process. However, specifying any of the symbols adds some  formatting  and
7821 output-related overhead.
7822 
7823 You may specify multiple symbols by separating them with commas:
7824 >
7825 > alglib::trace_file("DSS,PREC.F6", "path/to/trace.log")
7826 >
7827 
7828   -- ALGLIB --
7829      Copyright 08.11.2020 by Bochkanov Sergey
7830 *************************************************************************/
7831 void minlpsetalgodss(const minlpstate &state, const double eps, const xparams _xparams = alglib::xdefault);
7832 
7833 
7834 /*************************************************************************
7835 This function sets LP algorithm to sparse interior point method.
7836 
7837 ALGORITHM INFORMATION:
7838 
7839 * this  algorithm  is  our implementation  of  interior  point  method  as
7840   formulated by  R.J.Vanderbei, with minor modifications to the  algorithm
7841   (damped Newton directions are extensively used)
* like all interior point methods, this algorithm  tends  to  converge  in
  roughly the same number of iterations (between 15 and 50)  independently
  of the problem dimensionality
7845 
7846 INPUT PARAMETERS:
7847     State   -   optimizer
7848     Eps     -   stopping condition, Eps>=0:
                * should be a small number, about 1E-7 or 1E-8
7850                 * zero value means that solver automatically selects good
7851                   value (can be different in different ALGLIB versions)
7852                 * default value is zero
7853                 Algorithm  stops  when  primal  error  AND  dual error AND
7854                 duality gap are less than Eps.
7855 
7856 ===== TRACING IPM SOLVER =================================================
7857 
7858 IPM solver supports advanced tracing capabilities. You can trace algorithm
7859 output by specifying following trace symbols (case-insensitive)  by  means
7860 of trace_file() call:
7861 * 'IPM'         - for basic trace of algorithm  steps and decisions.  Only
7862                   short scalars (function values and deltas) are  printed.
7863                   N-dimensional quantities like search directions are  NOT
7864                   printed.
7865 * 'IPM.DETAILED'- for output of points being visited and search directions
7866                   This  symbol  also  implicitly  defines  'IPM'. You  can
7867                   control output format by additionally specifying:
7868                   * nothing     to output in  6-digit exponential format
7869                   * 'PREC.E15'  to output in 15-digit exponential format
7870                   * 'PREC.F6'   to output in  6-digit fixed-point format
7871 
7872 By default trace is disabled and adds  no  overhead  to  the  optimization
7873 process. However, specifying any of the symbols adds some  formatting  and
7874 output-related overhead.
7875 
7876 You may specify multiple symbols by separating them with commas:
7877 >
7878 > alglib::trace_file("IPM,PREC.F6", "path/to/trace.log")
7879 >
7880 
7881   -- ALGLIB --
7882      Copyright 08.11.2020 by Bochkanov Sergey
7883 *************************************************************************/
7884 void minlpsetalgoipm(const minlpstate &state, const double eps, const xparams _xparams = alglib::xdefault);
7885 void minlpsetalgoipm(const minlpstate &state, const xparams _xparams = alglib::xdefault);
7886 
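/*************************************************************************
EXAMPLE (illustrative sketch): explicit selection of the LP algorithm  for
a solver created with minlpcreate(); the Eps values follow the
recommendations given above.

    minlpsetalgodss(state, 1.0e-7);     // revised dual simplex
    // ...or...
    minlpsetalgoipm(state, 1.0e-8);     // sparse interior point method
*************************************************************************/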
7887 
7888 /*************************************************************************
7889 This function sets cost term for LP solver.
7890 
7891 By default, cost term is zero.
7892 
7893 INPUT PARAMETERS:
7894     State   -   structure which stores algorithm state
7895     C       -   cost term, array[N].
7896 
7897   -- ALGLIB --
7898      Copyright 19.07.2018 by Bochkanov Sergey
7899 *************************************************************************/
7900 void minlpsetcost(const minlpstate &state, const real_1d_array &c, const xparams _xparams = alglib::xdefault);
7901 
7902 
7903 /*************************************************************************
7904 This function sets scaling coefficients.
7905 
7906 ALGLIB optimizers use scaling matrices to test stopping  conditions and as
7907 preconditioner.
7908 
7909 Scale of the I-th variable is a translation invariant measure of:
7910 a) "how large" the variable is
7911 b) how large the step should be to make significant changes in the
7912    function
7913 
7914 INPUT PARAMETERS:
7915     State   -   structure stores algorithm state
7916     S       -   array[N], non-zero scaling coefficients
7917                 S[i] may be negative, sign doesn't matter.
7918 
7919   -- ALGLIB --
7920      Copyright 19.07.2018 by Bochkanov Sergey
7921 *************************************************************************/
7922 void minlpsetscale(const minlpstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
7923 
7924 
7925 /*************************************************************************
7926 This function sets box constraints for LP solver (all variables  at  once,
7927 different constraints for different variables).
7928 
7929 The default state of constraints is to have all variables fixed  at  zero.
7930 You have to overwrite it by your own constraint vector. Constraint  status
7931 is preserved until constraints are  explicitly  overwritten  with  another
minlpsetbc()  call,  overwritten  with   minlpsetbcall(),   or   partially
overwritten with minlpsetbci() call.
7934 
7935 Following types of constraints are supported:
7936 
7937     DESCRIPTION         CONSTRAINT              HOW TO SPECIFY
7938     fixed variable      x[i]=Bnd[i]             BndL[i]=BndU[i]
7939     lower bound         BndL[i]<=x[i]           BndU[i]=+INF
7940     upper bound         x[i]<=BndU[i]           BndL[i]=-INF
7941     range               BndL[i]<=x[i]<=BndU[i]  ...
    free variable       -                       BndL[I]=-INF, BndU[I]=+INF
7943 
7944 INPUT PARAMETERS:
7945     State   -   structure stores algorithm state
7946     BndL    -   lower bounds, array[N].
7947     BndU    -   upper bounds, array[N].
7948 
7949 NOTE: infinite values can be specified by means of Double.PositiveInfinity
7950       and  Double.NegativeInfinity  (in  C#)  and  alglib::fp_posinf   and
7951       alglib::fp_neginf (in C++).
7952 
7953 NOTE: you may replace infinities by very small/very large values,  but  it
7954       is not recommended because large numbers may introduce large numerical
7955       errors in the algorithm.
7956 
NOTE: if constraints for all variables are the same, you may use
      minlpsetbcall(), which allows you to specify constraints without
      using arrays.
7959 
7960 NOTE: BndL>BndU will result in LP problem being recognized as infeasible.
7961 
7962   -- ALGLIB --
7963      Copyright 19.07.2018 by Bochkanov Sergey
7964 *************************************************************************/
7965 void minlpsetbc(const minlpstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
7966 
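/*************************************************************************
EXAMPLE (illustrative sketch): mixing finite and infinite bounds by  means
of alglib::fp_posinf / alglib::fp_neginf, as described in the notes above.

    // x0 fixed at 2, x1 bounded below by 0, x2 free
    real_1d_array bndl, bndu;
    bndl.setlength(3);
    bndu.setlength(3);
    bndl[0] = 2.0;                bndu[0] = 2.0;                // fixed
    bndl[1] = 0.0;                bndu[1] = alglib::fp_posinf;  // lower bound
    bndl[2] = alglib::fp_neginf;  bndu[2] = alglib::fp_posinf;  // free
    minlpsetbc(state, bndl, bndu);
*************************************************************************/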
7967 
7968 /*************************************************************************
7969 This function sets box constraints for LP solver (all variables  at  once,
7970 same constraints for all variables)
7971 
7972 The default state of constraints is to have all variables fixed  at  zero.
7973 You have to overwrite it by your own constraint vector. Constraint  status
7974 is preserved until constraints are  explicitly  overwritten  with  another
minlpsetbcall() or minlpsetbc() call, or partially overwritten with
minlpsetbci().
7976 
7977 Following types of constraints are supported:
7978 
7979     DESCRIPTION         CONSTRAINT              HOW TO SPECIFY
7980     fixed variable      x[i]=Bnd[i]             BndL[i]=BndU[i]
7981     lower bound         BndL[i]<=x[i]           BndU[i]=+INF
7982     upper bound         x[i]<=BndU[i]           BndL[i]=-INF
7983     range               BndL[i]<=x[i]<=BndU[i]  ...
    free variable       -                       BndL[I]=-INF, BndU[I]=+INF
7985 
7986 INPUT PARAMETERS:
7987     State   -   structure stores algorithm state
7988     BndL    -   lower bound, same for all variables
7989     BndU    -   upper bound, same for all variables
7990 
7991 NOTE: infinite values can be specified by means of Double.PositiveInfinity
7992       and  Double.NegativeInfinity  (in  C#)  and  alglib::fp_posinf   and
7993       alglib::fp_neginf (in C++).
7994 
7995 NOTE: you may replace infinities by very small/very large values,  but  it
7996       is not recommended because large numbers may introduce large numerical
7997       errors in the algorithm.
7998 
7999 NOTE: minlpsetbc() can  be  used  to  specify  different  constraints  for
8000       different variables.
8001 
8002 NOTE: BndL>BndU will result in LP problem being recognized as infeasible.
8003 
8004   -- ALGLIB --
8005      Copyright 19.07.2018 by Bochkanov Sergey
8006 *************************************************************************/
8007 void minlpsetbcall(const minlpstate &state, const double bndl, const double bndu, const xparams _xparams = alglib::xdefault);
8008 
8009 
8010 /*************************************************************************
8011 This function sets box constraints for I-th variable (other variables are
8012 not modified).
8013 
8014 The default state of constraints is to have all variables fixed  at  zero.
8015 You have to overwrite it by your own constraint vector.
8016 
8017 Following types of constraints are supported:
8018 
8019     DESCRIPTION         CONSTRAINT              HOW TO SPECIFY
8020     fixed variable      x[i]=Bnd[i]             BndL[i]=BndU[i]
8021     lower bound         BndL[i]<=x[i]           BndU[i]=+INF
8022     upper bound         x[i]<=BndU[i]           BndL[i]=-INF
8023     range               BndL[i]<=x[i]<=BndU[i]  ...
    free variable       -                       BndL[I]=-INF, BndU[I]=+INF
8025 
8026 INPUT PARAMETERS:
8027     State   -   structure stores algorithm state
8028     I       -   variable index, in [0,N)
8029     BndL    -   lower bound for I-th variable
8030     BndU    -   upper bound for I-th variable
8031 
8032 NOTE: infinite values can be specified by means of Double.PositiveInfinity
8033       and  Double.NegativeInfinity  (in  C#)  and  alglib::fp_posinf   and
8034       alglib::fp_neginf (in C++).
8035 
8036 NOTE: you may replace infinities by very small/very large values,  but  it
8037       is not recommended because large numbers may introduce large numerical
8038       errors in the algorithm.
8039 
8040 NOTE: minlpsetbc() can  be  used  to  specify  different  constraints  for
8041       different variables.
8042 
8043 NOTE: BndL>BndU will result in LP problem being recognized as infeasible.
8044 
8045   -- ALGLIB --
8046      Copyright 19.07.2018 by Bochkanov Sergey
8047 *************************************************************************/
8048 void minlpsetbci(const minlpstate &state, const ae_int_t i, const double bndl, const double bndu, const xparams _xparams = alglib::xdefault);
8049 
8050 
8051 /*************************************************************************
8052 This function sets one-sided linear constraints A*x ~ AU, where "~" can be
8053 a mix of "<=", "=" and ">=".
8054 
8055 IMPORTANT: this function is provided here for compatibility with the  rest
8056            of ALGLIB optimizers which accept constraints  in  format  like
8057            this one. Many real-life problems feature two-sided constraints
8058            like a0 <= a*x <= a1. It is really inefficient to add them as a
8059            pair of one-sided constraints.
8060 
8061            Use minlpsetlc2dense(), minlpsetlc2(), minlpaddlc2()  (or   its
8062            sparse version) wherever possible.
8063 
8064 INPUT PARAMETERS:
8065     State   -   structure previously allocated with minlpcreate() call.
8066     A       -   linear constraints, array[K,N+1]. Each row of A represents
8067                 one constraint, with first N elements being linear coefficients,
8068                 and last element being right side.
8069     CT      -   constraint types, array[K]:
8070                 * if CT[i]>0, then I-th constraint is A[i,*]*x >= A[i,n]
8071                 * if CT[i]=0, then I-th constraint is A[i,*]*x  = A[i,n]
8072                 * if CT[i]<0, then I-th constraint is A[i,*]*x <= A[i,n]
8073     K       -   number of equality/inequality constraints,  K>=0;  if  not
8074                 given, inferred from sizes of A and CT.
8075 
8076   -- ALGLIB --
8077      Copyright 19.07.2018 by Bochkanov Sergey
8078 *************************************************************************/
8079 void minlpsetlc(const minlpstate &state, const real_2d_array &a, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
8080 void minlpsetlc(const minlpstate &state, const real_2d_array &a, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
8081 
8082 
8083 /*************************************************************************
8084 This function sets two-sided linear constraints AL <= A*x <= AU.
8085 
This version accepts a dense matrix as input; internally the LP solver uses
sparse storage anyway (most LP problems are sparse), but dense  inputs  are
accepted for your convenience. This function overwrites linear  constraints
set by previous calls (if such calls were made).

We recommend using the sparse version of this function unless you  solve  a
small-scale LP problem (fewer than a few hundred variables).
8093 
8094 NOTE: there also exist several versions of this function:
8095       * one-sided dense version which  accepts  constraints  in  the  same
8096         format as one used by QP and  NLP solvers
8097       * two-sided sparse version which accepts sparse matrix
8098       * two-sided dense  version which allows you to add constraints row by row
8099       * two-sided sparse version which allows you to add constraints row by row
8100 
8101 INPUT PARAMETERS:
8102     State   -   structure previously allocated with minlpcreate() call.
8103     A       -   linear constraints, array[K,N]. Each row of  A  represents
8104                 one  constraint. One-sided  inequality   constraints, two-
8105                 sided inequality  constraints,  equality  constraints  are
8106                 supported (see below)
8107     AL, AU  -   lower and upper bounds, array[K];
                * AL[i]=AU[i] => equality constraint Ai*x=AL[i]
8109                 * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i]
8110                 * AL[i]=-INF  => one-sided constraint Ai*x<=AU[i]
8111                 * AU[i]=+INF  => one-sided constraint AL[i]<=Ai*x
8112                 * AL[i]=-INF, AU[i]=+INF => constraint is ignored
8113     K       -   number of equality/inequality constraints,  K>=0;  if  not
8114                 given, inferred from sizes of A, AL, AU.
8115 
8116   -- ALGLIB --
8117      Copyright 19.07.2018 by Bochkanov Sergey
8118 *************************************************************************/
8119 void minlpsetlc2dense(const minlpstate &state, const real_2d_array &a, const real_1d_array &al, const real_1d_array &au, const ae_int_t k, const xparams _xparams = alglib::xdefault);
8120 void minlpsetlc2dense(const minlpstate &state, const real_2d_array &a, const real_1d_array &al, const real_1d_array &au, const xparams _xparams = alglib::xdefault);
8121 
8122 
8123 /*************************************************************************
8124 This  function  sets  two-sided linear  constraints  AL <= A*x <= AU  with
8125 sparse constraining matrix A. Recommended for large-scale problems.
8126 
8127 This  function  overwrites  linear  (non-box)  constraints set by previous
8128 calls (if such calls were made).
8129 
8130 INPUT PARAMETERS:
8131     State   -   structure previously allocated with minlpcreate() call.
8132     A       -   sparse matrix with size [K,N] (exactly!).
8133                 Each row of A represents one general linear constraint.
8134                 A can be stored in any sparse storage format.
8135     AL, AU  -   lower and upper bounds, array[K];
                * AL[i]=AU[i] => equality constraint Ai*x=AL[i]
8137                 * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i]
8138                 * AL[i]=-INF  => one-sided constraint Ai*x<=AU[i]
8139                 * AU[i]=+INF  => one-sided constraint AL[i]<=Ai*x
8140                 * AL[i]=-INF, AU[i]=+INF => constraint is ignored
8141     K       -   number  of equality/inequality constraints, K>=0.  If  K=0
8142                 is specified, A, AL, AU are ignored.
8143 
8144   -- ALGLIB --
8145      Copyright 19.07.2018 by Bochkanov Sergey
8146 *************************************************************************/
8147 void minlpsetlc2(const minlpstate &state, const sparsematrix &a, const real_1d_array &al, const real_1d_array &au, const ae_int_t k, const xparams _xparams = alglib::xdefault);
8148 
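/*************************************************************************
EXAMPLE (illustrative sketch): building a sparse constraint  matrix  with
sparsecreate()/sparseset() from ALGLIB's linear algebra module (assumed to
be available here) and passing it to the solver.

    // two constraints over 3 variables:  x0+x2 = 1,  x1 <= 4
    sparsematrix a;
    sparsecreate(2, 3, a);
    sparseset(a, 0, 0, 1.0);
    sparseset(a, 0, 2, 1.0);
    sparseset(a, 1, 1, 1.0);
    real_1d_array al, au;
    al.setlength(2);
    au.setlength(2);
    al[0] = 1.0;                au[0] = 1.0;    // equality
    al[1] = alglib::fp_neginf;  au[1] = 4.0;    // one-sided
    minlpsetlc2(state, a, al, au, 2);
*************************************************************************/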
8149 
8150 /*************************************************************************
8151 This function appends two-sided linear constraint  AL <= A*x <= AU  to the
8152 list of currently present constraints.
8153 
This version accepts a dense constraint vector as input, but sparsifies  it
for internal storage and processing. Thus, the time to add  one  constraint
is O(N) - we have to scan the entire array of length N. The sparse  version
of this function is an order of magnitude faster for constraints with  just
a few nonzeros per row.
8159 
8160 INPUT PARAMETERS:
8161     State   -   structure previously allocated with minlpcreate() call.
    A       -   linear constraint coefficients, array[N]; right side is NOT
8163                 included.
8164     AL, AU  -   lower and upper bounds;
                * AL=AU    => equality constraint A*x=AL
                * AL<AU    => two-sided constraint AL<=A*x<=AU
                * AL=-INF  => one-sided constraint A*x<=AU
                * AU=+INF  => one-sided constraint AL<=A*x
8169                 * AL=-INF, AU=+INF => constraint is ignored
8170 
8171   -- ALGLIB --
8172      Copyright 19.07.2018 by Bochkanov Sergey
8173 *************************************************************************/
8174 void minlpaddlc2dense(const minlpstate &state, const real_1d_array &a, const double al, const double au, const xparams _xparams = alglib::xdefault);
8175 
8176 
8177 /*************************************************************************
8178 This function appends two-sided linear constraint  AL <= A*x <= AU  to the
8179 list of currently present constraints.
8180 
8181 Constraint is passed in compressed format: as list of non-zero entries  of
8182 coefficient vector A. Such approach is more efficient than  dense  storage
8183 for highly sparse constraint vectors.
8184 
8185 INPUT PARAMETERS:
8186     State   -   structure previously allocated with minlpcreate() call.
8187     IdxA    -   array[NNZ], indexes of non-zero elements of A:
8188                 * can be unsorted
8189                 * can include duplicate indexes (corresponding entries  of
8190                   ValA[] will be summed)
8191     ValA    -   array[NNZ], values of non-zero elements of A
8192     NNZ     -   number of non-zero coefficients in A
8193     AL, AU  -   lower and upper bounds;
                * AL=AU    => equality constraint A*x=AL
8195                 * AL<AU    => two-sided constraint AL<=A*x<=AU
8196                 * AL=-INF  => one-sided constraint A*x<=AU
8197                 * AU=+INF  => one-sided constraint AL<=A*x
8198                 * AL=-INF, AU=+INF => constraint is ignored
8199 
8200   -- ALGLIB --
8201      Copyright 19.07.2018 by Bochkanov Sergey
8202 *************************************************************************/
8203 void minlpaddlc2(const minlpstate &state, const integer_1d_array &idxa, const real_1d_array &vala, const ae_int_t nnz, const double al, const double au, const xparams _xparams = alglib::xdefault);
8204 
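/*************************************************************************
EXAMPLE (illustrative sketch): appending one constraint given only its
nonzero entries, as described above.

    // append the constraint  2*x0 + 5*x7 <= 3
    integer_1d_array idx = "[0,7]";
    real_1d_array    val = "[2,5]";
    minlpaddlc2(state, idx, val, 2, alglib::fp_neginf, 3.0);
*************************************************************************/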
8205 
8206 /*************************************************************************
8207 This function solves LP problem.
8208 
8209 INPUT PARAMETERS:
8210     State   -   algorithm state
8211 
8212 You should use minlpresults() function to access results  after  calls  to
8213 this function.
8214 
8215   -- ALGLIB --
8216      Copyright 19.07.2018 by Bochkanov Sergey.
8217 *************************************************************************/
8218 void minlpoptimize(const minlpstate &state, const xparams _xparams = alglib::xdefault);
8219 
8220 
8221 /*************************************************************************
8222 LP solver results
8223 
8224 INPUT PARAMETERS:
8225     State   -   algorithm state
8226 
8227 OUTPUT PARAMETERS:
8228     X       -   array[N], solution (on failure: last trial point)
    Rep     -   optimization report. You should check Rep.TerminationType,
                which contains the completion code;  other  fields  contain
                additional information about the algorithm's execution.
8233 
8234                 Failure codes returned by algorithm are:
8235                 * -4    LP problem is primal unbounded (dual infeasible)
8236                 * -3    LP problem is primal infeasible (dual unbounded)
8237                 * -2    IPM solver detected that problem is either
8238                         infeasible or unbounded
8239 
8240                 Success codes:
8241                 *  1..4 successful completion
                *  5    MaxIts steps were taken
8243 
8244   -- ALGLIB --
8245      Copyright 11.01.2011 by Bochkanov Sergey
8246 *************************************************************************/
8247 void minlpresults(const minlpstate &state, real_1d_array &x, minlpreport &rep, const xparams _xparams = alglib::xdefault);
8248 
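/*************************************************************************
EXAMPLE (illustrative sketch): interpreting the completion code  (in  the
C++ interface the report field is assumed to be named rep.terminationtype).

    minlpresults(state, x, rep);
    if( rep.terminationtype>0 )
    {
        // success: x holds the solution
    }
    else
    {
        // one of the failure codes listed above; x holds the last trial point
    }
*************************************************************************/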
8249 
8250 /*************************************************************************
8251 LP results
8252 
Buffered implementation of MinLPResults() which uses a pre-allocated buffer
to store X[]. If the buffer size is too small, it is resized. This function
is intended for use in the inner cycles of performance-critical  algorithms
where the array reallocation penalty is too large to be ignored.
8257 
8258   -- ALGLIB --
8259      Copyright 11.01.2011 by Bochkanov Sergey
8260 *************************************************************************/
8261 void minlpresultsbuf(const minlpstate &state, real_1d_array &x, minlpreport &rep, const xparams _xparams = alglib::xdefault);
8262 #endif
8263 
8264 #if defined(AE_COMPILE_NLCSLP) || !defined(AE_PARTIAL_BUILD)
8265 
8266 #endif
8267 
8268 #if defined(AE_COMPILE_MINNLC) || !defined(AE_PARTIAL_BUILD)
8269 /*************************************************************************
8270                   NONLINEARLY  CONSTRAINED  OPTIMIZATION
8271             WITH PRECONDITIONED AUGMENTED LAGRANGIAN ALGORITHM
8272 
8273 DESCRIPTION:
8274 The  subroutine  minimizes  function   F(x)  of N arguments subject to any
8275 combination of:
8276 * bound constraints
8277 * linear inequality constraints
8278 * linear equality constraints
8279 * nonlinear equality constraints Gi(x)=0
8280 * nonlinear inequality constraints Hi(x)<=0
8281 
8282 REQUIREMENTS:
8283 * user must provide function value and gradient for F(), H(), G()
8284 * starting point X0 must be feasible or not too far away from the feasible
8285   set
8286 * F(), G(), H() are continuously differentiable on the  feasible  set  and
8287   its neighborhood
8288 * nonlinear constraints G() and H() must have non-zero gradient at  G(x)=0
8289   and at H(x)=0. Say, constraint like x^2>=1 is supported, but x^2>=0   is
8290   NOT supported.
8291 
8292 USAGE:
8293 
Constrained optimization is far more complex than the  unconstrained  one.
8295 Nonlinearly constrained optimization is one of the most esoteric numerical
8296 procedures.
8297 
Here we give a very brief outline of the  MinNLC  optimizer.  We  strongly
recommend that you study the examples in the ALGLIB Reference  Manual  and
read the ALGLIB User Guide on optimization, which is available at
http://www.alglib.net/optimization/
8302 
1. User initializes algorithm state with a MinNLCCreate() call and chooses
   which NLC solver to use. Some solver is used by default,  with  default
   settings, but you should NOT rely on the default choice. It may  change
   in future releases of ALGLIB without notice, and no one  can  guarantee
   that the new solver will be able to solve  your  problem  with  default
   settings.

   On the other hand, if you choose a solver explicitly, you can be pretty
   sure that it will work with new ALGLIB releases.
8312 
   In the current release the following solvers can be used:
   * SQP solver, recommended for medium-scale problems  (fewer  than   a
     thousand variables) with hard-to-evaluate target functions. Requires
     fewer function evaluations than other solvers, but each step involves
     solution of a QP subproblem, so running time may be higher than  that
     of AUL (another recommended option). Activated with the
     minnlcsetalgosqp() function.
   * AUL solver with dense  preconditioner,  recommended  for  large-scale
     problems or for problems  with  cheap  target  function.  Needs  more
     function evaluations than SQP (about 5x-10x more), but its iterations
     are much cheaper than those of SQP. Activated with the
     minnlcsetalgoaul() function.
   * SLP solver, successive linear programming. The slowest one,  requires
     more target function evaluations than SQP and  AUL.  However,  it  is
     somewhat more robust in tricky cases, so it can be used  as  a backup
     plan. Activated with the minnlcsetalgoslp() function.
8329 
8330 2. [optional] user activates OptGuard  integrity checker  which  tries  to
8331    detect possible errors in the user-supplied callbacks:
8332    * discontinuity/nonsmoothness of the target/nonlinear constraints
8333    * errors in the analytic gradient provided by user
8334    This feature is essential for early prototyping stages because it helps
8335    to catch common coding and problem statement errors.
   OptGuard can be activated with the following functions (one per  check
   performed):
8338    * minnlcoptguardsmoothness()
8339    * minnlcoptguardgradient()
8340 
8341 3. User adds boundary and/or linear and/or nonlinear constraints by  means
8342    of calling one of the following functions:
8343    a) minnlcsetbc() for boundary constraints
8344    b) minnlcsetlc() for linear constraints
8345    c) minnlcsetnlc() for nonlinear constraints
8346    You may combine (a), (b) and (c) in one optimization problem.
8347 
8348 4. User sets scale of the variables with minnlcsetscale() function. It  is
8349    VERY important to set  scale  of  the  variables,  because  nonlinearly
8350    constrained problems are hard to solve when variables are badly scaled.
8351 
8352 5. User sets  stopping  conditions  with  minnlcsetcond(). If  NLC  solver
8353    uses  inner/outer  iteration  layout,  this  function   sets   stopping
8354    conditions for INNER iterations.
8355 
8356 6. Finally, user calls minnlcoptimize()  function  which  takes  algorithm
8357    state and pointer (delegate, etc.) to callback function which calculates
8358    F/G/H.
8359 
8360 7. User calls  minnlcresults()  to  get  solution;  additionally  you  can
8361    retrieve OptGuard report with minnlcoptguardresults(), and get detailed
8362    report about purported errors in the target function with:
8363    * minnlcoptguardnonc1test0results()
8364    * minnlcoptguardnonc1test1results()
8365 
8. Optionally user may call minnlcrestartfrom() to solve  another  problem
   with the same N but another starting point. minnlcrestartfrom()  allows
   you to reuse an already initialized structure.
8369 
8370 
8371 INPUT PARAMETERS:
8372     N       -   problem dimension, N>0:
8373                 * if given, only leading N elements of X are used
                * if not given, automatically determined from size of X
8375     X       -   starting point, array[N]:
8376                 * it is better to set X to a feasible point
8377                 * but X can be infeasible, in which case algorithm will try
8378                   to find feasible point first, using X as initial
8379                   approximation.
8380 
8381 OUTPUT PARAMETERS:
8382     State   -   structure stores algorithm state
8383 
8384   -- ALGLIB --
8385      Copyright 06.06.2014 by Bochkanov Sergey
8386 *************************************************************************/
8387 void minnlccreate(const ae_int_t n, const real_1d_array &x, minnlcstate &state, const xparams _xparams = alglib::xdefault);
8388 void minnlccreate(const real_1d_array &x, minnlcstate &state, const xparams _xparams = alglib::xdefault);
8389 
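/*************************************************************************
EXAMPLE (illustrative sketch, not part of the official  ALGLIB  examples):
minimization of f(x0,x1)=x0+x1 subject to the nonlinear equality
x0^2+x1^2-1=0, solved with the SQP solver. The callback signature used  by
minnlcoptimize() and the exact forms of minnlcsetalgosqp()/minnlcresults()
are documented elsewhere in this header and in the Reference Manual;  they
are assumed to be as shown here. Exception handling is omitted.

    #include "optimization.h"
    using namespace alglib;

    void nlcjac(const real_1d_array &x, real_1d_array &fi,
                real_2d_array &jac, void *ptr)
    {
        // fi[0]/jac[0] - target, fi[1]/jac[1] - equality constraint
        fi[0] = x[0] + x[1];
        jac[0][0] = 1.0;        jac[0][1] = 1.0;
        fi[1] = x[0]*x[0] + x[1]*x[1] - 1.0;
        jac[1][0] = 2*x[0];     jac[1][1] = 2*x[1];
    }

    void solve_small_nlc()
    {
        real_1d_array x0 = "[0.1,0.1]";
        real_1d_array s  = "[1,1]";
        real_1d_array x;
        minnlcstate state;
        minnlcreport rep;

        minnlccreate(2, x0, state);
        minnlcsetalgosqp(state);            // choose the solver explicitly
        minnlcsetscale(state, s);
        minnlcsetcond(state, 1.0e-7, 0);    // EpsX=1E-7, unlimited iterations
        minnlcsetnlc(state, 1, 0);          // 1 equality, 0 inequalities
        minnlcoptimize(state, nlcjac);
        minnlcresults(state, x, rep);       // solution near [-0.707,-0.707]
    }
*************************************************************************/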
8390 
8391 /*************************************************************************
This subroutine is a finite difference variant of MinNLCCreate().  It uses
finite differences in order to differentiate the target function.

The description below contains information specific to this function only.
We recommend reading the comments on MinNLCCreate() in order to  get  more
information about creation of the NLC optimizer.
8398 
8399 INPUT PARAMETERS:
8400     N       -   problem dimension, N>0:
8401                 * if given, only leading N elements of X are used
                * if not given, automatically determined from size of X
8403     X       -   starting point, array[N]:
8404                 * it is better to set X to a feasible point
8405                 * but X can be infeasible, in which case algorithm will try
8406                   to find feasible point first, using X as initial
8407                   approximation.
8408     DiffStep-   differentiation step, >0
8409 
8410 OUTPUT PARAMETERS:
8411     State   -   structure stores algorithm state
8412 
8413 NOTES:
8414 1. algorithm uses 4-point central formula for differentiation.
8415 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
8416    S[] is scaling vector which can be set by MinNLCSetScale() call.
3. we recommend you to use moderate values of the  differentiation  step.
   Too large a step will result in too large TRUNCATION errors, while  too
   small a step will result in too large NUMERICAL errors. 1.0E-4 can be a
   good value to start from.
4. Numerical  differentiation  is   very   inefficient  -   one   gradient
   calculation needs 4*N function evaluations. This function will work for
   any N - either small (1...10), moderate (10...100) or  large  (100...).
   However, the performance penalty will be too severe for  all  but  small
   N.
   We should also say that code which relies on numerical  differentiation
   is less robust and precise. An imprecise  gradient  may  slow  down  the
   convergence, especially on highly nonlinear problems.
   Thus  we recommend using this function for fast prototyping  on  small-
   dimensional problems only, and implementing the analytical gradient  as
   soon as possible.
8432 
8433   -- ALGLIB --
8434      Copyright 06.06.2014 by Bochkanov Sergey
8435 *************************************************************************/
8436 void minnlccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minnlcstate &state, const xparams _xparams = alglib::xdefault);
8437 void minnlccreatef(const real_1d_array &x, const double diffstep, minnlcstate &state, const xparams _xparams = alglib::xdefault);
8438 
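/*************************************************************************
EXAMPLE (illustrative sketch): the finite-difference mode uses a  callback
which reports only function values (no Jacobian); the corresponding
minnlcoptimize() overload is assumed to accept it as shown.

    void nlcfvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    {
        fi[0] = x[0]*x[0] + x[1]*x[1];      // target
        fi[1] = x[0] + x[1] - 1.0;          // one nonlinear equality constraint
    }

    // creation and setup mirror the MinNLCCreate() example above,  except
    // that the solver is created in finite-difference mode:
    minnlccreatef(2, x0, 1.0e-4, state);    // DiffStep=1.0E-4, see note 3 above
    minnlcsetnlc(state, 1, 0);
    minnlcoptimize(state, nlcfvec);
*************************************************************************/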
8439 
8440 /*************************************************************************
8441 This function sets boundary constraints for NLC optimizer.
8442 
8443 Boundary constraints are inactive by  default  (after  initial  creation).
8444 They are preserved after algorithm restart with  MinNLCRestartFrom().
8445 
8446 You may combine boundary constraints with  general  linear ones - and with
8447 nonlinear ones! Boundary constraints are  handled  more  efficiently  than
8448 other types.  Thus,  if  your  problem  has  mixed  constraints,  you  may
8449 explicitly specify some of them as boundary and save some time/space.
8450 
8451 INPUT PARAMETERS:
8452     State   -   structure stores algorithm state
8453     BndL    -   lower bounds, array[N].
8454                 If some (all) variables are unbounded, you may specify
8455                 very small number or -INF.
8456     BndU    -   upper bounds, array[N].
8457                 If some (all) variables are unbounded, you may specify
8458                 very large number or +INF.
8459 
8460 NOTE 1:  it is possible to specify  BndL[i]=BndU[i].  In  this  case  I-th
8461 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
8462 
8463 NOTE 2:  when you solve your problem  with  augmented  Lagrangian  solver,
8464          boundary constraints are  satisfied  only  approximately!  It  is
8465          possible   that  algorithm  will  evaluate  function  outside  of
8466          feasible area!
8467 
8468   -- ALGLIB --
8469      Copyright 06.06.2014 by Bochkanov Sergey
8470 *************************************************************************/
8471 void minnlcsetbc(const minnlcstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
8472 
8473 
8474 /*************************************************************************
8475 This function sets linear constraints for MinNLC optimizer.
8476 
8477 Linear constraints are inactive by default (after initial creation).  They
8478 are preserved after algorithm restart with MinNLCRestartFrom().
8479 
8480 You may combine linear constraints with boundary ones - and with nonlinear
8481 ones! If your problem has mixed constraints, you  may  explicitly  specify
8482 some of them as linear. It  may  help  optimizer   to   handle  them  more
8483 efficiently.
8484 
8485 INPUT PARAMETERS:
8486     State   -   structure previously allocated with MinNLCCreate call.
8487     C       -   linear constraints, array[K,N+1].
8488                 Each row of C represents one constraint, either equality
8489                 or inequality (see below):
8490                 * first N elements correspond to coefficients,
8491                 * last element corresponds to the right part.
8492                 All elements of C (including right part) must be finite.
8493     CT      -   type of constraints, array[K]:
                * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n]
                * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n]
                * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n]
8497     K       -   number of equality/inequality constraints, K>=0:
8498                 * if given, only leading K elements of C/CT are used
8499                 * if not given, automatically determined from sizes of C/CT
8500 
8501 NOTE 1: when you solve your problem  with  augmented  Lagrangian   solver,
8502         linear constraints are  satisfied  only   approximately!   It   is
8503         possible   that  algorithm  will  evaluate  function  outside   of
8504         feasible area!
8505 
8506   -- ALGLIB --
8507      Copyright 06.06.2014 by Bochkanov Sergey
8508 *************************************************************************/
8509 void minnlcsetlc(const minnlcstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
8510 void minnlcsetlc(const minnlcstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
8511 
8512 
8513 /*************************************************************************
8514 This function sets nonlinear constraints for MinNLC optimizer.
8515 
In fact, this function sets the NUMBER of  nonlinear  constraints.  The
constraints themselves (constraint functions) are passed to the
MinNLCOptimize() method. This method requires a user-defined vector
function F[] and its Jacobian J[], where:
* the first component of F[] and the first row of Jacobian J[]  correspond
  to the function being minimized
8522 * next NLEC components of F[] (and rows  of  J)  correspond  to  nonlinear
8523   equality constraints G_i(x)=0
8524 * next NLIC components of F[] (and rows  of  J)  correspond  to  nonlinear
8525   inequality constraints H_i(x)<=0
8526 
8527 NOTE: you may combine nonlinear constraints with linear/boundary ones.  If
8528       your problem has mixed constraints, you  may explicitly specify some
8529       of them as linear ones. It may help optimizer to  handle  them  more
8530       efficiently.
8531 
8532 INPUT PARAMETERS:
8533     State   -   structure previously allocated with MinNLCCreate call.
8534     NLEC    -   number of Non-Linear Equality Constraints (NLEC), >=0
    NLIC    -   number of Non-Linear Inequality Constraints (NLIC), >=0
8536 
8537 NOTE 1: when you solve your problem  with  augmented  Lagrangian   solver,
8538         nonlinear constraints are satisfied only  approximately!   It   is
8539         possible   that  algorithm  will  evaluate  function  outside   of
8540         feasible area!
8541 
8542 NOTE 2: algorithm scales variables  according  to   scale   specified   by
8543         MinNLCSetScale()  function,  so  it can handle problems with badly
8544         scaled variables (as long as we KNOW their scales).
8545 
8546         However,  there  is  no  way  to  automatically  scale   nonlinear
8547         constraints Gi(x) and Hi(x). Inappropriate scaling  of  Gi/Hi  may
        ruin convergence. Solving a problem with constraint "1000*G0(x)=0"
        is NOT the same as solving it with constraint "0.001*G0(x)=0".
8550 
8551         It  means  that  YOU  are  the  one who is responsible for correct
8552         scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you
8553         to scale nonlinear constraints in such way that I-th component  of
8554         dG/dX (or dH/dx) has approximately unit  magnitude  (for  problems
8555         with unit scale)  or  has  magnitude approximately equal to 1/S[i]
8556         (where S is a scale set by MinNLCSetScale() function).
8557 
8558 
8559   -- ALGLIB --
8560      Copyright 06.06.2014 by Bochkanov Sergey
8561 *************************************************************************/
8562 void minnlcsetnlc(const minnlcstate &state, const ae_int_t nlec, const ae_int_t nlic, const xparams _xparams = alglib::xdefault);
8563 
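/*************************************************************************
EXAMPLE (illustrative sketch): ordering of components in the callback.
With minnlcsetnlc(state,1,1) the vector function F[] and its Jacobian J[]
must be filled as target first, then equalities, then inequalities:

    void nlcjac(const real_1d_array &x, real_1d_array &fi,
                real_2d_array &jac, void *ptr)
    {
        fi[0] = x[0]*x[0] + x[1]*x[1];          // target being minimized
        jac[0][0] = 2*x[0];   jac[0][1] = 2*x[1];
        fi[1] = x[0] + x[1] - 2.0;              // equality G1(x)=0
        jac[1][0] = 1.0;      jac[1][1] = 1.0;
        fi[2] = 1.0 - x[0];                     // inequality H1(x)<=0, i.e. x0>=1
        jac[2][0] = -1.0;     jac[2][1] = 0.0;
    }
*************************************************************************/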
8564 
8565 /*************************************************************************
8566 This function sets stopping conditions for inner iterations of  optimizer.
8567 
8568 INPUT PARAMETERS:
8569     State   -   structure which stores algorithm state
8570     EpsX    -   >=0
8571                 The subroutine finishes its work if  on  k+1-th  iteration
8572                 the condition |v|<=EpsX is fulfilled, where:
                * |.| means Euclidean norm
8574                 * v - scaled step vector, v[i]=dx[i]/s[i]
8575                 * dx - step vector, dx=X(k+1)-X(k)
8576                 * s - scaling coefficients set by MinNLCSetScale()
8577     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
8578                 iterations is unlimited.
8579 
8580 Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic
8581 selection of the stopping condition.
8582 
8583   -- ALGLIB --
8584      Copyright 06.06.2014 by Bochkanov Sergey
8585 *************************************************************************/
8586 void minnlcsetcond(const minnlcstate &state, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
8587 
8588 
8589 /*************************************************************************
8590 This function sets scaling coefficients for NLC optimizer.
8591 
8592 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
8593 size and gradient are scaled before comparison with tolerances).  Scale of
8594 the I-th variable is a translation invariant measure of:
8595 a) "how large" the variable is
8596 b) how large the step should be to make significant changes in the function
8597 
8598 Scaling is also used by finite difference variant of the optimizer  - step
8599 along I-th axis is equal to DiffStep*S[I].
8600 
8601 INPUT PARAMETERS:
8602     State   -   structure stores algorithm state
8603     S       -   array[N], non-zero scaling coefficients
8604                 S[i] may be negative, sign doesn't matter.
8605 
8606   -- ALGLIB --
8607      Copyright 06.06.2014 by Bochkanov Sergey
8608 *************************************************************************/
8609 void minnlcsetscale(const minnlcstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
8610 
8611 
8612 /*************************************************************************
8613 This function sets preconditioner to "inexact LBFGS-based" mode.
8614 
8615 Preconditioning is very important for convergence of  Augmented Lagrangian
8616 algorithm because presence of penalty term makes problem  ill-conditioned.
8617 Difference between  performance  of  preconditioned  and  unpreconditioned
8618 methods can be as large as 100x!
8619 
8620 MinNLC optimizer may use following preconditioners,  each  with   its  own
8621 benefits and drawbacks:
8622     a) inexact LBFGS-based, with O(N*K) evaluation time
8623     b) exact low rank one,  with O(N*K^2) evaluation time
8624     c) exact robust one,    with O(N^3+K*N^2) evaluation time
8625 where K is a total number of general linear and nonlinear constraints (box
8626 ones are not counted).
8627 
8628 Inexact  LBFGS-based  preconditioner  uses L-BFGS  formula  combined  with
orthogonality assumption to perform very fast updates. For an N-dimensional
8630 problem with K general linear or nonlinear constraints (boundary ones  are
8631 not counted) it has O(N*K) cost per iteration.  This   preconditioner  has
8632 best  quality  (less  iterations)  when   general   linear  and  nonlinear
8633 constraints are orthogonal to each other (orthogonality  with  respect  to
8634 boundary constraints is not required). Number of iterations increases when
8635 constraints  are  non-orthogonal, because algorithm assumes orthogonality,
8636 but still it is better than no preconditioner at all.
8637 
8638 INPUT PARAMETERS:
8639     State   -   structure stores algorithm state
8640 
8641   -- ALGLIB --
8642      Copyright 26.09.2014 by Bochkanov Sergey
8643 *************************************************************************/
8644 void minnlcsetprecinexact(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
8645 
8646 
8647 /*************************************************************************
8648 This function sets preconditioner to "exact low rank" mode.
8649 
8650 Preconditioning is very important for convergence of  Augmented Lagrangian
8651 algorithm because presence of penalty term makes problem  ill-conditioned.
8652 Difference between  performance  of  preconditioned  and  unpreconditioned
8653 methods can be as large as 100x!
8654 
8655 MinNLC optimizer may use following preconditioners,  each  with   its  own
8656 benefits and drawbacks:
8657     a) inexact LBFGS-based, with O(N*K) evaluation time
8658     b) exact low rank one,  with O(N*K^2) evaluation time
8659     c) exact robust one,    with O(N^3+K*N^2) evaluation time
8660 where K is a total number of general linear and nonlinear constraints (box
8661 ones are not counted).
8662 
8663 It also provides special unpreconditioned mode of operation which  can  be
8664 used for test purposes. Comments below discuss low rank preconditioner.
8665 
8666 Exact low-rank preconditioner  uses  Woodbury  matrix  identity  to  build
8667 quadratic model of the penalized function. It has following features:
8668 * no special assumptions about orthogonality of constraints
8669 * preconditioner evaluation is optimized for K<<N. Its cost  is  O(N*K^2),
8670   so it may become prohibitively slow for K>=N.
* finally, stability of the process is guaranteed only for K<<N.  Woodbury
  update often fails for K>=N due to degeneracy of intermediate  matrices.
  That's why we recommend using the "exact robust"  preconditioner   for
  such cases.
8675 
8676 RECOMMENDATIONS
8677 
8678 We  recommend  to  choose  between  "exact  low  rank"  and "exact robust"
8679 preconditioners, with "low rank" version being chosen  when  you  know  in
8680 advance that total count of non-box constraints won't exceed N, and "robust"
8681 version being chosen when you need bulletproof solution.
8682 
8683 INPUT PARAMETERS:
8684     State   -   structure stores algorithm state
8685     UpdateFreq- update frequency. Preconditioner is  rebuilt  after  every
8686                 UpdateFreq iterations. Recommended value: 10 or higher.
8687                 Zero value means that good default value will be used.
8688 
8689   -- ALGLIB --
8690      Copyright 26.09.2014 by Bochkanov Sergey
8691 *************************************************************************/
8692 void minnlcsetprecexactlowrank(const minnlcstate &state, const ae_int_t updatefreq, const xparams _xparams = alglib::xdefault);
8693 
8694 
8695 /*************************************************************************
8696 This function sets preconditioner to "exact robust" mode.
8697 
8698 Preconditioning is very important for convergence of  Augmented Lagrangian
8699 algorithm because presence of penalty term makes problem  ill-conditioned.
8700 Difference between  performance  of  preconditioned  and  unpreconditioned
8701 methods can be as large as 100x!
8702 
8703 MinNLC optimizer may use following preconditioners,  each  with   its  own
8704 benefits and drawbacks:
8705     a) inexact LBFGS-based, with O(N*K) evaluation time
8706     b) exact low rank one,  with O(N*K^2) evaluation time
8707     c) exact robust one,    with O(N^3+K*N^2) evaluation time
8708 where K is a total number of general linear and nonlinear constraints (box
8709 ones are not counted).
8710 
8711 It also provides special unpreconditioned mode of operation which  can  be
8712 used for test purposes. Comments below discuss robust preconditioner.
8713 
8714 Exact  robust  preconditioner   uses   Cholesky  decomposition  to  invert
8715 approximate Hessian matrix H=D+W'*C*W (where D stands for  diagonal  terms
8716 of Hessian, combined result of initial scaling matrix and penalty from box
8717 constraints; W stands for general linear constraints and linearization  of
8718 nonlinear ones; C stands for diagonal matrix of penalty coefficients).
8719 
8720 This preconditioner has following features:
8721 * no special assumptions about constraint structure
8722 * preconditioner is optimized  for  stability;  unlike  "exact  low  rank"
8723   version which fails for K>=N, this one works well for any value of K.
* the only drawback is that it takes O(N^3+K*N^2) time to  build  it.  No
  economical  Woodbury update is applied even when it  makes  sense,  thus
  there exist situations (K<<N) when the "exact low  rank"  preconditioner
  outperforms this one.
8728 
8729 RECOMMENDATIONS
8730 
8731 We  recommend  to  choose  between  "exact  low  rank"  and "exact robust"
8732 preconditioners, with "low rank" version being chosen  when  you  know  in
8733 advance that total count of non-box constraints won't exceed N, and "robust"
8734 version being chosen when you need bulletproof solution.
8735 
8736 INPUT PARAMETERS:
8737     State   -   structure stores algorithm state
8738     UpdateFreq- update frequency. Preconditioner is  rebuilt  after  every
8739                 UpdateFreq iterations. Recommended value: 10 or higher.
8740                 Zero value means that good default value will be used.
8741 
8742   -- ALGLIB --
8743      Copyright 26.09.2014 by Bochkanov Sergey
8744 *************************************************************************/
8745 void minnlcsetprecexactrobust(const minnlcstate &state, const ae_int_t updatefreq, const xparams _xparams = alglib::xdefault);
8746 
8747 
8748 /*************************************************************************
8749 This function sets preconditioner to "turned off" mode.
8750 
8751 Preconditioning is very important for convergence of  Augmented Lagrangian
8752 algorithm because presence of penalty term makes problem  ill-conditioned.
8753 Difference between  performance  of  preconditioned  and  unpreconditioned
8754 methods can be as large as 100x!
8755 
MinNLC optimizer may utilize several preconditioners (inexact LBFGS-based,
exact low rank, exact robust), each with its own benefits  and  drawbacks.
8758 It also provides special unpreconditioned mode of operation which  can  be
8759 used for test purposes.
8760 
8761 This function activates this test mode. Do not use it in  production  code
8762 to solve real-life problems.
8763 
8764 INPUT PARAMETERS:
8765     State   -   structure stores algorithm state
8766 
8767   -- ALGLIB --
8768      Copyright 26.09.2014 by Bochkanov Sergey
8769 *************************************************************************/
8770 void minnlcsetprecnone(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
8771 
8772 
8773 /*************************************************************************
8774 This function sets maximum step length (after scaling of step vector  with
8775 respect to variable scales specified by minnlcsetscale() call).
8776 
8777 INPUT PARAMETERS:
8778     State   -   structure which stores algorithm state
8779     StpMax  -   maximum step length, >=0. Set StpMax to 0.0 (default),  if
8780                 you don't want to limit step length.
8781 
Use this subroutine when you optimize a target function which contains exp()
or other fast-growing functions, and  the  optimization  algorithm  makes
too large steps which lead to overflow. This function allows us to  reject
steps that are too large (and therefore expose us to the possible overflow)
without actually calculating the function value at x+stp*d.
8787 
8788 NOTE: different solvers employed by MinNLC optimizer use  different  norms
8789       for step; AUL solver uses 2-norm, whilst SLP solver uses INF-norm.
8790 
8791   -- ALGLIB --
8792      Copyright 02.04.2010 by Bochkanov Sergey
8793 *************************************************************************/
8794 void minnlcsetstpmax(const minnlcstate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
8795 
8796 
8797 /*************************************************************************
8798 This  function  tells MinNLC unit to use  Augmented  Lagrangian  algorithm
8799 for nonlinearly constrained  optimization.  This  algorithm  is  a  slight
8800 modification of one described in "A Modified Barrier-Augmented  Lagrangian
8801 Method for  Constrained  Minimization  (1999)"  by  D.GOLDFARB,  R.POLYAK,
8802 K. SCHEINBERG, I.YUZEFOVICH.
8803 
8804 AUL solver can be significantly faster than SQP on easy  problems  due  to
8805 cheaper iterations, although it needs more function evaluations.
8806 
Augmented Lagrangian algorithm works by converting the problem of minimizing
F(x) subject to equality/inequality constraints  into  an   unconstrained
problem of the form
8810 
8811     min[ f(x) +
8812         + Rho*PENALTY_EQ(x)   + SHIFT_EQ(x,Nu1) +
8813         + Rho*PENALTY_INEQ(x) + SHIFT_INEQ(x,Nu2) ]
8814 
8815 where:
8816 * Rho is a fixed penalization coefficient
8817 * PENALTY_EQ(x) is a penalty term, which is used to APPROXIMATELY  enforce
8818   equality constraints
8819 * SHIFT_EQ(x) is a special "shift"  term  which  is  used  to  "fine-tune"
8820   equality constraints, greatly increasing precision
8821 * PENALTY_INEQ(x) is a penalty term which is used to approximately enforce
8822   inequality constraints
8823 * SHIFT_INEQ(x) is a special "shift"  term  which  is  used to "fine-tune"
8824   inequality constraints, greatly increasing precision
8825 * Nu1/Nu2 are vectors of Lagrange coefficients which are fine-tuned during
8826   outer iterations of algorithm
8827 
8828 This  version  of  AUL  algorithm  uses   preconditioner,  which   greatly
8829 accelerates convergence. Because this  algorithm  is  similar  to  penalty
8830 methods,  it  may  perform  steps  into  infeasible  area.  All  kinds  of
8831 constraints (boundary, linear and nonlinear ones) may   be   violated   in
8832 intermediate points - and in the solution.  However,  properly  configured
8833 AUL method is significantly better at handling  constraints  than  barrier
8834 and/or penalty methods.
8835 
8836 The very basic outline of algorithm is given below:
8837 1) first outer iteration is performed with "default"  values  of  Lagrange
8838    multipliers Nu1/Nu2. Solution quality is low (candidate  point  can  be
8839    too  far  away  from  true  solution; large violation of constraints is
8840    possible) and is comparable with that of penalty methods.
8841 2) subsequent outer iterations  refine  Lagrange  multipliers  and improve
8842    quality of the solution.
8843 
8844 INPUT PARAMETERS:
8845     State   -   structure which stores algorithm state
8846     Rho     -   penalty coefficient, Rho>0:
8847                 * large enough  that  algorithm  converges  with   desired
8848                   precision. Minimum value is 10*max(S'*diag(H)*S),  where
8849                   S is a scale matrix (set by MinNLCSetScale) and H  is  a
8850                   Hessian of the function being minimized. If you can  not
8851                   easily estimate Hessian norm,  see  our  recommendations
8852                   below.
8853                 * not TOO large to prevent ill-conditioning
8854                 * for unit-scale problems (variables and Hessian have unit
8855                   magnitude), Rho=100 or Rho=1000 can be used.
8856                 * it is important to note that Rho is internally multiplied
8857                   by scaling matrix, i.e. optimum value of Rho depends  on
8858                   scale of variables specified  by  MinNLCSetScale().
8859     ItsCnt  -   number of outer iterations:
8860                 * ItsCnt=0 means that small number of outer iterations  is
8861                   automatically chosen (10 iterations in current version).
                * ItsCnt=1 means that the AUL algorithm performs just  like
                  the usual barrier method.
8864                 * ItsCnt>1 means that  AUL  algorithm  performs  specified
8865                   number of outer iterations
8866 
8867 HOW TO CHOOSE PARAMETERS
8868 
8869 Nonlinear optimization is a tricky area and Augmented Lagrangian algorithm
8870 is sometimes hard to tune. Good values of  Rho  and  ItsCnt  are  problem-
8871 specific.  In  order  to  help  you   we   prepared   following   set   of
8872 recommendations:
8873 
8874 * for  unit-scale  problems  (variables  and Hessian have unit magnitude),
8875   Rho=100 or Rho=1000 can be used.
8876 
* start from some small value of Rho and solve the problem with  just  one
  outer iteration (ItsCnt=1). In this case the algorithm  behaves  like  a
  penalty method. Increase Rho in 2x or 10x steps until you see  that  one
  outer iteration returns a point which is a "rough approximation  to  the
  solution".
8881 
8882   It is very important to have Rho so  large  that  penalty  term  becomes
8883   constraining i.e. modified function becomes highly convex in constrained
8884   directions.
8885 
  On the other hand, too large a Rho may prevent you  from  converging  to
  the solution. You can diagnose it by studying the number of inner
  iterations performed by the algorithm: too few (5-10 on a 1000-dimensional
  problem) or too many (orders of magnitude more than the  dimensionality)
  usually means that Rho is too large.
8891 
8892 * with just one outer iteration you  usually  have  low-quality  solution.
8893   Some constraints can be violated with very  large  margin,  while  other
8894   ones (which are NOT violated in the true solution) can push final  point
8895   too far in the inner area of the feasible set.
8896 
  For example, if you have the constraint x0>=0 and the true  solution  is
  x0=1, then the mere presence of "x0>=0" will introduce  a  bias  towards
  larger values of x0. Say, the algorithm may stop at x0=1.5 instead of 1.0.
8900 
8901 * after you found good Rho, you may increase number of  outer  iterations.
  ItsCnt=10 is a good value. Subsequent outer iterations will refine values
8903   of  Lagrange  multipliers.  Constraints  which  were  violated  will  be
8904   enforced, inactive constraints will be dropped (corresponding multipliers
8905   will be decreased). Ideally, you  should  see  10-1000x  improvement  in
8906   constraint handling (constraint violation is reduced).
8907 
8908 * if  you  see  that  algorithm  converges  to  vicinity  of solution, but
8909   additional outer iterations do not refine solution,  it  may  mean  that
8910   algorithm is unstable - it wanders around true  solution,  but  can  not
8911   approach it. Sometimes algorithm may be stabilized by increasing Rho one
8912   more time, making it 5x or 10x larger.
8913 
8914 SCALING OF CONSTRAINTS [IMPORTANT]
8915 
8916 AUL optimizer scales   variables   according   to   scale   specified   by
8917 MinNLCSetScale() function, so it can handle  problems  with  badly  scaled
8918 variables (as long as we KNOW their scales).   However,  because  function
8919 being optimized is a mix  of  original  function and  constraint-dependent
8920 penalty  functions, it  is   important  to   rescale  both  variables  AND
8921 constraints.
8922 
Say, if you minimize f(x)=x^2 subject to 1000000*x>=0, then  you  have  a
constraint whose scale is different from that of the target function
(another example is 0.000001*x>=0). It is also possible to have constraints
whose scales are misaligned: 1000000*x0>=0, 0.000001*x1<=0.  Inappropriate
scaling may ruin convergence because minimizing x^2 subject to x>=0 is NOT
the same as minimizing it subject to 1000000*x>=0.
8929 
8930 Because we  know  coefficients  of  boundary/linear  constraints,  we  can
8931 automatically rescale and normalize them. However,  there  is  no  way  to
8932 automatically rescale nonlinear constraints Gi(x) and  Hi(x)  -  they  are
8933 black boxes.
8934 
It means that YOU are the one who is responsible for  correct  scaling  of
the nonlinear constraints Gi(x) and Hi(x). We recommend you to rescale the
nonlinear constraints in such a way that the I-th component of dG/dX  (or
dH/dx) has magnitude approximately equal to 1/S[i] (where S is a scale set
by the MinNLCSetScale() function).
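
For example, if the raw constraint is 1000000*x0>=0 (i.e. H(x)=-1000000*x0<=0
in MinNLC convention) and the variable scale is S[0]=1, a simple remedy is to
report the constraint divided by 1000000 from the user callback. A hedged
sketch (the problem is purely illustrative; fi[0] is the target and fi[1] is
the single inequality constraint):
>
> void my_jac(const alglib::real_1d_array &x, alglib::real_1d_array &fi,
>             alglib::real_2d_array &jac, void *ptr)
> {
>     fi[0]     = x[0]*x[0];   // target f(x)=x0^2
>     jac[0][0] = 2*x[0];
>     fi[1]     = -x[0];       // -1000000*x0<=0, rescaled by 1e-6
>     jac[1][0] = -1.0;
> }
>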
8940 
8941 WHAT IF IT DOES NOT CONVERGE?
8942 
It is possible that the AUL algorithm fails to converge to precise  values
of the Lagrange multipliers. It stops somewhere around the true  solution,
but the candidate point is still too far from the  solution,  and  some
constraints are violated. This kind of failure is specific  to  Lagrangian
algorithms - technically, they stop at some point, but this point  is  not
the constrained solution.
8948 
There exist several reasons why the algorithm may fail to converge:
8950 a) too loose stopping criteria for inner iteration
8951 b) degenerate, redundant constraints
8952 c) target function has unconstrained extremum exactly at the  boundary  of
8953    some constraint
8954 d) numerical noise in the target function
8955 
8956 In all these cases algorithm is unstable - each outer iteration results in
8957 large and almost random step which improves handling of some  constraints,
8958 but violates other ones (ideally  outer iterations should form a  sequence
8959 of progressively decreasing steps towards solution).
8960 
The first possible reason is that too loose stopping  criteria  for  inner
iterations were specified. The Augmented  Lagrangian  algorithm  solves  a
sequence of intermediate problems, and requires each of them to be  solved
with high precision. Insufficient precision results in incorrect  updates
of the Lagrange multipliers.
8966 
8967 Another reason is that you may have specified degenerate constraints: say,
8968 some constraint was repeated twice. In most cases AUL algorithm gracefully
8969 handles such situations, but sometimes it may spend too much time figuring
8970 out subtle degeneracies in constraint matrix.
8971 
8972 Third reason is tricky and hard to diagnose. Consider situation  when  you
8973 minimize  f=x^2  subject to constraint x>=0.  Unconstrained   extremum  is
8974 located  exactly  at  the  boundary  of  constrained  area.  In  this case
8975 algorithm will tend to oscillate between negative  and  positive  x.  Each
8976 time it stops at x<0 it "reinforces" constraint x>=0, and each time it  is
8977 bounced to x>0 it "relaxes" constraint (and is  attracted  to  x<0).
8978 
Such a situation sometimes happens in problems  with  hidden  symmetries.
The algorithm gets caught in a loop, with the Lagrange  multipliers  being
continuously increased/decreased. Luckily, such a loop forms after at least
three iterations, so this problem can be solved by DECREASING  the  number
of outer iterations down to 1-2 and increasing the penalty coefficient Rho
as much as possible.
8985 
8986 Final reason is numerical noise. AUL algorithm is robust against  moderate
8987 noise (more robust than, say, active set methods),  but  large  noise  may
8988 destabilize algorithm.
8989 
8990   -- ALGLIB --
8991      Copyright 06.06.2014 by Bochkanov Sergey
8992 *************************************************************************/
8993 void minnlcsetalgoaul(const minnlcstate &state, const double rho, const ae_int_t itscnt, const xparams _xparams = alglib::xdefault);
8994 
8995 
8996 /*************************************************************************
8997 This   function  tells  MinNLC  optimizer  to  use  SLP (Successive Linear
8998 Programming) algorithm for  nonlinearly  constrained   optimization.  This
8999 algorithm  is  a  slight  modification  of  one  described  in  "A  Linear
9000 programming-based optimization algorithm for solving nonlinear programming
9001 problems" (2010) by Claus Still and Tapio Westerlund.
9002 
This solver is the slowest one in ALGLIB; it requires more target function
evaluations than SQP and AUL. However, it is somewhat more robust in tricky
cases, so it can be used as a backup plan. We recommend  using  this  algo
when SQP/AUL do not work (do not return the solution you  expect).  If  a
different approach gives the same results, then MAYBE something  is  wrong
with your optimization problem.
9009 
Despite its name ("linear" = "first order method") this algorithm performs
steps similar to those of the conjugate gradient  method;  internally  it
uses an orthogonality/conjugacy requirement for subsequent  steps,  which
makes it closer to second order methods in terms of convergence speed.
9014 
9015 Convergence is proved for the following case:
9016 * function and constraints are continuously differentiable (C1 class)
9017 * extended Mangasarian–Fromovitz constraint qualification  (EMFCQ)  holds;
9018   in the context of this algorithm EMFCQ  means  that  one  can,  for  any
9019   infeasible  point,  find  a  search  direction  such that the constraint
9020   infeasibilities are reduced.
9021 
9022 This algorithm has following nice properties:
9023 * no parameters to tune
9024 * no convexity requirements for target function or constraints
9025 * initial point can be infeasible
9026 * algorithm respects box constraints in all intermediate points  (it  does
9027   not even evaluate function outside of box constrained area)
9028 * once linear constraints are enforced, algorithm will not violate them
9029 * no such guarantees can be provided for nonlinear constraints,  but  once
9030   nonlinear constraints are enforced, algorithm will try  to  respect them
9031   as much as possible
9032 * numerical differentiation does not  violate  box  constraints  (although
9033   general linear and nonlinear ones can be violated during differentiation)
9034 * from our experience, this algorithm is somewhat more  robust  in  really
9035   difficult cases
9036 
9037 INPUT PARAMETERS:
9038     State   -   structure which stores algorithm state
9039 
9040 ===== TRACING SLP SOLVER =================================================
9041 
9042 SLP solver supports advanced tracing capabilities. You can trace algorithm
9043 output by specifying following trace symbols (case-insensitive)  by  means
9044 of trace_file() call:
9045 * 'SLP'         - for basic trace of algorithm  steps and decisions.  Only
9046                   short scalars (function values and deltas) are  printed.
9047                   N-dimensional quantities like search directions are  NOT
9048                   printed.
9049                   It also prints OptGuard  integrity  checker  report when
9050                   nonsmoothness of target/constraints is suspected.
9051 * 'SLP.DETAILED'- for output of points being visited and search directions
9052                   This  symbol  also  implicitly  defines  'SLP'. You  can
9053                   control output format by additionally specifying:
9054                   * nothing     to output in  6-digit exponential format
9055                   * 'PREC.E15'  to output in 15-digit exponential format
9056                   * 'PREC.F6'   to output in  6-digit fixed-point format
9057 * 'SLP.PROBING' - to let algorithm insert additional function  evaluations
9058                   before line search  in  order  to  build  human-readable
9059                   chart of the raw  Lagrangian  (~40  additional  function
                  evaluations are performed for each  line  search).  This
9061                   symbol also implicitly defines 'SLP'. Definition of this
9062                   symbol also automatically activates OptGuard  smoothness
9063                   monitor.
9064 * 'OPTGUARD'    - for report of smoothness/continuity violations in target
9065                   and/or constraints. This kind of reporting is   included
9066                   in 'SLP', but it comes with lots of additional info.  If
9067                   you  need  just  smoothness  monitoring,   specify  this
9068                   setting.
9069 
9070                   NOTE: this tag merely directs  OptGuard  output  to  log
9071                         file. Even if you specify it, you  still  have  to
9072                         configure OptGuard  by calling minnlcoptguard...()
9073                         family of functions.
9074 
9075 By default trace is disabled and adds  no  overhead  to  the  optimization
9076 process. However, specifying any of the symbols adds some  formatting  and
9077 output-related   overhead.  Specifying  'SLP.PROBING'  adds   even  larger
9078 overhead due to additional function evaluations being performed.
9079 
9080 You may specify multiple symbols by separating them with commas:
9081 >
9082 > alglib::trace_file("SLP,SLP.PROBING,PREC.F6", "path/to/trace.log")
9083 >
9084 
9085   -- ALGLIB --
9086      Copyright 02.04.2018 by Bochkanov Sergey
9087 *************************************************************************/
9088 void minnlcsetalgoslp(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
9089 
9090 
9091 /*************************************************************************
9092 This   function  tells  MinNLC  optimizer to use SQP (Successive Quadratic
9093 Programming) algorithm for nonlinearly constrained optimization.
9094 
This algorithm needs an order of magnitude (5x-10x) fewer function
evaluations than the AUL solver, but has higher  overhead  because  each
iteration involves the solution of a quadratic programming problem.
9098 
9099 Convergence is proved for the following case:
9100 * function and constraints are continuously differentiable (C1 class)
9101 
9102 This algorithm has following nice properties:
9103 * no parameters to tune
9104 * no convexity requirements for target function or constraints
9105 * initial point can be infeasible
9106 * algorithm respects box constraints in all intermediate points  (it  does
9107   not even evaluate function outside of box constrained area)
9108 * once linear constraints are enforced, algorithm will not violate them
9109 * no such guarantees can be provided for nonlinear constraints,  but  once
9110   nonlinear constraints are enforced, algorithm will try  to  respect them
9111   as much as possible
9112 * numerical differentiation does not  violate  box  constraints  (although
9113   general linear and nonlinear ones can be violated during differentiation)
9114 
We recommend this algorithm as a default option for medium-scale  problems
(less than a thousand variables) or problems whose target function is hard
to evaluate.

For large-scale problems, or ones with a very cheap target  function,  the
AUL solver can be a better option.
9121 
9122 INPUT PARAMETERS:
9123     State   -   structure which stores algorithm state
9124 
9125 ===== INTERACTION WITH OPTGUARD ==========================================
9126 
9127 OptGuard integrity  checker  allows us to catch problems  like  errors  in
9128 gradients   and  discontinuity/nonsmoothness  of  the  target/constraints.
The latter kind of problems can be detected by looking at  line  searches
9130 performed during optimization and searching for signs of nonsmoothness.
9131 
9132 The problem with SQP is that it is too good for OptGuard to work - it does
9133 not perform line searches. It typically  needs  1-2  function  evaluations
per step, which is not enough for OptGuard to detect nonsmoothness.
9135 
9136 So, if you suspect that your problem is  nonsmooth  and  if  you  want  to
9137 confirm or deny it, we recommend you to either:
9138 * use AUL or SLP solvers, which can detect nonsmoothness of the problem
9139 * or, alternatively, activate 'SQP.PROBING' trace  tag  that  will  insert
9140   additional  function  evaluations (~40  per  line  step) that will  help
9141   OptGuard integrity checker to study properties of your problem
9142 
9143 ===== TRACING SQP SOLVER =================================================
9144 
9145 SQP solver supports advanced tracing capabilities. You can trace algorithm
9146 output by specifying following trace symbols (case-insensitive)  by  means
9147 of trace_file() call:
9148 * 'SQP'         - for basic trace of algorithm  steps and decisions.  Only
9149                   short scalars (function values and deltas) are  printed.
9150                   N-dimensional quantities like search directions are  NOT
9151                   printed.
9152                   It also prints OptGuard  integrity  checker  report when
9153                   nonsmoothness of target/constraints is suspected.
9154 * 'SQP.DETAILED'- for output of points being visited and search directions
9155                   This  symbol  also  implicitly  defines  'SQP'. You  can
9156                   control output format by additionally specifying:
9157                   * nothing     to output in  6-digit exponential format
9158                   * 'PREC.E15'  to output in 15-digit exponential format
9159                   * 'PREC.F6'   to output in  6-digit fixed-point format
9160 * 'SQP.PROBING' - to let algorithm insert additional function  evaluations
9161                   before line search  in  order  to  build  human-readable
9162                   chart of the raw  Lagrangian  (~40  additional  function
                  evaluations are performed for each  line  search).  This
9164                   symbol  also  implicitly  defines  'SQP'  and  activates
9165                   OptGuard integrity checker which detects continuity  and
9166                   smoothness violations. An OptGuard log is printed at the
9167                   end of the file.
9168 
9169 By default trace is disabled and adds  no  overhead  to  the  optimization
9170 process. However, specifying any of the symbols adds some  formatting  and
9171 output-related   overhead.  Specifying  'SQP.PROBING'  adds   even  larger
9172 overhead due to additional function evaluations being performed.
9173 
9174 You may specify multiple symbols by separating them with commas:
9175 >
9176 > alglib::trace_file("SQP,SQP.PROBING,PREC.F6", "path/to/trace.log")
9177 >
9178 
9179   -- ALGLIB --
9180      Copyright 02.12.2019 by Bochkanov Sergey
9181 *************************************************************************/
9182 void minnlcsetalgosqp(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
9183 
9184 
9185 /*************************************************************************
9186 This function turns on/off reporting.
9187 
9188 INPUT PARAMETERS:
9189     State   -   structure which stores algorithm state
9190     NeedXRep-   whether iteration reports are needed or not
9191 
9192 If NeedXRep is True, algorithm will call rep() callback function if  it is
9193 provided to MinNLCOptimize().
9194 
NOTE: the algorithm passes two parameters to the rep() callback - current
      point and penalized function value at the current point. Important:
      the function value which is reported is NOT  the  function  being
      minimized. It is the sum of the function being  minimized  and  the
      penalty term.
9199 
9200   -- ALGLIB --
9201      Copyright 28.11.2010 by Bochkanov Sergey
9202 *************************************************************************/
9203 void minnlcsetxrep(const minnlcstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
9204 
9205 
9206 /*************************************************************************
This function provides a reverse communication interface.
The reverse communication interface is not documented and is not
recommended for use. See below for functions which provide a better
documented API.
9210 *************************************************************************/
9211 bool minnlciteration(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
9212 
9213 
9214 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear optimizer
9216 
9217 These functions accept following parameters:
9218     state   -   algorithm state
9219     fvec    -   callback which calculates function vector fi[]
9220                 at given point x
9221     jac     -   callback which calculates function vector fi[]
9222                 and Jacobian jac at given point x
9223     rep     -   optional callback which is called after each iteration
9224                 can be NULL
9225     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
9226                 can be NULL
9227 
9228 
9229 NOTES:
9230 
9231 1. This function has two different implementations: one which  uses  exact
9232    (analytical) user-supplied Jacobian, and one which uses  only  function
9233    vector and numerically  differentiates  function  in  order  to  obtain
9234    gradient.
9235 
9236    Depending  on  the  specific  function  used to create optimizer object
9237    you should choose appropriate variant of MinNLCOptimize() -  one  which
9238    accepts function AND Jacobian or one which accepts ONLY function.
9239 
9240    Be careful to choose variant of MinNLCOptimize()  which  corresponds to
9241    your optimization scheme! Table below lists different  combinations  of
9242    callback (function/gradient) passed to MinNLCOptimize()   and  specific
9243    function used to create optimizer.
9244 
9245 
9246                      |         USER PASSED TO MinNLCOptimize()
9247    CREATED WITH      |  function only   |  function and gradient
9248    ------------------------------------------------------------
9249    MinNLCCreateF()   |     works               FAILS
9250    MinNLCCreate()    |     FAILS               works
9251 
   Here "FAILS" denotes inappropriate combinations of  optimizer  creation
   function and MinNLCOptimize() version. Attempts to use such combinations
   will lead to an exception. Either you did not pass the gradient when it
   WAS needed, or you passed the gradient when it was NOT needed.
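
   The snippet below is a hedged usage sketch (the problem, the  callback
   name nlc_jac and the constraint setup are purely illustrative); it
   assumes the optimizer was created with MinNLCCreate() and that one
   nonlinear equality constraint was registered via minnlcsetnlc(state,1,0):
>
> void nlc_jac(const alglib::real_1d_array &x, alglib::real_1d_array &fi,
>              alglib::real_2d_array &jac, void *ptr)
> {
>     // fi[0]/row 0 of jac: target f=x0^2+x1^2; fi[1]: constraint x0+x1-1=0
>     fi[0] = x[0]*x[0] + x[1]*x[1];
>     jac[0][0] = 2*x[0];  jac[0][1] = 2*x[1];
>     fi[1] = x[0] + x[1] - 1.0;
>     jac[1][0] = 1.0;     jac[1][1] = 1.0;
> }
> ...
> alglib::minnlcoptimize(state, nlc_jac);
> alglib::minnlcresults(state, x, rep);
>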
9256 
9257   -- ALGLIB --
9258      Copyright 06.06.2014 by Bochkanov Sergey
9259 
9260 *************************************************************************/
9261 void minnlcoptimize(minnlcstate &state,
9262     void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
9263     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
9264     void *ptr = NULL,
9265     const xparams _xparams = alglib::xdefault);
9266 void minnlcoptimize(minnlcstate &state,
9267     void  (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
9268     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
9269     void *ptr = NULL,
9270     const xparams _xparams = alglib::xdefault);
9271 
9272 
9273 /*************************************************************************
9274 This  function  activates/deactivates verification  of  the  user-supplied
9275 analytic gradient/Jacobian.
9276 
9277 Upon  activation  of  this  option  OptGuard  integrity  checker  performs
9278 numerical differentiation of your target  function  (constraints)  at  the
9279 initial point (note: future versions may also perform check  at  the final
9280 point) and compares numerical gradient/Jacobian with analytic one provided
9281 by you.
9282 
9283 If difference is too large, an error flag is set and optimization  session
9284 continues. After optimization session is over, you can retrieve the report
9285 which stores both gradients/Jacobians, and specific components highlighted
9286 as suspicious by the OptGuard.
9287 
9288 The primary OptGuard report can be retrieved with minnlcoptguardresults().
9289 
9290 IMPORTANT: gradient check is a high-overhead option which  will  cost  you
9291            about 3*N additional function evaluations. In many cases it may
9292            cost as much as the rest of the optimization session.
9293 
9294            YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO
9295            CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY.
9296 
9297 NOTE: unlike previous incarnation of the gradient checking code,  OptGuard
9298       does NOT interrupt optimization even if it discovers bad gradient.
9299 
9300 INPUT PARAMETERS:
9301     State       -   structure used to store algorithm state
9302     TestStep    -   verification step used for numerical differentiation:
9303                     * TestStep=0 turns verification off
9304                     * TestStep>0 activates verification
9305                     You should carefully choose TestStep. Value  which  is
9306                     too large (so large that  function  behavior  is  non-
9307                     cubic at this scale) will lead  to  false  alarms. Too
9308                     short step will result in rounding  errors  dominating
9309                     numerical derivative.
9310 
9311                     You may use different step for different parameters by
9312                     means of setting scale with minnlcsetscale().
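
A short illustrative call (the TestStep value below is an assumption, not
a recommendation; choose it according to the guidance above):
>
> alglib::minnlcoptguardgradient(state, 1.0e-3);
>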
9313 
9314 === EXPLANATION ==========================================================
9315 
In order to verify the gradient, the algorithm performs the following steps:
9317   * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
9318     where X[i] is i-th component of the initial point and S[i] is a  scale
9319     of i-th parameter
9320   * F(X) is evaluated at these trial points
9321   * we perform one more evaluation in the middle point of the interval
9322   * we  build  cubic  model using function values and derivatives at trial
9323     points and we compare its prediction with actual value in  the  middle
9324     point
9325 
9326   -- ALGLIB --
9327      Copyright 15.06.2014 by Bochkanov Sergey
9328 *************************************************************************/
9329 void minnlcoptguardgradient(const minnlcstate &state, const double teststep, const xparams _xparams = alglib::xdefault);
9330 
9331 
9332 /*************************************************************************
9333 This  function  activates/deactivates nonsmoothness monitoring  option  of
9334 the  OptGuard  integrity  checker. Smoothness  monitor  silently  observes
9335 solution process and tries to detect ill-posed problems, i.e. ones with:
9336 a) discontinuous target function (non-C0) and/or constraints
9337 b) nonsmooth     target function (non-C1) and/or constraints
9338 
9339 Smoothness monitoring does NOT interrupt optimization  even if it suspects
9340 that your problem is nonsmooth. It just sets corresponding  flags  in  the
9341 OptGuard report which can be retrieved after optimization is over.
9342 
9343 Smoothness monitoring is a moderate overhead option which often adds  less
9344 than 1% to the optimizer running time. Thus, you can use it even for large
9345 scale problems.
9346 
9347 NOTE: OptGuard does  NOT  guarantee  that  it  will  always  detect  C0/C1
9348       continuity violations.
9349 
9350       First, minor errors are hard to  catch - say, a 0.0001 difference in
9351       the model values at two sides of the gap may be due to discontinuity
9352       of the model - or simply because the model has changed.
9353 
9354       Second, C1-violations  are  especially  difficult  to  detect  in  a
9355       noninvasive way. The optimizer usually  performs  very  short  steps
9356       near the nonsmoothness, and differentiation  usually   introduces  a
9357       lot of numerical noise.  It  is  hard  to  tell  whether  some  tiny
9358       discontinuity in the slope is due to real nonsmoothness or just  due
9359       to numerical noise alone.
9360 
      Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases  they  can  be
      spotted by restarting from a different initial point).
9364 
9365 INPUT PARAMETERS:
9366     state   -   algorithm state
9367     level   -   monitoring level:
9368                 * 0 - monitoring is disabled
9369                 * 1 - noninvasive low-overhead monitoring; function values
9370                       and/or gradients are recorded, but OptGuard does not
9371                       try to perform additional evaluations  in  order  to
9372                       get more information about suspicious locations.
9373                       This kind of monitoring does not work well with  SQP
9374                       because SQP solver needs just 1-2 function evaluations
9375                       per step, which is not enough for OptGuard  to  make
9376                       any conclusions.
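
A minimal illustrative call which enables the monitor at level 1 (the only
noninvasive level listed above):
>
> alglib::minnlcoptguardsmoothness(state, 1);
>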
9377 
9378 === EXPLANATION ==========================================================
9379 
9380 One major source of headache during optimization  is  the  possibility  of
9381 the coding errors in the target function/constraints (or their gradients).
9382 Such  errors   most   often   manifest   themselves  as  discontinuity  or
9383 nonsmoothness of the target/constraints.
9384 
9385 Another frequent situation is when you try to optimize something involving
9386 lots of min() and max() operations, i.e. nonsmooth target. Although not  a
9387 coding error, it is nonsmoothness anyway - and smooth  optimizers  usually
9388 stop right after encountering nonsmoothness, well before reaching solution.
9389 
The OptGuard integrity checker helps you to catch such situations: it
monitors function values/gradients being passed to the optimizer and tries
to detect errors. Upon discovering a suspicious pair of points it  raises
the appropriate flag (and allows you to continue optimization).  When
optimization is done, you can study the OptGuard result.
9395 
9396   -- ALGLIB --
9397      Copyright 21.11.2018 by Bochkanov Sergey
9398 *************************************************************************/
9399 void minnlcoptguardsmoothness(const minnlcstate &state, const ae_int_t level, const xparams _xparams = alglib::xdefault);
9400 void minnlcoptguardsmoothness(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
9401 
9402 
9403 /*************************************************************************
9404 Results of OptGuard integrity check, should be called  after  optimization
9405 session is over.
9406 
9407 === PRIMARY REPORT =======================================================
9408 
9409 OptGuard performs several checks which are intended to catch common errors
9410 in the implementation of nonlinear function/gradient:
9411 * incorrect analytic gradient
9412 * discontinuous (non-C0) target functions (constraints)
9413 * nonsmooth     (non-C1) target functions (constraints)
9414 
9415 Each of these checks is activated with appropriate function:
9416 * minnlcoptguardgradient() for gradient verification
9417 * minnlcoptguardsmoothness() for C0/C1 checks
9418 
9419 Following flags are set when these errors are suspected:
9420 * rep.badgradsuspected, and additionally:
9421   * rep.badgradfidx for specific function (Jacobian row) suspected
9422   * rep.badgradvidx for specific variable (Jacobian column) suspected
9423   * rep.badgradxbase, a point where gradient/Jacobian is tested
9424   * rep.badgraduser, user-provided gradient/Jacobian
9425   * rep.badgradnum, reference gradient/Jacobian obtained via numerical
9426     differentiation
9427 * rep.nonc0suspected, and additionally:
9428   * rep.nonc0fidx - an index of specific function violating C0 continuity
9429 * rep.nonc1suspected, and additionally
9430   * rep.nonc1fidx - an index of specific function violating C1 continuity
9431 Here function index 0 means  target function, index 1  or  higher  denotes
9432 nonlinear constraints.
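
A hedged retrieval sketch (field names as listed above; the printf output
is purely illustrative):
>
> alglib::optguardreport ogrep;
> alglib::minnlcoptguardresults(state, ogrep);
> if( ogrep.badgradsuspected )
>     printf("suspicious Jacobian entry: row %d, column %d\n",
>            (int)ogrep.badgradfidx, (int)ogrep.badgradvidx);
> if( ogrep.nonc0suspected || ogrep.nonc1suspected )
>     printf("target/constraints may be discontinuous or nonsmooth\n");
>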
9433 
9434 === ADDITIONAL REPORTS/LOGS ==============================================
9435 
Several different tests are performed to catch C0/C1 errors; you can  find
out which specific test signaled the error by looking at:
9438 * rep.nonc0test0positive, for non-C0 test #0
9439 * rep.nonc1test0positive, for non-C1 test #0
9440 * rep.nonc1test1positive, for non-C1 test #1
9441 
9442 Additional information (including line search logs)  can  be  obtained  by
9443 means of:
9444 * minnlcoptguardnonc1test0results()
9445 * minnlcoptguardnonc1test1results()
9446 which return detailed error reports, specific points where discontinuities
9447 were found, and so on.
9448 
9449 ==========================================================================
9450 
9451 INPUT PARAMETERS:
9452     state   -   algorithm state
9453 
9454 OUTPUT PARAMETERS:
9455     rep     -   generic OptGuard report;  more  detailed  reports  can  be
9456                 retrieved with other functions.
9457 
9458 NOTE: false negatives (nonsmooth problems are not identified as  nonsmooth
9459       ones) are possible although unlikely.
9460 
9461       The reason  is  that  you  need  to  make several evaluations around
9462       nonsmoothness  in  order  to  accumulate  enough  information  about
9463       function curvature. Say, if you start right from the nonsmooth point,
9464       optimizer simply won't get enough data to understand what  is  going
9465       wrong before it terminates due to abrupt changes in the  derivative.
9466       It is also  possible  that  "unlucky"  step  will  move  us  to  the
9467       termination too quickly.
9468 
9469       Our current approach is to have less than 0.1%  false  negatives  in
9470       our test examples  (measured  with  multiple  restarts  from  random
9471       points), and to have exactly 0% false positives.
9472 
9473   -- ALGLIB --
9474      Copyright 21.11.2018 by Bochkanov Sergey
9475 *************************************************************************/
9476 void minnlcoptguardresults(const minnlcstate &state, optguardreport &rep, const xparams _xparams = alglib::xdefault);
9477 
9478 
9479 /*************************************************************************
9480 Detailed results of the OptGuard integrity check for nonsmoothness test #0
9481 
9482 Nonsmoothness (non-C1) test #0 studies  function  values  (not  gradient!)
9483 obtained during line searches and monitors  behavior  of  the  directional
9484 derivative estimate.
9485 
9486 This test is less powerful than test #1, but it does  not  depend  on  the
9487 gradient values and thus it is more robust against artifacts introduced by
9488 numerical differentiation.
9489 
9490 Two reports are returned:
9491 * a "strongest" one, corresponding  to  line   search  which  had  highest
9492   value of the nonsmoothness indicator
9493 * a "longest" one, corresponding to line search which  had  more  function
9494   evaluations, and thus is more detailed
9495 
9496 In both cases following fields are returned:
9497 
9498 * positive - is TRUE  when test flagged suspicious point;  FALSE  if  test
9499   did not notice anything (in the latter cases fields below are empty).
9500 * fidx - is an index of the function (0 for  target  function, 1 or higher
9501   for nonlinear constraints) which is suspected of being "non-C1"
9502 * x0[], d[] - arrays of length N which store initial point  and  direction
9503   for line search (d[] can be normalized, but does not have to)
9504 * stp[], f[] - arrays of length CNT which store step lengths and  function
9505   values at these points; f[i] is evaluated in x0+stp[i]*d.
* stpidxa, stpidxb - we suspect that the function violates  C1  continuity
  between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
  with the most likely position of the  violation  between  stpidxa+1  and
  stpidxa+2).
9510 
9511 ==========================================================================
9512 = SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it -  you  will
9513 =                   see where C1 continuity is violated.
9514 ==========================================================================
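
A hedged sketch which dumps the "longest" report as (stp,f)  pairs  ready
for plotting (field names as listed above):
>
> alglib::optguardnonc1test0report strrep, lngrep;
> alglib::minnlcoptguardnonc1test0results(state, strrep, lngrep);
> if( lngrep.positive )
>     for(int i=0; i<(int)lngrep.cnt; i++)
>         printf("%.6e %.6e\n", lngrep.stp[i], lngrep.f[i]);
>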
9515 
9516 INPUT PARAMETERS:
9517     state   -   algorithm state
9518 
9519 OUTPUT PARAMETERS:
9520     strrep  -   C1 test #0 "strong" report
9521     lngrep  -   C1 test #0 "long" report
9522 
9523   -- ALGLIB --
9524      Copyright 21.11.2018 by Bochkanov Sergey
9525 *************************************************************************/
9526 void minnlcoptguardnonc1test0results(const minnlcstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams = alglib::xdefault);
9527 
9528 
9529 /*************************************************************************
9530 Detailed results of the OptGuard integrity check for nonsmoothness test #1
9531 
9532 Nonsmoothness (non-C1)  test  #1  studies  individual  components  of  the
9533 gradient computed during line search.
9534 
9535 When precise analytic gradient is provided this test is more powerful than
9536 test #0  which  works  with  function  values  and  ignores  user-provided
9537 gradient.  However,  test  #0  becomes  more   powerful   when   numerical
9538 differentiation is employed (in such cases test #1 detects  higher  levels
9539 of numerical noise and becomes too conservative).
9540 
9541 This test also tells specific components of the gradient which violate  C1
9542 continuity, which makes it more informative than #0, which just tells that
9543 continuity is violated.
9544 
9545 Two reports are returned:
9546 * a "strongest" one, corresponding  to  line   search  which  had  highest
9547   value of the nonsmoothness indicator
9548 * a "longest" one, corresponding to line search which  had  more  function
9549   evaluations, and thus is more detailed
9550 
9551 In both cases following fields are returned:
9552 
9553 * positive - is TRUE  when test flagged suspicious point;  FALSE  if  test
9554   did not notice anything (in the latter cases fields below are empty).
9555 * fidx - is an index of the function (0 for  target  function, 1 or higher
9556   for nonlinear constraints) which is suspected of being "non-C1"
9557 * vidx - is an index of the variable in [0,N) with nonsmooth derivative
9558 * x0[], d[] - arrays of length N which store initial point  and  direction
9559   for line search (d[] can be normalized, but does not have to)
9560 * stp[], g[] - arrays of length CNT which store step lengths and  gradient
9561   values at these points; g[i] is evaluated in  x0+stp[i]*d  and  contains
9562   vidx-th component of the gradient.
* stpidxa, stpidxb - we suspect that the function violates  C1  continuity
  between steps #stpidxa and #stpidxb (usually we have  stpidxb=stpidxa+3,
  with the most likely position of the  violation  between  stpidxa+1  and
  stpidxa+2).
9567 
9568 ==========================================================================
= SHORTLY SPEAKING: build a 2D plot of (stp,g) and look at it -  you  will
9570 =                   see where C1 continuity is violated.
9571 ==========================================================================
9572 
9573 INPUT PARAMETERS:
9574     state   -   algorithm state
9575 
9576 OUTPUT PARAMETERS:
9577     strrep  -   C1 test #1 "strong" report
9578     lngrep  -   C1 test #1 "long" report
9579 
9580   -- ALGLIB --
9581      Copyright 21.11.2018 by Bochkanov Sergey
9582 *************************************************************************/
9583 void minnlcoptguardnonc1test1results(const minnlcstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams = alglib::xdefault);
9584 
9585 
9586 /*************************************************************************
9587 MinNLC results:  the  solution  found,  completion  codes  and  additional
9588 information.
9589 
9590 If you activated OptGuard integrity checking functionality and want to get
9591 OptGuard report, it can be retrieved with:
9592 * minnlcoptguardresults() - for a primary report about (a) suspected C0/C1
9593   continuity violations and (b) errors in the analytic gradient.
9594 * minnlcoptguardnonc1test0results() - for C1 continuity violation test #0,
9595   detailed line search log
9596 * minnlcoptguardnonc1test1results() - for C1 continuity violation test #1,
9597   detailed line search log
9598 
9599 INPUT PARAMETERS:
9600     State   -   algorithm state
9601 
9602 OUTPUT PARAMETERS:
9603     X       -   array[0..N-1], solution
9604     Rep     -   optimization report, contains information about completion
9605                 code, constraint violation at the solution and so on.
9606 
9607                 You   should   check   rep.terminationtype  in  order   to
9608                 distinguish successful termination from unsuccessful one:
9609 
9610                 === FAILURE CODES ===
9611                 * -8    internal  integrity control  detected  infinite or
9612                         NAN   values    in   function/gradient.   Abnormal
9613                         termination signalled.
9614                 * -3    box  constraints are infeasible.
9615                         Note: infeasibility of  non-box  constraints  does
9616                               NOT trigger emergency completion;  you  have
9617                               to examine rep.bcerr/rep.lcerr/rep.nlcerr to
9618                               detect possibly inconsistent constraints.
9619 
9620                 === SUCCESS CODES ===
9621                 *  2   scaled step is no more than EpsX.
9622                 *  5   MaxIts steps were taken.
9623                 *  8   user   requested    algorithm    termination    via
9624                        minnlcrequesttermination(), last accepted point  is
9625                        returned.
9626 
9627                 More information about fields of this  structure  can  be
9628                 found in the comments on minnlcreport datatype.
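
A hedged retrieval sketch (the report fields used below are described  in
the comments on the minnlcreport datatype):
>
> alglib::real_1d_array x;
> alglib::minnlcreport rep;
> alglib::minnlcresults(state, x, rep);
> if( rep.terminationtype>0 )
>     printf("solution: %s\n", x.tostring(6).c_str());
> else
>     printf("failure, terminationtype=%d\n", (int)rep.terminationtype);
>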
9629 
9630   -- ALGLIB --
9631      Copyright 06.06.2014 by Bochkanov Sergey
9632 *************************************************************************/
9633 void minnlcresults(const minnlcstate &state, real_1d_array &x, minnlcreport &rep, const xparams _xparams = alglib::xdefault);
9634 
9635 
9636 /*************************************************************************
9637 NLC results
9638 
Buffered implementation of MinNLCResults() which uses a pre-allocated
buffer to store X[]. If the buffer size is too small, it is resized. This
function is intended to be used in the inner cycles of performance-critical
algorithms where the array reallocation penalty is too large to be ignored.
9643 
9644   -- ALGLIB --
9645      Copyright 28.11.2010 by Bochkanov Sergey
9646 *************************************************************************/
9647 void minnlcresultsbuf(const minnlcstate &state, real_1d_array &x, minnlcreport &rep, const xparams _xparams = alglib::xdefault);
9648 
9649 
9650 /*************************************************************************
This subroutine submits a request for termination of a running  optimizer.
It should be called from a user-supplied callback when the  user  decides
that it is time to "smoothly" terminate the optimization  process.  As  a
result, the optimizer stops at the point which was "current accepted" when
the termination request was submitted and returns completion code 8
(successful termination).
9656 
9657 INPUT PARAMETERS:
9658     State   -   optimizer structure
9659 
9660 NOTE: after  request  for  termination  optimizer  may   perform   several
9661       additional calls to user-supplied callbacks. It does  NOT  guarantee
9662       to stop immediately - it just guarantees that these additional calls
9663       will be discarded later.
9664 
9665 NOTE: calling this function on optimizer which is NOT running will have no
9666       effect.
9667 
9668 NOTE: multiple calls to this function are possible. First call is counted,
9669       subsequent calls are silently ignored.
9670 
9671   -- ALGLIB --
9672      Copyright 08.10.2014 by Bochkanov Sergey
9673 *************************************************************************/
9674 void minnlcrequesttermination(const minnlcstate &state, const xparams _xparams = alglib::xdefault);
9675 
9676 
9677 /*************************************************************************
9678 This subroutine restarts algorithm from new point.
9679 All optimization parameters (including constraints) are left unchanged.
9680 
This function allows solving multiple optimization problems  (which  must
have the same number of dimensions) without the object reallocation penalty.
9683 
9684 INPUT PARAMETERS:
9685     State   -   structure previously allocated with MinNLCCreate call.
9686     X       -   new starting point.
9687 
9688   -- ALGLIB --
9689      Copyright 28.11.2010 by Bochkanov Sergey
9690 *************************************************************************/
9691 void minnlcrestartfrom(const minnlcstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
9692 #endif
9693 
9694 #if defined(AE_COMPILE_MINNS) || !defined(AE_PARTIAL_BUILD)
9695 /*************************************************************************
9696                   NONSMOOTH NONCONVEX OPTIMIZATION
9697             SUBJECT TO BOX/LINEAR/NONLINEAR-NONSMOOTH CONSTRAINTS
9698 
9699 DESCRIPTION:
9700 
9701 The  subroutine  minimizes  function   F(x)  of N arguments subject to any
9702 combination of:
9703 * bound constraints
9704 * linear inequality constraints
9705 * linear equality constraints
9706 * nonlinear equality constraints Gi(x)=0
9707 * nonlinear inequality constraints Hi(x)<=0
9708 
9709 IMPORTANT: see MinNSSetAlgoAGS for important  information  on  performance
9710            restrictions of AGS solver.
9711 
9712 REQUIREMENTS:
9713 * starting point X0 must be feasible or not too far away from the feasible
9714   set
9715 * F(), G(), H() are continuous, locally Lipschitz  and  continuously  (but
9716   not necessarily twice) differentiable in an open dense  subset  of  R^N.
9717   Functions F(), G() and H() may be nonsmooth and non-convex.
9718   Informally speaking, it means  that  functions  are  composed  of  large
9719   differentiable "patches" with nonsmoothness having  place  only  at  the
9720   boundaries between these "patches".
9721   Most real-life nonsmooth  functions  satisfy  these  requirements.  Say,
9722   anything which involves finite number of abs(), min() and max() is  very
9723   likely to pass the test.
9724   Say, it is possible to optimize anything of the following:
9725   * f=abs(x0)+2*abs(x1)
9726   * f=max(x0,x1)
9727   * f=sin(max(x0,x1)+abs(x2))
9728 * for nonlinearly constrained problems: F()  must  be  bounded from  below
9729   without nonlinear constraints (this requirement is due to the fact that,
9730   contrary to box and linear constraints, nonlinear ones  require  special
9731   handling).
9732 * user must provide function value and gradient for F(), H(), G()  at  all
9733   points where function/gradient can be calculated. If optimizer  requires
9734   value exactly at the boundary between "patches" (say, at x=0 for f=abs(x)),
9735   where gradient is not defined, user may resolve tie arbitrarily (in  our
9736   case - return +1 or -1 at its discretion).
9737 * NS solver supports numerical differentiation, i.e. it may  differentiate
9738   your function for you,  but  it  results  in  2N  increase  of  function
9739   evaluations. Not recommended unless you solve really small problems. See
9740   minnscreatef() for more information on this functionality.
9741 
9742 USAGE:
9743 
9744 1. User initializes algorithm state with MinNSCreate() call  and   chooses
9745    what NLC solver to use. There is some solver which is used by  default,
9746    with default settings, but you should NOT rely on  default  choice.  It
9747    may change in future releases of ALGLIB without notice, and no one  can
9748    guarantee that new solver will be  able  to  solve  your  problem  with
9749    default settings.
9750 
   On the other hand, if you choose the solver explicitly, you can be pretty
   sure that it will work with new ALGLIB releases.
9753 
9754    In the current release following solvers can be used:
9755    * AGS solver (activated with MinNSSetAlgoAGS() function)
9756 
9757 2. User adds boundary and/or linear and/or nonlinear constraints by  means
9758    of calling one of the following functions:
9759    a) MinNSSetBC() for boundary constraints
9760    b) MinNSSetLC() for linear constraints
9761    c) MinNSSetNLC() for nonlinear constraints
9762    You may combine (a), (b) and (c) in one optimization problem.
9763 
9764 3. User sets scale of the variables with MinNSSetScale() function. It   is
9765    VERY important to set  scale  of  the  variables,  because  nonlinearly
9766    constrained problems are hard to solve when variables are badly scaled.
9767 
9768 4. User sets stopping conditions with MinNSSetCond().
9769 
9770 5. Finally, user calls MinNSOptimize()  function  which  takes   algorithm
9771    state and pointer (delegate, etc) to callback function which calculates
9772    F/G/H.
9773 
6. User calls MinNSResults() to get the solution.

7. Optionally, user may call MinNSRestartFrom() to solve  another  problem
   with the same N but another starting point. MinNSRestartFrom()  allows
   reusing the already initialized structure.
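
A hedged end-to-end sketch of this workflow (the problem, the constants and
the (radius, penalty) arguments of MinNSSetAlgoAGS() are illustrative
assumptions; my_jac denotes a user-written F/J callback like the one
sketched in the minnssetnlc() comments below):
>
> alglib::real_1d_array x0 = "[1.0, 1.0]";
> alglib::real_1d_array s  = "[1.0, 1.0]";
> alglib::real_1d_array xsol;
> alglib::minnsstate state;
> alglib::minnsreport rep;
> alglib::minnscreate(x0, state);
> alglib::minnssetalgoags(state, 0.1, 50.0);  // sampling radius, penalty Rho
> alglib::minnssetscale(state, s);
> alglib::minnssetcond(state, 1.0e-5, 0);
> alglib::minnsoptimize(state, my_jac);       // my_jac: user F/J callback
> alglib::minnsresults(state, xsol, rep);
>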
9779 
9780 
9781 INPUT PARAMETERS:
9782     N       -   problem dimension, N>0:
9783                 * if given, only leading N elements of X are used
9784                 * if not given, automatically determined from size of X
9785     X       -   starting point, array[N]:
9786                 * it is better to set X to a feasible point
9787                 * but X can be infeasible, in which case algorithm will try
9788                   to find feasible point first, using X as initial
9789                   approximation.
9790 
9791 OUTPUT PARAMETERS:
9792     State   -   structure stores algorithm state
9793 
9794 NOTE: minnscreatef() function may be used if  you  do  not  have  analytic
9795       gradient.   This   function  creates  solver  which  uses  numerical
9796       differentiation with user-specified step.
9797 
9798   -- ALGLIB --
9799      Copyright 18.05.2015 by Bochkanov Sergey
9800 *************************************************************************/
9801 void minnscreate(const ae_int_t n, const real_1d_array &x, minnsstate &state, const xparams _xparams = alglib::xdefault);
9802 void minnscreate(const real_1d_array &x, minnsstate &state, const xparams _xparams = alglib::xdefault);
9803 
9804 
9805 /*************************************************************************
Version of minnscreate() which uses numerical differentiation. I.e.,  you
9807 do not have to calculate derivatives yourself. However, this version needs
9808 2N times more function evaluations.
9809 
9810 2-point differentiation formula is  used,  because  more  precise  4-point
9811 formula is unstable when used on non-smooth functions.
9812 
9813 INPUT PARAMETERS:
9814     N       -   problem dimension, N>0:
9815                 * if given, only leading N elements of X are used
9816                 * if not given, automatically determined from size of X
9817     X       -   starting point, array[N]:
9818                 * it is better to set X to a feasible point
9819                 * but X can be infeasible, in which case algorithm will try
9820                   to find feasible point first, using X as initial
9821                   approximation.
9822     DiffStep-   differentiation  step,  DiffStep>0.   Algorithm   performs
9823                 numerical differentiation  with  step  for  I-th  variable
9824                 being equal to DiffStep*S[I] (here S[] is a  scale vector,
9825                 set by minnssetscale() function).
9826                 Do not use  too  small  steps,  because  it  may  lead  to
9827                 catastrophic cancellation during intermediate calculations.
9828 
9829 OUTPUT PARAMETERS:
9830     State   -   structure stores algorithm state
9831 
9832   -- ALGLIB --
9833      Copyright 18.05.2015 by Bochkanov Sergey
9834 *************************************************************************/
9835 void minnscreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minnsstate &state, const xparams _xparams = alglib::xdefault);
9836 void minnscreatef(const real_1d_array &x, const double diffstep, minnsstate &state, const xparams _xparams = alglib::xdefault);
9837 
9838 
9839 /*************************************************************************
9840 This function sets boundary constraints.
9841 
9842 Boundary constraints are inactive by default (after initial creation).
9843 They are preserved after algorithm restart with minnsrestartfrom().
9844 
9845 INPUT PARAMETERS:
9846     State   -   structure stores algorithm state
9847     BndL    -   lower bounds, array[N].
9848                 If some (all) variables are unbounded, you may specify
9849                 very small number or -INF.
9850     BndU    -   upper bounds, array[N].
9851                 If some (all) variables are unbounded, you may specify
9852                 very large number or +INF.
9853 
9854 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
9855 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
9856 
9857 NOTE 2: AGS solver has following useful properties:
9858 * bound constraints are always satisfied exactly
9859 * function is evaluated only INSIDE area specified by  bound  constraints,
9860   even  when  numerical  differentiation is used (algorithm adjusts  nodes
9861   according to boundary constraints)
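
A hedged 2-variable sketch which keeps x0 in [0,+INF) and x1 in [-1,+1]:
>
> alglib::real_1d_array bndl = "[0.0, -1.0]";
> alglib::real_1d_array bndu = "[+inf, +1.0]";
> alglib::minnssetbc(state, bndl, bndu);
>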
9862 
9863   -- ALGLIB --
9864      Copyright 18.05.2015 by Bochkanov Sergey
9865 *************************************************************************/
9866 void minnssetbc(const minnsstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
9867 
9868 
9869 /*************************************************************************
9870 This function sets linear constraints.
9871 
9872 Linear constraints are inactive by default (after initial creation).
9873 They are preserved after algorithm restart with minnsrestartfrom().
9874 
9875 INPUT PARAMETERS:
9876     State   -   structure previously allocated with minnscreate() call.
9877     C       -   linear constraints, array[K,N+1].
9878                 Each row of C represents one constraint, either equality
9879                 or inequality (see below):
9880                 * first N elements correspond to coefficients,
9881                 * last element corresponds to the right part.
9882                 All elements of C (including right part) must be finite.
9883     CT      -   type of constraints, array[K]:
9884                 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
9885                 * if CT[i]=0, then I-th constraint is C[i,*]*x  = C[i,n+1]
9886                 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
9887     K       -   number of equality/inequality constraints, K>=0:
9888                 * if given, only leading K elements of C/CT are used
9889                 * if not given, automatically determined from sizes of C/CT
9890 
9891 NOTE: linear (non-bound) constraints are satisfied only approximately:
9892 
9893 * there always exists some minor violation (about current sampling  radius
9894   in magnitude during optimization, about EpsX in the solution) due to use
9895   of penalty method to handle constraints.
9896 * numerical differentiation, if used, may  lead  to  function  evaluations
9897   outside  of the feasible  area,   because   algorithm  does  NOT  change
9898   numerical differentiation formula according to linear constraints.
9899 
9900 If you want constraints to be  satisfied  exactly, try to reformulate your
9901 problem  in  such  manner  that  all constraints will become boundary ones
9902 (this kind of constraints is always satisfied exactly, both in  the  final
9903 solution and in all intermediate points).
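
A hedged sketch which adds the constraints x0+x1<=2 and x0-x1=0 to a
2-variable problem (layout of C and CT as described above):
>
> alglib::real_2d_array c     = "[[1,1,2],[1,-1,0]]";
> alglib::integer_1d_array ct = "[-1,0]";
> alglib::minnssetlc(state, c, ct);
>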
9904 
9905   -- ALGLIB --
9906      Copyright 18.05.2015 by Bochkanov Sergey
9907 *************************************************************************/
9908 void minnssetlc(const minnsstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
9909 void minnssetlc(const minnsstate &state, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
9910 
9911 
9912 /*************************************************************************
9913 This function sets nonlinear constraints.
9914 
In fact, this function sets the NUMBER of nonlinear constraints. Constraints
themselves (constraint functions) are passed to the minnsoptimize() method.
This method requires a user-defined vector function F[] and  its  Jacobian
J[], where:
9919 * first component of F[] and first row  of  Jacobian  J[]  correspond   to
9920   function being minimized
9921 * next NLEC components of F[] (and rows  of  J)  correspond  to  nonlinear
9922   equality constraints G_i(x)=0
9923 * next NLIC components of F[] (and rows  of  J)  correspond  to  nonlinear
9924   inequality constraints H_i(x)<=0
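
A hedged sketch of the corresponding callback layout for  NLEC=1,  NLIC=1
(the problem is purely illustrative; the callback signature shown  is  the
jac-style one expected by minnsoptimize()):
>
> // fi[0]/jac row 0: target;  fi[1]: G0(x)=0;  fi[2]: H0(x)<=0
> void my_jac(const alglib::real_1d_array &x, alglib::real_1d_array &fi,
>             alglib::real_2d_array &jac, void *ptr)
> {
>     fi[0] = x[0]*x[0] + x[1]*x[1];
>     jac[0][0] = 2*x[0];  jac[0][1] = 2*x[1];
>     fi[1] = x[0] + x[1] - 1.0;
>     jac[1][0] = 1.0;     jac[1][1] = 1.0;
>     fi[2] = -x[0];
>     jac[2][0] = -1.0;    jac[2][1] = 0.0;
> }
> ...
> alglib::minnssetnlc(state, 1, 1);
>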
9925 
9926 NOTE: you may combine nonlinear constraints with linear/boundary ones.  If
9927       your problem has mixed constraints, you  may explicitly specify some
9928       of them as linear ones. It may help optimizer to  handle  them  more
9929       efficiently.
9930 
9931 INPUT PARAMETERS:
9932     State   -   structure previously allocated with minnscreate() call.
9933     NLEC    -   number of Non-Linear Equality Constraints (NLEC), >=0
    NLIC    -   number of Non-Linear Inequality Constraints (NLIC), >=0
9935 
9936 NOTE 1: nonlinear constraints are satisfied only  approximately!   It   is
9937         possible   that  algorithm  will  evaluate  function  outside   of
9938         the feasible area!
9939 
9940 NOTE 2: algorithm scales variables  according  to   scale   specified   by
9941         minnssetscale()  function,  so  it can handle problems with  badly
9942         scaled variables (as long as we KNOW their scales).
9943 
9944         However,  there  is  no  way  to  automatically  scale   nonlinear
9945         constraints Gi(x) and Hi(x). Inappropriate scaling  of  Gi/Hi  may
9946         ruin convergence. Solving problem with  constraint  "1000*G0(x)=0"
        is NOT the same as solving it with constraint "0.001*G0(x)=0".
9948 
9949         It  means  that  YOU  are  the  one who is responsible for correct
9950         scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you
9951         to scale nonlinear constraints in such way that I-th component  of
9952         dG/dX (or dH/dx) has approximately unit  magnitude  (for  problems
9953         with unit scale)  or  has  magnitude approximately equal to 1/S[i]
9954         (where S is a scale set by minnssetscale() function).
9955 
9956 NOTE 3: nonlinear constraints are always hard to handle,  no  matter  what
9957         algorithm you try to use. Even basic box/linear constraints modify
9958         function  curvature   by  adding   valleys  and  ridges.  However,
9959         nonlinear constraints add valleys which are very  hard  to  follow
9960         due to their "curved" nature.
9961 
9962         It means that optimization with single nonlinear constraint may be
9963         significantly slower than optimization with multiple linear  ones.
        This is a normal situation, and we recommend you to carefully choose
        the Penalty parameter of minnssetalgoags(), because  too  large  a
        value may slow down convergence.
9967 
9968 
9969   -- ALGLIB --
9970      Copyright 18.05.2015 by Bochkanov Sergey
9971 *************************************************************************/
9972 void minnssetnlc(const minnsstate &state, const ae_int_t nlec, const ae_int_t nlic, const xparams _xparams = alglib::xdefault);
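

/*************************************************************************
USAGE SKETCH (illustrative only; the callback name nsfunc_jac, the target
and the constraint functions are assumptions made for this sketch, and
<math.h> is assumed to be included). If minnssetnlc(state,1,1) was called,
a Jacobian callback has to fill F[0] with the target, F[1] with the
equality constraint G0(x)=0, F[2] with the inequality constraint H0(x)<=0,
together with the matching rows of J:
>
>     void nsfunc_jac(const alglib::real_1d_array &x, alglib::real_1d_array &fi,
>                     alglib::real_2d_array &jac, void *ptr)
>     {
>         fi[0] = fabs(x[0])+fabs(x[1]);          // target being minimized
>         jac[0][0] = x[0]>=0 ? +1.0 : -1.0;      // (a subgradient at x[i]=0)
>         jac[0][1] = x[1]>=0 ? +1.0 : -1.0;
>         fi[1] = x[0]+x[1]-1.0;                  // G0(x)=0
>         jac[1][0] = +1.0;  jac[1][1] = +1.0;
>         fi[2] = x[0]-x[1];                      // H0(x)<=0
>         jac[2][0] = +1.0;  jac[2][1] = -1.0;
>     }
>
*************************************************************************/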
9973 
9974 
9975 /*************************************************************************
9976 This function sets stopping conditions for iterations of optimizer.
9977 
9978 INPUT PARAMETERS:
9979     State   -   structure which stores algorithm state
9980     EpsX    -   >=0
9981                 The AGS solver finishes its work if  on  k+1-th  iteration
9982                 sampling radius decreases below EpsX.
9983     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
9984                 iterations is unlimited.
9985 
9986 Passing EpsX=0  and  MaxIts=0  (simultaneously)  will  lead  to  automatic
9987 stopping criterion selection. We do not recommend you to rely  on  default
9988 choice in production code.
9989 
9990   -- ALGLIB --
9991      Copyright 18.05.2015 by Bochkanov Sergey
9992 *************************************************************************/
9993 void minnssetcond(const minnsstate &state, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
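

/*************************************************************************
USAGE SKETCH (illustrative only; the tolerance is an example value, not a
recommendation). Stop AGS iterations once the sampling radius falls below
1.0E-5, without limiting the iteration count:
>
>     alglib::minnssetcond(state, 1.0e-5, 0);
>
*************************************************************************/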
9994 
9995 
9996 /*************************************************************************
9997 This function sets scaling coefficients for NLC optimizer.
9998 
9999 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
10000 size and gradient are scaled before comparison with tolerances).  Scale of
10001 the I-th variable is a translation invariant measure of:
10002 a) "how large" the variable is
10003 b) how large the step should be to make significant changes in the function
10004 
10005 Scaling is also used by finite difference variant of the optimizer  - step
10006 along I-th axis is equal to DiffStep*S[I].
10007 
10008 INPUT PARAMETERS:
10009     State   -   structure stores algorithm state
10010     S       -   array[N], non-zero scaling coefficients
10011                 S[i] may be negative, sign doesn't matter.
10012 
10013   -- ALGLIB --
10014      Copyright 18.05.2015 by Bochkanov Sergey
10015 *************************************************************************/
10016 void minnssetscale(const minnsstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
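

/*************************************************************************
USAGE SKETCH (illustrative only; the magnitudes are assumptions). For a
hypothetical 2-dimensional problem whose first variable is of order 1 and
whose second variable is of order 1000, scales could be set as follows:
>
>     alglib::real_1d_array s = "[1,1000]";
>     alglib::minnssetscale(state, s);
>
*************************************************************************/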
10017 
10018 
10019 /*************************************************************************
10020 This function tells MinNS unit to use  AGS  (adaptive  gradient  sampling)
10021 algorithm for nonsmooth constrained  optimization.  This  algorithm  is  a
10022 slight modification of one described in  "An  Adaptive  Gradient  Sampling
Algorithm for Nonsmooth Optimization" by Frank E. Curtis and Xiaocun Que.
10024 
10025 This optimizer has following benefits and drawbacks:
10026 + robustness; it can be used with nonsmooth and nonconvex functions.
10027 + relatively easy tuning; most of the metaparameters are easy to select.
- it has the convergence rate of steepest descent, slower than CG/LBFGS.
- each iteration involves evaluation of ~2N gradient values  and  solution
  of a 2Nx2N quadratic programming problem, which limits applicability  of
  the algorithm to small-scale problems (up to 50-100 variables).
10032 
10033 IMPORTANT: this  algorithm  has  convergence  guarantees,   i.e.  it  will
10034            steadily move towards some stationary point of the function.
10035 
10036            However, "stationary point" does not  always  mean  "solution".
10037            Nonsmooth problems often have "flat spots",  i.e.  areas  where
           the function does not change at all. Such "flat spots"  are
           stationary points by definition, and the algorithm may be
           caught there.
10040 
10041            Nonsmooth CONVEX tasks are not prone to  this  problem. Say, if
10042            your function has form f()=MAX(f0,f1,...), and f_i are  convex,
10043            then f() is convex too and you have guaranteed  convergence  to
10044            solution.
10045 
10046 INPUT PARAMETERS:
10047     State   -   structure which stores algorithm state
10048     Radius  -   initial sampling radius, >=0.
10049 
                Internally multiplied by the vector of per-variable scales
                specified by minnssetscale().
10052 
10053                 You should select relatively large sampling radius, roughly
10054                 proportional to scaled length of the first  steps  of  the
10055                 algorithm. Something close to 0.1 in magnitude  should  be
10056                 good for most problems.
10057 
10058                 AGS solver can automatically decrease radius, so too large
10059                 radius is  not a problem (assuming that you  won't  choose
                so large a radius that the algorithm will sample  function
                at points too far away, where the gradient is irrelevant).
10062 
10063                 Too small radius won't cause algorithm to fail, but it may
10064                 slow down algorithm (it may  have  to  perform  too  short
10065                 steps).
10066     Penalty -   penalty coefficient for nonlinear constraints:
10067                 * for problem with nonlinear constraints  should  be  some
10068                   problem-specific  positive   value,  large  enough  that
10069                   penalty term changes shape of the function.
10070                   Starting  from  some  problem-specific   value   penalty
10071                   coefficient becomes  large  enough  to  exactly  enforce
10072                   nonlinear constraints;  larger  values  do  not  improve
10073                   precision.
10074                   Increasing it too much may slow down convergence, so you
10075                   should choose it carefully.
10076                 * can be zero for problems WITHOUT  nonlinear  constraints
10077                   (i.e. for unconstrained ones or ones with  just  box  or
10078                   linear constraints)
10079                 * if you specify zero value for problem with at least  one
10080                   nonlinear  constraint,  algorithm  will  terminate  with
10081                   error code -1.
10082 
10083 ALGORITHM OUTLINE
10084 
10085 The very basic outline of unconstrained AGS algorithm is given below:
10086 
0. If sampling radius is below EpsX  or  we  performed  more  than  MaxIts
10088    iterations - STOP.
10089 1. sample O(N) gradient values at random locations  around  current point;
10090    informally speaking, this sample is an implicit piecewise  linear model
10091    of the function, although algorithm formulation does  not  mention that
10092    explicitly
10093 2. solve quadratic programming problem in order to find descent direction
10094 3. if QP solver tells us that we  are  near  solution,  decrease  sampling
10095    radius and move to (0)
10096 4. perform backtracking line search
10097 5. after moving to new point, goto (0)
10098 
10099 Constraint handling details:
10100 * box constraints are handled exactly by algorithm
10101 * linear/nonlinear constraints are handled by adding L1  penalty.  Because
10102   our solver can handle nonsmoothness, we can  use  L1  penalty  function,
10103   which is an exact one  (i.e.  exact  solution  is  returned  under  such
10104   penalty).
10105 * penalty coefficient for  linear  constraints  is  chosen  automatically;
10106   however, penalty coefficient for nonlinear constraints must be specified
10107   by user.
10108 
10109 ===== TRACING AGS SOLVER =================================================
10110 
10111 AGS solver supports advanced tracing capabilities. You can trace algorithm
10112 output by specifying following trace symbols (case-insensitive)  by  means
10113 of trace_file() call:
10114 * 'AGS'         - for basic trace of algorithm  steps and decisions.  Only
10115                   short scalars (function values and deltas) are  printed.
10116                   N-dimensional quantities like search directions are  NOT
10117                   printed.
10118 * 'AGS.DETAILED'- for output of points being visited and search directions
10119                   This  symbol  also  implicitly  defines  'AGS'. You  can
10120                   control output format by additionally specifying:
10121                   * nothing     to output in  6-digit exponential format
10122                   * 'PREC.E15'  to output in 15-digit exponential format
10123                   * 'PREC.F6'   to output in  6-digit fixed-point format
10124 * 'AGS.DETAILED.SAMPLE'-
                  for output of points being visited, search directions,
                  and the gradient sample. May take a LOT of space; do not
                  use it on problems with more than several tens of vars.
10128                   This  symbol   also    implicitly   defines   'AGS'  and
10129                   'AGS.DETAILED'.
10130 
10131 By default trace is disabled and adds  no  overhead  to  the  optimization
10132 process. However, specifying any of the symbols adds some  formatting  and
10133 output-related overhead.
10134 
10135 You may specify multiple symbols by separating them with commas:
10136 >
10137 > alglib::trace_file("AGS,PREC.F6", "path/to/trace.log")
10138 >
10139 
10140 
10141   -- ALGLIB --
10142      Copyright 18.05.2015 by Bochkanov Sergey
10143 *************************************************************************/
10144 void minnssetalgoags(const minnsstate &state, const double radius, const double penalty, const xparams _xparams = alglib::xdefault);
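

/*************************************************************************
USAGE SKETCH (illustrative only; Penalty=50.0 is an assumed problem-specific
value, not a recommendation). Two typical configurations of the AGS solver:
>
>     alglib::minnssetalgoags(state, 0.1,  0.0); // box/linear constraints only
>     alglib::minnssetalgoags(state, 0.1, 50.0); // with nonlinear constraints
>
*************************************************************************/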
10145 
10146 
10147 /*************************************************************************
10148 This function turns on/off reporting.
10149 
10150 INPUT PARAMETERS:
10151     State   -   structure which stores algorithm state
10152     NeedXRep-   whether iteration reports are needed or not
10153 
10154 If NeedXRep is True, algorithm will call rep() callback function if  it is
10155 provided to minnsoptimize().
10156 
10157   -- ALGLIB --
10158      Copyright 28.11.2010 by Bochkanov Sergey
10159 *************************************************************************/
10160 void minnssetxrep(const minnsstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
10161 
10162 
10163 /*************************************************************************
10164 This subroutine submits request for termination of running  optimizer.  It
10165 should be called from user-supplied callback when user decides that it  is
10166 time to "smoothly" terminate optimization process.  As  result,  optimizer
10167 stops at point which was "current accepted" when termination  request  was
10168 submitted and returns error code 8 (successful termination).
10169 
10170 INPUT PARAMETERS:
10171     State   -   optimizer structure
10172 
10173 NOTE: after  request  for  termination  optimizer  may   perform   several
10174       additional calls to user-supplied callbacks. It does  NOT  guarantee
10175       to stop immediately - it just guarantees that these additional calls
10176       will be discarded later.
10177 
10178 NOTE: calling this function on optimizer which is NOT running will have no
10179       effect.
10180 
10181 NOTE: multiple calls to this function are possible. First call is counted,
10182       subsequent calls are silently ignored.
10183 
10184   -- ALGLIB --
10185      Copyright 18.05.2015 by Bochkanov Sergey
10186 *************************************************************************/
10187 void minnsrequesttermination(const minnsstate &state, const xparams _xparams = alglib::xdefault);
10188 
10189 
10190 /*************************************************************************
10191 This function provides reverse communication interface
10192 Reverse communication interface is not documented or recommended to use.
10193 See below for functions which provide better documented API
10194 *************************************************************************/
10195 bool minnsiteration(const minnsstate &state, const xparams _xparams = alglib::xdefault);
10196 
10197 
10198 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear optimizer.
10200 
10201 These functions accept following parameters:
10202     state   -   algorithm state
10203     fvec    -   callback which calculates function vector fi[]
10204                 at given point x
10205     jac     -   callback which calculates function vector fi[]
10206                 and Jacobian jac at given point x
10207     rep     -   optional callback which is called after each iteration
10208                 can be NULL
10209     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
10210                 can be NULL
10211 
10212 
10213 NOTES:
10214 
10215 1. This function has two different implementations: one which  uses  exact
10216    (analytical) user-supplied Jacobian, and one which uses  only  function
10217    vector and numerically  differentiates  function  in  order  to  obtain
10218    gradient.
10219 
10220    Depending  on  the  specific  function  used to create optimizer object
10221    you should choose appropriate variant of  minnsoptimize() -  one  which
10222    accepts function AND Jacobian or one which accepts ONLY function.
10223 
10224    Be careful to choose variant of minnsoptimize()  which  corresponds  to
10225    your optimization scheme! Table below lists different  combinations  of
10226    callback (function/gradient) passed to minnsoptimize()    and  specific
10227    function used to create optimizer.
10228 
10229 
10230                      |         USER PASSED TO minnsoptimize()
10231    CREATED WITH      |  function only   |  function and gradient
10232    ------------------------------------------------------------
10233    minnscreatef()    |     works               FAILS
10234    minnscreate()     |     FAILS               works
10235 
10236    Here "FAILS" denotes inappropriate combinations  of  optimizer creation
   function  and  minnsoptimize()  version.   Attempts  to    use     such
10238    combination will lead to exception. Either  you  did  not pass gradient
10239    when it WAS needed or you passed gradient when it was NOT needed.
10240 
10241   -- ALGLIB --
10242      Copyright 18.05.2015 by Bochkanov Sergey
10243 
10244 *************************************************************************/
10245 void minnsoptimize(minnsstate &state,
10246     void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
10247     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
10248     void *ptr = NULL,
10249     const xparams _xparams = alglib::xdefault);
10250 void minnsoptimize(minnsstate &state,
10251     void  (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
10252     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
10253     void *ptr = NULL,
10254     const xparams _xparams = alglib::xdefault);
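

/*************************************************************************
USAGE SKETCH (illustrative only, loosely modeled on the Reference  Manual
examples but not copied from them; <math.h> is assumed to  be  included).
It minimizes the nonsmooth function |x0|+|x1| with numerical
differentiation, so only the function-vector callback is supplied:
>
>     void nsfvec(const alglib::real_1d_array &x, alglib::real_1d_array &fi, void *ptr)
>     {
>         fi[0] = fabs(x[0])+fabs(x[1]);    // single component: the target
>     }
>
>     ...
>
>     alglib::real_1d_array x = "[1,1]";
>     alglib::minnsstate    state;
>     alglib::minnsreport   rep;
>     alglib::minnscreatef(2, x, 1.0e-6, state);  // FD step 1.0E-6
>     alglib::minnssetalgoags(state, 0.1, 0.0);
>     alglib::minnssetcond(state, 1.0e-5, 0);
>     alglib::minnsoptimize(state, nsfvec);
>     alglib::minnsresults(state, x, rep);
>
*************************************************************************/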
10255 
10256 
10257 /*************************************************************************
10258 MinNS results
10259 
10260 INPUT PARAMETERS:
10261     State   -   algorithm state
10262 
10263 OUTPUT PARAMETERS:
10264     X       -   array[0..N-1], solution
10265     Rep     -   optimization report. You should check Rep.TerminationType
10266                 in  order  to  distinguish  successful  termination  from
10267                 unsuccessful one:
10268                 * -8   internal integrity control  detected  infinite  or
10269                        NAN   values   in   function/gradient.    Abnormal
10270                        termination signalled.
10271                 * -3   box constraints are inconsistent
10272                 * -1   inconsistent parameters were passed:
10273                        * penalty parameter for minnssetalgoags() is zero,
10274                          but we have nonlinear constraints set by minnssetnlc()
                *  2   sampling radius decreased below EpsX
                *  7   stopping conditions are too stringent,
                       further improvement is impossible,
                       X contains best point found so far.
                *  8   user requested termination via minnsrequesttermination()
10280 
10281   -- ALGLIB --
10282      Copyright 18.05.2015 by Bochkanov Sergey
10283 *************************************************************************/
10284 void minnsresults(const minnsstate &state, real_1d_array &x, minnsreport &rep, const xparams _xparams = alglib::xdefault);
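

/*************************************************************************
USAGE SKETCH (illustrative only; assumes state/x/rep from the sketch above).
A minimal check of the termination code after the session:
>
>     alglib::minnsresults(state, x, rep);
>     if( rep.terminationtype<0 )
>     {
>         // -8/-3/-1: infinities or NANs, inconsistent box constraints,
>         // or inconsistent parameters - the result can not be trusted
>     }
>
*************************************************************************/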
10285 
10286 
10287 /*************************************************************************
10288 
10289 Buffered implementation of minnsresults() which uses pre-allocated  buffer
10290 to store X[]. If buffer size is  too  small,  it  resizes  buffer.  It  is
10291 intended to be used in the inner cycles of performance critical algorithms
10292 where array reallocation penalty is too large to be ignored.
10293 
10294   -- ALGLIB --
10295      Copyright 18.05.2015 by Bochkanov Sergey
10296 *************************************************************************/
10297 void minnsresultsbuf(const minnsstate &state, real_1d_array &x, minnsreport &rep, const xparams _xparams = alglib::xdefault);
10298 
10299 
10300 /*************************************************************************
10301 This subroutine restarts algorithm from new point.
10302 All optimization parameters (including constraints) are left unchanged.
10303 
This function allows one to solve multiple  optimization  problems  (which
must have the same number of dimensions)  without  object  reallocation
penalty.
10306 
10307 INPUT PARAMETERS:
10308     State   -   structure previously allocated with minnscreate() call.
10309     X       -   new starting point.
10310 
10311   -- ALGLIB --
10312      Copyright 18.05.2015 by Bochkanov Sergey
10313 *************************************************************************/
10314 void minnsrestartfrom(const minnsstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
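

/*************************************************************************
USAGE SKETCH (illustrative only; reuses the nsfvec callback and the  state
object from the minnsoptimize() sketch above). Re-solving the same problem
from another starting point without reallocating the solver object:
>
>     alglib::real_1d_array x2 = "[-1,2]";
>     alglib::minnsrestartfrom(state, x2);
>     alglib::minnsoptimize(state, nsfvec);
>     alglib::minnsresults(state, x2, rep);
>
*************************************************************************/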
10315 #endif
10316 
10317 #if defined(AE_COMPILE_MINCOMP) || !defined(AE_PARTIAL_BUILD)
10318 /*************************************************************************
10319 Obsolete function, use MinLBFGSSetPrecDefault() instead.
10320 
10321   -- ALGLIB --
10322      Copyright 13.10.2010 by Bochkanov Sergey
10323 *************************************************************************/
10324 void minlbfgssetdefaultpreconditioner(const minlbfgsstate &state, const xparams _xparams = alglib::xdefault);
10325 
10326 
10327 /*************************************************************************
10328 Obsolete function, use MinLBFGSSetCholeskyPreconditioner() instead.
10329 
10330   -- ALGLIB --
10331      Copyright 13.10.2010 by Bochkanov Sergey
10332 *************************************************************************/
10333 void minlbfgssetcholeskypreconditioner(const minlbfgsstate &state, const real_2d_array &p, const bool isupper, const xparams _xparams = alglib::xdefault);
10334 
10335 
10336 /*************************************************************************
10337 This is obsolete function which was used by previous version of the  BLEIC
10338 optimizer. It does nothing in the current version of BLEIC.
10339 
10340   -- ALGLIB --
10341      Copyright 28.11.2010 by Bochkanov Sergey
10342 *************************************************************************/
10343 void minbleicsetbarrierwidth(const minbleicstate &state, const double mu, const xparams _xparams = alglib::xdefault);
10344 
10345 
10346 /*************************************************************************
10347 This is obsolete function which was used by previous version of the  BLEIC
10348 optimizer. It does nothing in the current version of BLEIC.
10349 
10350   -- ALGLIB --
10351      Copyright 28.11.2010 by Bochkanov Sergey
10352 *************************************************************************/
10353 void minbleicsetbarrierdecay(const minbleicstate &state, const double mudecay, const xparams _xparams = alglib::xdefault);
10354 
10355 
10356 /*************************************************************************
10357 Obsolete optimization algorithm.
10358 Was replaced by MinBLEIC subpackage.
10359 
10360   -- ALGLIB --
10361      Copyright 25.03.2010 by Bochkanov Sergey
10362 *************************************************************************/
10363 void minasacreate(const ae_int_t n, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, minasastate &state, const xparams _xparams = alglib::xdefault);
10364 void minasacreate(const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, minasastate &state, const xparams _xparams = alglib::xdefault);
10365 
10366 
10367 /*************************************************************************
10368 Obsolete optimization algorithm.
10369 Was replaced by MinBLEIC subpackage.
10370 
10371   -- ALGLIB --
10372      Copyright 02.04.2010 by Bochkanov Sergey
10373 *************************************************************************/
10374 void minasasetcond(const minasastate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
10375 
10376 
10377 /*************************************************************************
10378 Obsolete optimization algorithm.
10379 Was replaced by MinBLEIC subpackage.
10380 
10381   -- ALGLIB --
10382      Copyright 02.04.2010 by Bochkanov Sergey
10383 *************************************************************************/
10384 void minasasetxrep(const minasastate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
10385 
10386 
10387 /*************************************************************************
10388 Obsolete optimization algorithm.
10389 Was replaced by MinBLEIC subpackage.
10390 
10391   -- ALGLIB --
10392      Copyright 02.04.2010 by Bochkanov Sergey
10393 *************************************************************************/
10394 void minasasetalgorithm(const minasastate &state, const ae_int_t algotype, const xparams _xparams = alglib::xdefault);
10395 
10396 
10397 /*************************************************************************
10398 Obsolete optimization algorithm.
10399 Was replaced by MinBLEIC subpackage.
10400 
10401   -- ALGLIB --
10402      Copyright 02.04.2010 by Bochkanov Sergey
10403 *************************************************************************/
10404 void minasasetstpmax(const minasastate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
10405 
10406 
10407 /*************************************************************************
10408 This function provides reverse communication interface
10409 Reverse communication interface is not documented or recommended to use.
10410 See below for functions which provide better documented API
10411 *************************************************************************/
10412 bool minasaiteration(const minasastate &state, const xparams _xparams = alglib::xdefault);
10413 
10414 
10415 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear optimizer.
10417 
10418 These functions accept following parameters:
10419     state   -   algorithm state
10420     grad    -   callback which calculates function (or merit function)
10421                 value func and gradient grad at given point x
10422     rep     -   optional callback which is called after each iteration
10423                 can be NULL
10424     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
10425                 can be NULL
10426 
10427 
10428   -- ALGLIB --
10429      Copyright 20.03.2009 by Bochkanov Sergey
10430 
10431 *************************************************************************/
10432 void minasaoptimize(minasastate &state,
10433     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
10434     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
10435     void *ptr = NULL,
10436     const xparams _xparams = alglib::xdefault);
10437 
10438 
10439 /*************************************************************************
10440 Obsolete optimization algorithm.
10441 Was replaced by MinBLEIC subpackage.
10442 
10443   -- ALGLIB --
10444      Copyright 20.03.2009 by Bochkanov Sergey
10445 *************************************************************************/
10446 void minasaresults(const minasastate &state, real_1d_array &x, minasareport &rep, const xparams _xparams = alglib::xdefault);
10447 
10448 
10449 /*************************************************************************
10450 Obsolete optimization algorithm.
10451 Was replaced by MinBLEIC subpackage.
10452 
10453   -- ALGLIB --
10454      Copyright 20.03.2009 by Bochkanov Sergey
10455 *************************************************************************/
10456 void minasaresultsbuf(const minasastate &state, real_1d_array &x, minasareport &rep, const xparams _xparams = alglib::xdefault);
10457 
10458 
10459 /*************************************************************************
10460 Obsolete optimization algorithm.
10461 Was replaced by MinBLEIC subpackage.
10462 
10463   -- ALGLIB --
10464      Copyright 30.07.2010 by Bochkanov Sergey
10465 *************************************************************************/
10466 void minasarestartfrom(const minasastate &state, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
10467 #endif
10468 
10469 #if defined(AE_COMPILE_MINBC) || !defined(AE_PARTIAL_BUILD)
10470 /*************************************************************************
10471                      BOX CONSTRAINED OPTIMIZATION
10472           WITH FAST ACTIVATION OF MULTIPLE BOX CONSTRAINTS
10473 
10474 DESCRIPTION:
10475 The  subroutine  minimizes  function   F(x) of N arguments subject  to box
10476 constraints (with some of box constraints actually being equality ones).
10477 
10478 This optimizer uses algorithm similar to that of MinBLEIC (optimizer  with
10479 general linear constraints), but presence of box-only  constraints  allows
10480 us to use faster constraint activation strategies. On large-scale problems,
10481 with multiple constraints active at the solution, this  optimizer  can  be
10482 several times faster than BLEIC.
10483 
10484 REQUIREMENTS:
10485 * user must provide function value and gradient
10486 * starting point X0 must be feasible or
10487   not too far away from the feasible set
10488 * grad(f) must be Lipschitz continuous on a level set:
10489   L = { x : f(x)<=f(x0) }
10490 * function must be defined everywhere on the feasible set F
10491 
10492 USAGE:
10493 
Constrained optimization is far more complex than the unconstrained  one.
Here we give a very brief outline of the BC optimizer. We strongly
recommend that you read the examples in the ALGLIB Reference  Manual  and
the ALGLIB User Guide on optimization, which is available at
http://www.alglib.net/optimization/
10498 
10499 1. User initializes algorithm state with MinBCCreate() call
10500 
2. User adds box constraints by calling MinBCSetBC() function.
10502 
10503 3. User sets stopping conditions with MinBCSetCond().
10504 
10505 4. User calls MinBCOptimize() function which takes algorithm  state and
10506    pointer (delegate, etc.) to callback function which calculates F/G.
10507 
10508 5. User calls MinBCResults() to get solution
10509 
10510 6. Optionally user may call MinBCRestartFrom() to solve another problem
10511    with same N but another starting point.
   MinBCRestartFrom() allows one to reuse an already initialized structure.
10513 
10514 
10515 INPUT PARAMETERS:
10516     N       -   problem dimension, N>0:
10517                 * if given, only leading N elements of X are used
                * if not given, automatically determined from size of X
10519     X       -   starting point, array[N]:
10520                 * it is better to set X to a feasible point
10521                 * but X can be infeasible, in which case algorithm will try
10522                   to find feasible point first, using X as initial
10523                   approximation.
10524 
10525 OUTPUT PARAMETERS:
10526     State   -   structure stores algorithm state
10527 
10528   -- ALGLIB --
10529      Copyright 28.11.2010 by Bochkanov Sergey
10530 *************************************************************************/
10531 void minbccreate(const ae_int_t n, const real_1d_array &x, minbcstate &state, const xparams _xparams = alglib::xdefault);
10532 void minbccreate(const real_1d_array &x, minbcstate &state, const xparams _xparams = alglib::xdefault);
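

/*************************************************************************
USAGE SKETCH (illustrative only; it follows the usage steps  listed  above
with assumed bounds/tolerances and a user-supplied gradient callback named
bcgrad - see the MinBCOptimize() sketch below):
>
>     alglib::real_1d_array x    = "[0,0]";
>     alglib::real_1d_array bndl = "[-1,-1]";
>     alglib::real_1d_array bndu = "[+1,+1]";
>     alglib::minbcstate    state;
>     alglib::minbcreport   rep;
>     alglib::minbccreate(x, state);
>     alglib::minbcsetbc(state, bndl, bndu);
>     alglib::minbcsetcond(state, 0.0, 0.0, 1.0e-6, 0);
>     alglib::minbcoptimize(state, bcgrad);
>     alglib::minbcresults(state, x, rep);
>
*************************************************************************/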
10533 
10534 
10535 /*************************************************************************
The subroutine is a finite difference variant of MinBCCreate().  It  uses
finite differences in order to differentiate the target function.
10538 
The description below contains information which is specific to this
function only. We recommend reading the comments on MinBCCreate() in order
to get more information about creation of the BC optimizer.
10542 
10543 INPUT PARAMETERS:
10544     N       -   problem dimension, N>0:
10545                 * if given, only leading N elements of X are used
10546                 * if not given, automatically determined from size of X
10547     X       -   starting point, array[0..N-1].
10548     DiffStep-   differentiation step, >0
10549 
10550 OUTPUT PARAMETERS:
10551     State   -   structure which stores algorithm state
10552 
10553 NOTES:
10554 1. algorithm uses 4-point central formula for differentiation.
10555 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
10556    S[] is scaling vector which can be set by MinBCSetScale() call.
10557 3. we recommend you to use moderate values of  differentiation  step.  Too
10558    large step will result in too large truncation  errors, while too small
10559    step will result in too large numerical  errors.  1.0E-6  can  be  good
10560    value to start with.
10561 4. Numerical  differentiation  is   very   inefficient  -   one   gradient
10562    calculation needs 4*N function evaluations. This function will work for
10563    any N - either small (1...10), moderate (10...100) or  large  (100...).
10564    However, performance penalty will be too severe for any N's except  for
10565    small ones.
10566    We should also say that code which relies on numerical  differentiation
   is less robust and less precise. The optimizer needs  exact  gradient
   values; an imprecise gradient may slow down convergence, especially on
   highly nonlinear problems.
10570    Thus  we  recommend to use this function for fast prototyping on small-
10571    dimensional problems only, and to implement analytical gradient as soon
10572    as possible.
10573 
10574   -- ALGLIB --
10575      Copyright 16.05.2011 by Bochkanov Sergey
10576 *************************************************************************/
10577 void minbccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbcstate &state, const xparams _xparams = alglib::xdefault);
10578 void minbccreatef(const real_1d_array &x, const double diffstep, minbcstate &state, const xparams _xparams = alglib::xdefault);
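

/*************************************************************************
USAGE SKETCH (illustrative only; the FD step 1.0E-6 is an  example  value,
see note 3 above). Creation of a derivative-free BC optimizer:
>
>     alglib::real_1d_array x = "[0,0]";
>     alglib::minbcstate    state;
>     alglib::minbccreatef(x, 1.0e-6, state);
>
*************************************************************************/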
10579 
10580 
10581 /*************************************************************************
10582 This function sets boundary constraints for BC optimizer.
10583 
10584 Boundary constraints are inactive by default (after initial creation).
10585 They are preserved after algorithm restart with MinBCRestartFrom().
10586 
10587 INPUT PARAMETERS:
10588     State   -   structure stores algorithm state
10589     BndL    -   lower bounds, array[N].
10590                 If some (all) variables are unbounded, you may specify
10591                 very small number or -INF.
10592     BndU    -   upper bounds, array[N].
10593                 If some (all) variables are unbounded, you may specify
10594                 very large number or +INF.
10595 
10596 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
10597 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
10598 
10599 NOTE 2: this solver has following useful properties:
10600 * bound constraints are always satisfied exactly
10601 * function is evaluated only INSIDE area specified by  bound  constraints,
10602   even  when  numerical  differentiation is used (algorithm adjusts  nodes
10603   according to boundary constraints)
10604 
10605   -- ALGLIB --
10606      Copyright 28.11.2010 by Bochkanov Sergey
10607 *************************************************************************/
10608 void minbcsetbc(const minbcstate &state, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
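

/*************************************************************************
USAGE SKETCH (illustrative only; the bounds are arbitrary). Freezing  the
first variable at 0.5 and bounding the second one from above only,  using
the ALGLIB infinity constant for the missing lower bound:
>
>     alglib::real_1d_array bndl = "[0.5,0]";
>     alglib::real_1d_array bndu = "[0.5,10]";
>     bndl[1] = alglib::fp_neginf;           // no lower bound on 2nd variable
>     alglib::minbcsetbc(state, bndl, bndu);
>
*************************************************************************/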
10609 
10610 
10611 /*************************************************************************
10612 This function sets stopping conditions for the optimizer.
10613 
10614 INPUT PARAMETERS:
10615     State   -   structure which stores algorithm state
10616     EpsG    -   >=0
10617                 The  subroutine  finishes  its  work   if   the  condition
10618                 |v|<EpsG is satisfied, where:
                * |.| means Euclidean norm
10620                 * v - scaled gradient vector, v[i]=g[i]*s[i]
10621                 * g - gradient
10622                 * s - scaling coefficients set by MinBCSetScale()
10623     EpsF    -   >=0
10624                 The  subroutine  finishes  its work if on k+1-th iteration
10625                 the  condition  |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
10626                 is satisfied.
10627     EpsX    -   >=0
10628                 The subroutine finishes its work if  on  k+1-th  iteration
10629                 the condition |v|<=EpsX is fulfilled, where:
                * |.| means Euclidean norm
10631                 * v - scaled step vector, v[i]=dx[i]/s[i]
10632                 * dx - step vector, dx=X(k+1)-X(k)
10633                 * s - scaling coefficients set by MinBCSetScale()
10634     MaxIts  -   maximum number of iterations. If MaxIts=0, the  number  of
10635                 iterations is unlimited.
10636 
10637 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
10638 to automatic stopping criterion selection.
10639 
NOTE: when SetCond() is called with non-zero MaxIts, BC solver may perform
10641       slightly more than MaxIts iterations. I.e., MaxIts  sets  non-strict
10642       limit on iterations count.
10643 
10644   -- ALGLIB --
10645      Copyright 28.11.2010 by Bochkanov Sergey
10646 *************************************************************************/
10647 void minbcsetcond(const minbcstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
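

/*************************************************************************
USAGE SKETCH (illustrative only; the tolerance is an example value).  Stop
when the scaled step becomes shorter than 1.0E-6, with the other  criteria
disabled:
>
>     alglib::minbcsetcond(state, 0.0, 0.0, 1.0e-6, 0);
>
*************************************************************************/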
10648 
10649 
10650 /*************************************************************************
10651 This function sets scaling coefficients for BC optimizer.
10652 
10653 ALGLIB optimizers use scaling matrices to test stopping  conditions  (step
10654 size and gradient are scaled before comparison with tolerances).  Scale of
10655 the I-th variable is a translation invariant measure of:
10656 a) "how large" the variable is
10657 b) how large the step should be to make significant changes in the function
10658 
10659 Scaling is also used by finite difference variant of the optimizer  - step
10660 along I-th axis is equal to DiffStep*S[I].
10661 
10662 In  most  optimizers  (and  in  the  BC  too)  scaling is NOT a form of
10663 preconditioning. It just  affects  stopping  conditions.  You  should  set
10664 preconditioner  by  separate  call  to  one  of  the  MinBCSetPrec...()
10665 functions.
10666 
10667 There is a special  preconditioning  mode, however,  which  uses   scaling
10668 coefficients to form diagonal preconditioning matrix. You  can  turn  this
10669 mode on, if you want.   But  you should understand that scaling is not the
10670 same thing as preconditioning - these are two different, although  related
10671 forms of tuning solver.
10672 
10673 INPUT PARAMETERS:
10674     State   -   structure stores algorithm state
10675     S       -   array[N], non-zero scaling coefficients
10676                 S[i] may be negative, sign doesn't matter.
10677 
10678   -- ALGLIB --
10679      Copyright 14.01.2011 by Bochkanov Sergey
10680 *************************************************************************/
10681 void minbcsetscale(const minbcstate &state, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
10682 
10683 
10684 /*************************************************************************
10685 Modification of the preconditioner: preconditioning is turned off.
10686 
10687 INPUT PARAMETERS:
10688     State   -   structure which stores algorithm state
10689 
10690   -- ALGLIB --
10691      Copyright 13.10.2010 by Bochkanov Sergey
10692 *************************************************************************/
10693 void minbcsetprecdefault(const minbcstate &state, const xparams _xparams = alglib::xdefault);
10694 
10695 
10696 /*************************************************************************
10697 Modification  of  the  preconditioner:  diagonal of approximate Hessian is
10698 used.
10699 
10700 INPUT PARAMETERS:
10701     State   -   structure which stores algorithm state
10702     D       -   diagonal of the approximate Hessian, array[0..N-1],
10703                 (if larger, only leading N elements are used).
10704 
10705 NOTE 1: D[i] should be positive. Exception will be thrown otherwise.
10706 
10707 NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
10708 
10709   -- ALGLIB --
10710      Copyright 13.10.2010 by Bochkanov Sergey
10711 *************************************************************************/
10712 void minbcsetprecdiag(const minbcstate &state, const real_1d_array &d, const xparams _xparams = alglib::xdefault);
10713 
10714 
10715 /*************************************************************************
10716 Modification of the preconditioner: scale-based diagonal preconditioning.
10717 
10718 This preconditioning mode can be useful when you  don't  have  approximate
10719 diagonal of Hessian, but you know that your  variables  are  badly  scaled
10720 (for  example,  one  variable is in [1,10], and another in [1000,100000]),
10721 and most part of the ill-conditioning comes from different scales of vars.
10722 
10723 In this case simple  scale-based  preconditioner,  with H[i] = 1/(s[i]^2),
10724 can greatly improve convergence.
10725 
IMPORTANT: you should set scale of your variables  with  MinBCSetScale()
10727 call  (before  or after MinBCSetPrecScale() call). Without knowledge of
10728 the scale of your variables scale-based preconditioner will be  just  unit
10729 matrix.
10730 
10731 INPUT PARAMETERS:
10732     State   -   structure which stores algorithm state
10733 
10734   -- ALGLIB --
10735      Copyright 13.10.2010 by Bochkanov Sergey
10736 *************************************************************************/
10737 void minbcsetprecscale(const minbcstate &state, const xparams _xparams = alglib::xdefault);
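

/*************************************************************************
USAGE SKETCH (illustrative only; the scales are assumptions).  Scale-based
preconditioning uses the variable scales, which may be set before or after
this call, but must be set before optimization starts:
>
>     alglib::real_1d_array s = "[1,1000]";
>     alglib::minbcsetscale(state, s);      // per-variable scales
>     alglib::minbcsetprecscale(state);     // H[i]=1/(s[i]^2) preconditioner
>
*************************************************************************/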
10738 
10739 
10740 /*************************************************************************
10741 This function turns on/off reporting.
10742 
10743 INPUT PARAMETERS:
10744     State   -   structure which stores algorithm state
10745     NeedXRep-   whether iteration reports are needed or not
10746 
10747 If NeedXRep is True, algorithm will call rep() callback function if  it is
10748 provided to MinBCOptimize().
10749 
10750   -- ALGLIB --
10751      Copyright 28.11.2010 by Bochkanov Sergey
10752 *************************************************************************/
10753 void minbcsetxrep(const minbcstate &state, const bool needxrep, const xparams _xparams = alglib::xdefault);
10754 
10755 
10756 /*************************************************************************
10757 This function sets maximum step length
10758 
10759 INPUT PARAMETERS:
10760     State   -   structure which stores algorithm state
10761     StpMax  -   maximum step length, >=0. Set StpMax to 0.0,  if you don't
10762                 want to limit step length.
10763 
10764 Use this subroutine when you optimize target function which contains exp()
10765 or  other  fast  growing  functions,  and optimization algorithm makes too
10766 large  steps  which  lead   to overflow. This function allows us to reject
10767 steps  that  are  too  large  (and  therefore  expose  us  to the possible
10768 overflow) without actually calculating function value at the x+stp*d.
10769 
10770   -- ALGLIB --
10771      Copyright 02.04.2010 by Bochkanov Sergey
10772 *************************************************************************/
10773 void minbcsetstpmax(const minbcstate &state, const double stpmax, const xparams _xparams = alglib::xdefault);
10774 
10775 
10776 /*************************************************************************
10777 This function provides reverse communication interface
10778 Reverse communication interface is not documented or recommended to use.
10779 See below for functions which provide better documented API
10780 *************************************************************************/
10781 bool minbciteration(const minbcstate &state, const xparams _xparams = alglib::xdefault);
10782 
10783 
10784 /*************************************************************************
This family of functions is used to launch iterations of the nonlinear optimizer.
10786 
10787 These functions accept following parameters:
10788     state   -   algorithm state
10789     func    -   callback which calculates function (or merit function)
10790                 value func at given point x
10791     grad    -   callback which calculates function (or merit function)
10792                 value func and gradient grad at given point x
10793     rep     -   optional callback which is called after each iteration
10794                 can be NULL
10795     ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
10796                 can be NULL
10797 
10798 NOTES:
10799 
10800 1. This function has two different implementations: one which  uses  exact
10801    (analytical) user-supplied gradient,  and one which uses function value
10802    only  and  numerically  differentiates  function  in  order  to  obtain
10803    gradient.
10804 
10805    Depending  on  the  specific  function  used to create optimizer object
10806    (either  MinBCCreate() for analytical gradient or  MinBCCreateF()
10807    for numerical differentiation) you should choose appropriate variant of
10808    MinBCOptimize() - one  which  accepts  function  AND gradient or one
10809    which accepts function ONLY.
10810 
10811    Be careful to choose variant of MinBCOptimize() which corresponds to
10812    your optimization scheme! Table below lists different  combinations  of
10813    callback (function/gradient) passed to MinBCOptimize()  and specific
10814    function used to create optimizer.
10815 
10816 
10817                      |         USER PASSED TO MinBCOptimize()
10818    CREATED WITH      |  function only   |  function and gradient
10819    ------------------------------------------------------------
10820    MinBCCreateF()    |     works               FAILS
10821    MinBCCreate()     |     FAILS               works
10822 
10823    Here "FAIL" denotes inappropriate combinations  of  optimizer  creation
10824    function  and  MinBCOptimize()  version.   Attemps   to   use   such
10825    combination (for  example,  to  create optimizer with MinBCCreateF()
10826    and  to  pass  gradient  information  to  MinCGOptimize()) will lead to
10827    exception being thrown. Either  you  did  not pass gradient when it WAS
10828    needed or you passed gradient when it was NOT needed.
10829 
10830   -- ALGLIB --
10831      Copyright 28.11.2010 by Bochkanov Sergey
10832 
10833 *************************************************************************/
10834 void minbcoptimize(minbcstate &state,
10835     void (*func)(const real_1d_array &x, double &func, void *ptr),
10836     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
10837     void *ptr = NULL,
10838     const xparams _xparams = alglib::xdefault);
10839 void minbcoptimize(minbcstate &state,
10840     void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
10841     void  (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
10842     void *ptr = NULL,
10843     const xparams _xparams = alglib::xdefault);
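

/*************************************************************************
USAGE SKETCH (illustrative only; bcgrad is an arbitrary name and  f(x)  is
an assumed target). A gradient callback for f(x)=x0^2+x1^2  matching  the
MinBCCreate() row of the table above:
>
>     void bcgrad(const alglib::real_1d_array &x, double &func,
>                 alglib::real_1d_array &grad, void *ptr)
>     {
>         func = x[0]*x[0]+x[1]*x[1];
>         grad[0] = 2*x[0];
>         grad[1] = 2*x[1];
>     }
>
>     ...
>
>     alglib::minbcoptimize(state, bcgrad);
>
*************************************************************************/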
10844 
10845 
10846 /*************************************************************************
10847 This  function  activates/deactivates verification  of  the  user-supplied
10848 analytic gradient.
10849 
10850 Upon  activation  of  this  option  OptGuard  integrity  checker  performs
10851 numerical differentiation of your target function  at  the  initial  point
10852 (note: future versions may also perform check  at  the  final  point)  and
10853 compares numerical gradient with analytic one provided by you.
10854 
10855 If difference is too large, an error flag is set and optimization  session
10856 continues. After optimization session is over, you can retrieve the report
10857 which  stores  both  gradients  and  specific  components  highlighted  as
10858 suspicious by the OptGuard.
10859 
10860 The primary OptGuard report can be retrieved with minbcoptguardresults().
10861 
10862 IMPORTANT: gradient check is a high-overhead option which  will  cost  you
10863            about 3*N additional function evaluations. In many cases it may
10864            cost as much as the rest of the optimization session.
10865 
10866            YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO
10867            CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY.
10868 
10869 NOTE: unlike previous incarnation of the gradient checking code,  OptGuard
10870       does NOT interrupt optimization even if it discovers bad gradient.
10871 
10872 INPUT PARAMETERS:
10873     State       -   structure used to store algorithm state
10874     TestStep    -   verification step used for numerical differentiation:
10875                     * TestStep=0 turns verification off
10876                     * TestStep>0 activates verification
10877                     You should carefully choose TestStep. Value  which  is
10878                     too large (so large that  function  behavior  is  non-
10879                     cubic at this scale) will lead  to  false  alarms. Too
10880                     short step will result in rounding  errors  dominating
10881                     numerical derivative.
10882 
10883                     You may use different step for different parameters by
10884                     means of setting scale with minbcsetscale().
10885 
10886 === EXPLANATION ==========================================================
10887 
In order to verify the gradient, the algorithm performs the following steps:
10889   * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
10890     where X[i] is i-th component of the initial point and S[i] is a  scale
10891     of i-th parameter
10892   * F(X) is evaluated at these trial points
10893   * we perform one more evaluation in the middle point of the interval
10894   * we  build  cubic  model using function values and derivatives at trial
10895     points and we compare its prediction with actual value in  the  middle
10896     point
10897 
10898   -- ALGLIB --
10899      Copyright 15.06.2014 by Bochkanov Sergey
10900 *************************************************************************/
10901 void minbcoptguardgradient(const minbcstate &state, const double teststep, const xparams _xparams = alglib::xdefault);
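

/*************************************************************************
USAGE SKETCH (illustrative only; TestStep=1.0E-4 is just an example value,
bcgrad is the callback from the MinBCOptimize() sketch above). Verification
is enabled before the session and disabled again if the state is reused in
production runs:
>
>     alglib::minbcoptguardgradient(state, 1.0e-4);  // enable gradient check
>     alglib::minbcoptimize(state, bcgrad);
>     ...
>     alglib::minbcoptguardgradient(state, 0.0);     // turn verification off
>
*************************************************************************/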
10902 
10903 
10904 /*************************************************************************
10905 This  function  activates/deactivates nonsmoothness monitoring  option  of
10906 the  OptGuard  integrity  checker. Smoothness  monitor  silently  observes
10907 solution process and tries to detect ill-posed problems, i.e. ones with:
10908 a) discontinuous target function (non-C0)
10909 b) nonsmooth     target function (non-C1)
10910 
10911 Smoothness monitoring does NOT interrupt optimization  even if it suspects
10912 that your problem is nonsmooth. It just sets corresponding  flags  in  the
10913 OptGuard report which can be retrieved after optimization is over.
10914 
10915 Smoothness monitoring is a moderate overhead option which often adds  less
10916 than 1% to the optimizer running time. Thus, you can use it even for large
10917 scale problems.
10918 
10919 NOTE: OptGuard does  NOT  guarantee  that  it  will  always  detect  C0/C1
10920       continuity violations.
10921 
10922       First, minor errors are hard to  catch - say, a 0.0001 difference in
10923       the model values at two sides of the gap may be due to discontinuity
10924       of the model - or simply because the model has changed.
10925 
10926       Second, C1-violations  are  especially  difficult  to  detect  in  a
10927       noninvasive way. The optimizer usually  performs  very  short  steps
10928       near the nonsmoothness, and differentiation  usually   introduces  a
10929       lot of numerical noise.  It  is  hard  to  tell  whether  some  tiny
10930       discontinuity in the slope is due to real nonsmoothness or just  due
10931       to numerical noise alone.
10932 
10933       Our top priority was to avoid false positives, so in some rare cases
      minor errors may go unnoticed (however, in most cases  they  can  be
      spotted by restarting from a different initial point).
10936 
10937 INPUT PARAMETERS:
10938     state   -   algorithm state
10939     level   -   monitoring level:
10940                 * 0 - monitoring is disabled
10941                 * 1 - noninvasive low-overhead monitoring; function values
10942                       and/or gradients are recorded, but OptGuard does not
10943                       try to perform additional evaluations  in  order  to
10944                       get more information about suspicious locations.
10945 
10946 === EXPLANATION ==========================================================
10947 
10948 One major source of headache during optimization  is  the  possibility  of
10949 the coding errors in the target function/constraints (or their gradients).
10950 Such  errors   most   often   manifest   themselves  as  discontinuity  or
10951 nonsmoothness of the target/constraints.
10952 
10953 Another frequent situation is when you try to optimize something involving
10954 lots of min() and max() operations, i.e. nonsmooth target. Although not  a
10955 coding error, it is nonsmoothness anyway - and smooth  optimizers  usually
10956 stop right after encountering nonsmoothness, well before reaching solution.
10957 
10958 OptGuard integrity checker helps you to catch such situations: it monitors
function values/gradients being passed to the optimizer and tries to detect
errors. Upon discovering a suspicious pair of points it raises  appropriate
10961 flag (and allows you to continue optimization). When optimization is done,
10962 you can study OptGuard result.
10963 
10964   -- ALGLIB --
10965      Copyright 21.11.2018 by Bochkanov Sergey
10966 *************************************************************************/
10967 void minbcoptguardsmoothness(const minbcstate &state, const ae_int_t level, const xparams _xparams = alglib::xdefault);
10968 void minbcoptguardsmoothness(const minbcstate &state, const xparams _xparams = alglib::xdefault);
10969 
10970 
10971 /*************************************************************************
10972 Results of OptGuard integrity check, should be called  after  optimization
10973 session is over.
10974 
10975 === PRIMARY REPORT =======================================================
10976 
10977 OptGuard performs several checks which are intended to catch common errors
10978 in the implementation of nonlinear function/gradient:
10979 * incorrect analytic gradient
10980 * discontinuous (non-C0) target functions (constraints)
10981 * nonsmooth     (non-C1) target functions (constraints)
10982 
10983 Each of these checks is activated with appropriate function:
10984 * minbcoptguardgradient() for gradient verification
10985 * minbcoptguardsmoothness() for C0/C1 checks
10986 
10987 Following flags are set when these errors are suspected:
10988 * rep.badgradsuspected, and additionally:
10989   * rep.badgradvidx for specific variable (gradient element) suspected
10990   * rep.badgradxbase, a point where gradient is tested
10991   * rep.badgraduser, user-provided gradient  (stored  as  2D  matrix  with
10992     single row in order to make  report  structure  compatible  with  more
10993     complex optimizers like MinNLC or MinLM)
10994   * rep.badgradnum,   reference    gradient    obtained    via   numerical
10995     differentiation (stored as  2D matrix with single row in order to make
10996     report structure compatible with more complex optimizers  like  MinNLC
10997     or MinLM)
10998 * rep.nonc0suspected
10999 * rep.nonc1suspected
11000 
11001 === ADDITIONAL REPORTS/LOGS ==============================================
11002 
11003 Several different tests are performed to catch C0/C1 errors, you can  find
11004 out specific test signaled error by looking to:
11005 * rep.nonc0test0positive, for non-C0 test #0
11006 * rep.nonc1test0positive, for non-C1 test #0
11007 * rep.nonc1test1positive, for non-C1 test #1
11008 
11009 Additional information (including line search logs)  can  be  obtained  by
11010 means of:
11011 * minbcoptguardnonc1test0results()
11012 * minbcoptguardnonc1test1results()
11013 which return detailed error reports, specific points where discontinuities
11014 were found, and so on.
11015 
11016 ==========================================================================
11017 
11018 INPUT PARAMETERS:
11019     state   -   algorithm state
11020 
11021 OUTPUT PARAMETERS:
11022     rep     -   generic OptGuard report;  more  detailed  reports  can  be
11023                 retrieved with other functions.
11024 
11025 NOTE: false negatives (nonsmooth problems are not identified as  nonsmooth
11026       ones) are possible although unlikely.
11027 
11028       The reason  is  that  you  need  to  make several evaluations around
11029       nonsmoothness  in  order  to  accumulate  enough  information  about
      the function curvature. Say, if you start right from  the  nonsmooth
      point, the optimizer simply won't get enough data to understand what
      is going wrong before it terminates due to abrupt  changes  in  the
      derivative. It is also possible that an "unlucky" step will move  us
      to termination too quickly.
11035 
11036       Our current approach is to have less than 0.1%  false  negatives  in
11037       our test examples  (measured  with  multiple  restarts  from  random
11038       points), and to have exactly 0% false positives.
11039 
11040   -- ALGLIB --
11041      Copyright 21.11.2018 by Bochkanov Sergey
11042 *************************************************************************/
11043 void minbcoptguardresults(const minbcstate &state, optguardreport &rep, const xparams _xparams = alglib::xdefault);
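
/*************************************************************************
USAGE SKETCH (illustrative): inspecting the primary OptGuard report  after
minbcoptimize() has finished with the checks above enabled.  Field  names
follow the optguardreport declarations in this header; the  include  name
may differ in your build.

    #include <cstdio>
    #include "optimization.h"
    using namespace alglib;

    void check_optguard(const minbcstate &state)
    {
        optguardreport rep;
        minbcoptguardresults(state, rep);

        if( rep.badgradsuspected )
        {
            // user-supplied vs. reference gradient, stored as 1xN matrices
            int j = int(rep.badgradvidx);
            printf("suspicious dF/dx[%d]: user=%.6f numerical=%.6f\n",
                   j, rep.badgraduser[0][j], rep.badgradnum[0][j]);
        }
        if( rep.nonc0suspected )
            printf("target/constraints look discontinuous (non-C0)\n");
        if( rep.nonc1suspected )
            printf("target/constraints look nonsmooth (non-C1)\n");
    }
*************************************************************************/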
11044 
11045 
11046 /*************************************************************************
11047 Detailed results of the OptGuard integrity check for nonsmoothness test #0
11048 
11049 Nonsmoothness (non-C1) test #0 studies  function  values  (not  gradient!)
11050 obtained during line searches and monitors  behavior  of  the  directional
11051 derivative estimate.
11052 
11053 This test is less powerful than test #1, but it does  not  depend  on  the
11054 gradient values and thus it is more robust against artifacts introduced by
11055 numerical differentiation.
11056 
Two reports are returned:
* a "strongest" one, corresponding to the line search  which  had  the
  highest value of the nonsmoothness indicator
* a "longest" one, corresponding to the line search which had  the  most
  function evaluations, and thus is more detailed

In both cases the following fields are returned:

* positive - is TRUE when the test flagged a suspicious point; FALSE  if
  the test did not notice anything (in the latter case the fields  below
  are empty).
* x0[], d[] - arrays of length N which store the initial point  and  the
  direction of the line search (d[] may be normalized, but does not have
  to be)
* stp[], f[] - arrays of length CNT which store step lengths and function
  values at these points; f[i] is evaluated at x0+stp[i]*d.
* stpidxa, stpidxb - we suspect that the function violates C1 continuity
  between steps #stpidxa and #stpidxb (usually stpidxb=stpidxa+3, with the
  most likely position of the violation  between  stpidxa+1  and
  stpidxa+2).
11075 
11076 ==========================================================================
= IN SHORT: build a 2D plot of (stp,f) and look at it - you will see where
=           C1 continuity is violated.
11079 ==========================================================================
11080 
11081 INPUT PARAMETERS:
11082     state   -   algorithm state
11083 
11084 OUTPUT PARAMETERS:
11085     strrep  -   C1 test #0 "strong" report
11086     lngrep  -   C1 test #0 "long" report
11087 
11088   -- ALGLIB --
11089      Copyright 21.11.2018 by Bochkanov Sergey
11090 *************************************************************************/
11091 void minbcoptguardnonc1test0results(const minbcstate &state, optguardnonc1test0report &strrep, optguardnonc1test0report &lngrep, const xparams _xparams = alglib::xdefault);
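
/*************************************************************************
USAGE SKETCH (illustrative): dumping the "longest" C1 test #0 log so  that
(stp,f) can be plotted externally. Field names follow the
optguardnonc1test0report declarations in this header.

    #include <cstdio>
    #include "optimization.h"
    using namespace alglib;

    void dump_c1test0(const minbcstate &state)
    {
        optguardnonc1test0report strrep, lngrep;
        minbcoptguardnonc1test0results(state, strrep, lngrep);
        if( !lngrep.positive )
            return;                             // nothing suspicious found
        printf("C1 violation suspected between steps %d and %d\n",
               int(lngrep.stpidxa), int(lngrep.stpidxb));
        for(int i=0; i<lngrep.cnt; i++)         // the (stp,f) line search log
            printf("%.6e %.6e\n", lngrep.stp[i], lngrep.f[i]);
    }
*************************************************************************/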
11092 
11093 
11094 /*************************************************************************
11095 Detailed results of the OptGuard integrity check for nonsmoothness test #1
11096 
11097 Nonsmoothness (non-C1)  test  #1  studies  individual  components  of  the
11098 gradient computed during line search.
11099 
11100 When precise analytic gradient is provided this test is more powerful than
11101 test #0  which  works  with  function  values  and  ignores  user-provided
11102 gradient.  However,  test  #0  becomes  more   powerful   when   numerical
11103 differentiation is employed (in such cases test #1 detects  higher  levels
11104 of numerical noise and becomes too conservative).
11105 
This test also identifies the specific components of  the  gradient  which
violate C1 continuity, which makes it more informative than test #0, which
only reports that continuity is violated.
11109 
Two reports are returned:
* a "strongest" one, corresponding to the line search  which  had  the
  highest value of the nonsmoothness indicator
* a "longest" one, corresponding to the line search which had  the  most
  function evaluations, and thus is more detailed

In both cases the following fields are returned:

* positive - is TRUE when the test flagged a suspicious point; FALSE  if
  the test did not notice anything (in the latter case the fields  below
  are empty).
* vidx - the index of the variable in [0,N) with the nonsmooth derivative
* x0[], d[] - arrays of length N which store the initial point  and  the
  direction of the line search (d[] may be normalized, but does not have
  to be)
* stp[], g[] - arrays of length CNT which store step lengths and gradient
  values at these points; g[i] is evaluated at x0+stp[i]*d  and  contains
  the vidx-th component of the gradient.
* stpidxa, stpidxb - we suspect that the function violates C1 continuity
  between steps #stpidxa and #stpidxb (usually stpidxb=stpidxa+3, with the
  most likely position of the violation  between  stpidxa+1  and
  stpidxa+2).
11130 
11131 ==========================================================================
= IN SHORT: build a 2D plot of (stp,g) and look at it - you will see where
=           C1 continuity is violated.
11134 ==========================================================================
11135 
11136 INPUT PARAMETERS:
11137     state   -   algorithm state
11138 
11139 OUTPUT PARAMETERS:
11140     strrep  -   C1 test #1 "strong" report
11141     lngrep  -   C1 test #1 "long" report
11142 
11143   -- ALGLIB --
11144      Copyright 21.11.2018 by Bochkanov Sergey
11145 *************************************************************************/
11146 void minbcoptguardnonc1test1results(const minbcstate &state, optguardnonc1test1report &strrep, optguardnonc1test1report &lngrep, const xparams _xparams = alglib::xdefault);
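
/*************************************************************************
USAGE SKETCH (illustrative): reporting which gradient component  triggered
C1 test #1 and dumping its (stp,g) log. Field names follow the
optguardnonc1test1report declarations in this header.

    #include <cstdio>
    #include "optimization.h"
    using namespace alglib;

    void dump_c1test1(const minbcstate &state)
    {
        optguardnonc1test1report strrep, lngrep;
        minbcoptguardnonc1test1results(state, strrep, lngrep);
        if( !strrep.positive )
            return;
        printf("dF/dx[%d] suspected to violate C1 between steps %d and %d\n",
               int(strrep.vidx), int(strrep.stpidxa), int(strrep.stpidxb));
        for(int i=0; i<strrep.cnt; i++)         // the (stp,g) line search log
            printf("%.6e %.6e\n", strrep.stp[i], strrep.g[i]);
    }
*************************************************************************/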
11147 
11148 
11149 /*************************************************************************
11150 BC results
11151 
11152 INPUT PARAMETERS:
11153     State   -   algorithm state
11154 
11155 OUTPUT PARAMETERS:
11156     X       -   array[0..N-1], solution
11157     Rep     -   optimization report. You should check Rep.TerminationType
                in  order  to  distinguish  successful  termination  from
                an unsuccessful one:
11160                 * -8    internal integrity control  detected  infinite or
11161                         NAN   values   in   function/gradient.   Abnormal
11162                         termination signalled.
11163                 * -3   inconsistent constraints.
11164                 *  1   relative function improvement is no more than EpsF.
11165                 *  2   scaled step is no more than EpsX.
11166                 *  4   scaled gradient norm is no more than EpsG.
                *  5   MaxIts steps were taken
11168                 *  8   terminated by user who called minbcrequesttermination().
11169                        X contains point which was "current accepted"  when
11170                        termination request was submitted.
11171                 More information about fields of this  structure  can  be
11172                 found in the comments on MinBCReport datatype.
11173 
11174   -- ALGLIB --
11175      Copyright 28.11.2010 by Bochkanov Sergey
11176 *************************************************************************/
11177 void minbcresults(const minbcstate &state, real_1d_array &x, minbcreport &rep, const xparams _xparams = alglib::xdefault);
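
/*************************************************************************
USAGE SKETCH (illustrative): retrieving the solution  and  checking  the
termination code. real_1d_array::tostring() and the  minbcreport  fields
are assumed from the standard ALGLIB C++ interface.

    #include <cstdio>
    #include "optimization.h"
    using namespace alglib;

    void report_solution(const minbcstate &state)
    {
        real_1d_array x;
        minbcreport rep;
        minbcresults(state, x, rep);
        if( rep.terminationtype>0 )
            printf("success (code %d), x=%s\n",
                   int(rep.terminationtype), x.tostring(6).c_str());
        else
            printf("failure, terminationtype=%d\n",
                   int(rep.terminationtype));
    }
*************************************************************************/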
11178 
11179 
11180 /*************************************************************************
11181 BC results
11182 
Buffered implementation of MinBCResults() which uses a pre-allocated buffer
to store X[]. If the buffer size is too small, it resizes the  buffer.  It
is intended to be used in the inner cycles of performance-critical
algorithms where the array reallocation penalty is too large to be ignored.
11187 
11188   -- ALGLIB --
11189      Copyright 28.11.2010 by Bochkanov Sergey
11190 *************************************************************************/
11191 void minbcresultsbuf(const minbcstate &state, real_1d_array &x, minbcreport &rep, const xparams _xparams = alglib::xdefault);
11192 
11193 
11194 /*************************************************************************
11195 This subroutine restarts algorithm from new point.
11196 All optimization parameters (including constraints) are left unchanged.
11197 
This function allows you to solve multiple  optimization  problems  (which
must have the same number of dimensions) without the  object  reallocation
penalty.
11200 
11201 INPUT PARAMETERS:
11202     State   -   structure previously allocated with MinBCCreate call.
11203     X       -   new starting point.
11204 
11205   -- ALGLIB --
11206      Copyright 28.11.2010 by Bochkanov Sergey
11207 *************************************************************************/
11208 void minbcrestartfrom(const minbcstate &state, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
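
/*************************************************************************
USAGE SKETCH (illustrative): solving the same bound-constrained problem
from several starting points, reusing one state object (via
minbcrestartfrom) and one pre-allocated output buffer (via
minbcresultsbuf). The grad callback and the use of <vector> are
assumptions of this sketch, not part of the reference.

    #include <vector>
    #include "optimization.h"
    using namespace alglib;

    void multistart(minbcstate &state,
                    const std::vector<real_1d_array> &starts,
                    void (*grad)(const real_1d_array&, double&,
                                 real_1d_array&, void*))
    {
        real_1d_array xcur;   // reused by minbcresultsbuf(), grown on demand
        minbcreport rep;
        for(size_t k=0; k<starts.size(); k++)
        {
            minbcrestartfrom(state, starts[k]); // constraints/settings kept
            minbcoptimize(state, grad);
            minbcresultsbuf(state, xcur, rep);
            // ... keep track of the best (xcur, rep) pair seen so far ...
        }
    }
*************************************************************************/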
11209 
11210 
11211 /*************************************************************************
This subroutine submits a request for termination of the running optimizer.
It should be called from a user-supplied callback when the  user  decides
that it is time to "smoothly" terminate the optimization  process.  As  a
result, the optimizer stops at the point which  was  "current  accepted"
when the termination request was submitted and returns completion code  8
(successful termination).
11217 
11218 INPUT PARAMETERS:
11219     State   -   optimizer structure
11220 
NOTE: after a request for termination the optimizer may  perform  several
      additional calls to user-supplied callbacks. It is NOT guaranteed to
      stop immediately - it just guarantees that  these  additional  calls
      will be discarded later.

NOTE: calling this function on an optimizer which is NOT running will have
      no effect.

NOTE: multiple calls to this function are possible. The  first  call  is
      counted, subsequent calls are silently ignored.
11231 
11232   -- ALGLIB --
11233      Copyright 08.10.2014 by Bochkanov Sergey
11234 *************************************************************************/
11235 void minbcrequesttermination(const minbcstate &state, const xparams _xparams = alglib::xdefault);
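
/*************************************************************************
USAGE SKETCH (illustrative): requesting graceful termination from inside a
progress callback (the optional "rep" argument of minbcoptimize). Passing
the state object through the opaque ptr argument is an  assumption  of
this sketch.

    #include "optimization.h"
    using namespace alglib;

    static void progresscb(const real_1d_array &x, double func, void *ptr)
    {
        minbcstate *state = static_cast<minbcstate*>(ptr);
        if( func<1.0E-6 )
            minbcrequesttermination(*state); // optimizer stops with code 8
    }

    // enable progress reports and pass the state as the user pointer:
    //     minbcsetxrep(state, true);
    //     minbcoptimize(state, targetgrad, progresscb, &state);
*************************************************************************/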
11236 #endif
11237 
11238 #if defined(AE_COMPILE_OPTS) || !defined(AE_PARTIAL_BUILD)
11239 /*************************************************************************
11240 This function serializes data structure to string.
11241 
11242 Important properties of s_out:
11243 * it contains alphanumeric characters, dots, underscores, minus signs
11244 * these symbols are grouped into words, which are separated by spaces
11245   and Windows-style (CR+LF) newlines
* although the serializer uses spaces and CR+LF as separators, you can
  replace any separator character by an arbitrary combination of
  spaces, tabs, and Windows or Unix newlines. This allows flexible
  reformatting of the string in case you want to include it in a text
  or XML file. However, you should not insert separators into the
  middle of the "words", nor should you change the case of letters.
* s_out can be freely moved between 32-bit and 64-bit systems, little
  and big endian machines, and so on. You can serialize a structure on
  a 32-bit machine and unserialize it on a 64-bit one (or vice versa),
  or serialize it on SPARC and unserialize it on x86. You can also
  serialize it in the C++ version of ALGLIB and unserialize it in the
  C# one, and vice versa.
11258 *************************************************************************/
11259 void lptestproblemserialize(lptestproblem &obj, std::string &s_out);
11260 
11261 
11262 /*************************************************************************
11263 This function unserializes data structure from string.
11264 *************************************************************************/
11265 void lptestproblemunserialize(const std::string &s_in, lptestproblem &obj);
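
/*************************************************************************
USAGE SKETCH (illustrative): round-tripping an already  constructed  test
problem through its portable string form. Both functions  are  declared
above; the problem object itself is assumed to have been filled with the
lptestproblemcreate/lptestproblemset* functions further below.

    #include <string>
    #include "optimization.h"
    using namespace alglib;

    void roundtrip(lptestproblem &p)
    {
        std::string s;
        lptestproblemserialize(p, s);     // whitespace-separated word stream

        lptestproblem p2;                 // restored copy, equivalent to p
        lptestproblemunserialize(s, p2);
    }
*************************************************************************/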
11266 
11267 
11268 
11269 
11270 /*************************************************************************
11271 This function serializes data structure to C++ stream.
11272 
The data stream generated by this function  is  the  same  as  the  string
representation generated by the  string  version  of  the  serializer  -
alphanumeric characters, dots, underscores and minus signs, grouped  into
words separated by spaces and CR+LF.

We recommend that you read the comments on the  string  version  of  the
serializer to find out more about serialization of ALGLIB objects.
11280 *************************************************************************/
11281 void lptestproblemserialize(lptestproblem &obj, std::ostream &s_out);
11282 
11283 
11284 /*************************************************************************
11285 This function unserializes data structure from stream.
11286 *************************************************************************/
11287 void lptestproblemunserialize(const std::istream &s_in, lptestproblem &obj);
11288 
11289 
11290 /*************************************************************************
11291 Initialize test LP problem.
11292 
11293 This function is intended for internal use by ALGLIB.
11294 
11295   -- ALGLIB --
11296      Copyright 20.07.2021 by Bochkanov Sergey
11297 *************************************************************************/
11298 void lptestproblemcreate(const ae_int_t n, const bool hasknowntarget, const double targetf, lptestproblem &p, const xparams _xparams = alglib::xdefault);
11299 
11300 
11301 /*************************************************************************
11302 Set scale for test LP problem
11303 
11304 This function is intended for internal use by ALGLIB.
11305 
11306   -- ALGLIB --
11307      Copyright 20.07.2021 by Bochkanov Sergey
11308 *************************************************************************/
11309 void lptestproblemsetscale(const lptestproblem &p, const real_1d_array &s, const xparams _xparams = alglib::xdefault);
11310 
11311 
11312 /*************************************************************************
11313 Set cost for test LP problem
11314 
11315 This function is intended for internal use by ALGLIB.
11316 
11317   -- ALGLIB --
11318      Copyright 20.07.2021 by Bochkanov Sergey
11319 *************************************************************************/
11320 void lptestproblemsetcost(const lptestproblem &p, const real_1d_array &c, const xparams _xparams = alglib::xdefault);
11321 
11322 
11323 /*************************************************************************
11324 Set box constraints for test LP problem
11325 
11326 This function is intended for internal use by ALGLIB.
11327 
11328   -- ALGLIB --
11329      Copyright 20.07.2021 by Bochkanov Sergey
11330 *************************************************************************/
11331 void lptestproblemsetbc(const lptestproblem &p, const real_1d_array &bndl, const real_1d_array &bndu, const xparams _xparams = alglib::xdefault);
11332 
11333 
11334 /*************************************************************************
11335 Set box constraints for test LP problem
11336 
11337 This function is intended for internal use by ALGLIB.
11338 
11339   -- ALGLIB --
11340      Copyright 20.07.2021 by Bochkanov Sergey
11341 *************************************************************************/
11342 void lptestproblemsetlc2(const lptestproblem &p, const sparsematrix &a, const real_1d_array &al, const real_1d_array &au, const ae_int_t m, const xparams _xparams = alglib::xdefault);
11343 
11344 
11345 /*************************************************************************
This is an internal function intended to be used only  by  ALGLIB  itself.
Although for technical reasons it is made publicly available (and has  its
own manual entry), you should never call it.
11349 
11350   -- ALGLIB --
11351      Copyright 11.01.2011 by Bochkanov Sergey
11352 *************************************************************************/
11353 void xdbgminlpcreatefromtestproblem(const lptestproblem &p, minlpstate &state, const xparams _xparams = alglib::xdefault);
11354 #endif
11355 }
11356 
11357 /////////////////////////////////////////////////////////////////////////
11358 //
11359 // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS)
11360 //
11361 /////////////////////////////////////////////////////////////////////////
11362 namespace alglib_impl
11363 {
11364 #if defined(AE_COMPILE_OPTGUARDAPI) || !defined(AE_PARTIAL_BUILD)
11365 void optguardinitinternal(optguardreport* rep,
11366      ae_int_t n,
11367      ae_int_t k,
11368      ae_state *_state);
11369 void optguardexportreport(optguardreport* srcrep,
11370      ae_int_t n,
11371      ae_int_t k,
11372      ae_bool badgradhasxj,
11373      optguardreport* dstrep,
11374      ae_state *_state);
11375 void smoothnessmonitorexportc1test0report(optguardnonc1test0report* srcrep,
11376      /* Real    */ ae_vector* s,
11377      optguardnonc1test0report* dstrep,
11378      ae_state *_state);
11379 void smoothnessmonitorexportc1test1report(optguardnonc1test1report* srcrep,
11380      /* Real    */ ae_vector* s,
11381      optguardnonc1test1report* dstrep,
11382      ae_state *_state);
11383 ae_bool optguardallclear(optguardreport* rep, ae_state *_state);
11384 void _optguardreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
11385 void _optguardreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11386 void _optguardreport_clear(void* _p);
11387 void _optguardreport_destroy(void* _p);
11388 void _optguardnonc0report_init(void* _p, ae_state *_state, ae_bool make_automatic);
11389 void _optguardnonc0report_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11390 void _optguardnonc0report_clear(void* _p);
11391 void _optguardnonc0report_destroy(void* _p);
11392 void _optguardnonc1test0report_init(void* _p, ae_state *_state, ae_bool make_automatic);
11393 void _optguardnonc1test0report_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11394 void _optguardnonc1test0report_clear(void* _p);
11395 void _optguardnonc1test0report_destroy(void* _p);
11396 void _optguardnonc1test1report_init(void* _p, ae_state *_state, ae_bool make_automatic);
11397 void _optguardnonc1test1report_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11398 void _optguardnonc1test1report_clear(void* _p);
11399 void _optguardnonc1test1report_destroy(void* _p);
11400 #endif
11401 #if defined(AE_COMPILE_OPTSERV) || !defined(AE_PARTIAL_BUILD)
11402 void checkbcviolation(/* Boolean */ ae_vector* hasbndl,
11403      /* Real    */ ae_vector* bndl,
11404      /* Boolean */ ae_vector* hasbndu,
11405      /* Real    */ ae_vector* bndu,
11406      /* Real    */ ae_vector* x,
11407      ae_int_t n,
11408      /* Real    */ ae_vector* s,
11409      ae_bool nonunits,
11410      double* bcerr,
11411      ae_int_t* bcidx,
11412      ae_state *_state);
11413 void checklcviolation(/* Real    */ ae_matrix* cleic,
11414      /* Integer */ ae_vector* lcsrcidx,
11415      ae_int_t nec,
11416      ae_int_t nic,
11417      /* Real    */ ae_vector* x,
11418      ae_int_t n,
11419      double* lcerr,
11420      ae_int_t* lcidx,
11421      ae_state *_state);
11422 void checknlcviolation(/* Real    */ ae_vector* fi,
11423      ae_int_t ng,
11424      ae_int_t nh,
11425      double* nlcerr,
11426      ae_int_t* nlcidx,
11427      ae_state *_state);
11428 void unscaleandchecknlcviolation(/* Real    */ ae_vector* fi,
11429      /* Real    */ ae_vector* fscales,
11430      ae_int_t ng,
11431      ae_int_t nh,
11432      double* nlcerr,
11433      ae_int_t* nlcidx,
11434      ae_state *_state);
11435 void trimprepare(double f, double* threshold, ae_state *_state);
11436 void trimfunction(double* f,
11437      /* Real    */ ae_vector* g,
11438      ae_int_t n,
11439      double threshold,
11440      ae_state *_state);
11441 ae_bool enforceboundaryconstraints(/* Real    */ ae_vector* x,
11442      /* Real    */ ae_vector* bl,
11443      /* Boolean */ ae_vector* havebl,
11444      /* Real    */ ae_vector* bu,
11445      /* Boolean */ ae_vector* havebu,
11446      ae_int_t nmain,
11447      ae_int_t nslack,
11448      ae_state *_state);
11449 void projectgradientintobc(/* Real    */ ae_vector* x,
11450      /* Real    */ ae_vector* g,
11451      /* Real    */ ae_vector* bl,
11452      /* Boolean */ ae_vector* havebl,
11453      /* Real    */ ae_vector* bu,
11454      /* Boolean */ ae_vector* havebu,
11455      ae_int_t nmain,
11456      ae_int_t nslack,
11457      ae_state *_state);
11458 void calculatestepbound(/* Real    */ ae_vector* x,
11459      /* Real    */ ae_vector* d,
11460      double alpha,
11461      /* Real    */ ae_vector* bndl,
11462      /* Boolean */ ae_vector* havebndl,
11463      /* Real    */ ae_vector* bndu,
11464      /* Boolean */ ae_vector* havebndu,
11465      ae_int_t nmain,
11466      ae_int_t nslack,
11467      ae_int_t* variabletofreeze,
11468      double* valuetofreeze,
11469      double* maxsteplen,
11470      ae_state *_state);
11471 ae_int_t postprocessboundedstep(/* Real    */ ae_vector* x,
11472      /* Real    */ ae_vector* xprev,
11473      /* Real    */ ae_vector* bndl,
11474      /* Boolean */ ae_vector* havebndl,
11475      /* Real    */ ae_vector* bndu,
11476      /* Boolean */ ae_vector* havebndu,
11477      ae_int_t nmain,
11478      ae_int_t nslack,
11479      ae_int_t variabletofreeze,
11480      double valuetofreeze,
11481      double steptaken,
11482      double maxsteplen,
11483      ae_state *_state);
11484 void filterdirection(/* Real    */ ae_vector* d,
11485      /* Real    */ ae_vector* x,
11486      /* Real    */ ae_vector* bndl,
11487      /* Boolean */ ae_vector* havebndl,
11488      /* Real    */ ae_vector* bndu,
11489      /* Boolean */ ae_vector* havebndu,
11490      /* Real    */ ae_vector* s,
11491      ae_int_t nmain,
11492      ae_int_t nslack,
11493      double droptol,
11494      ae_state *_state);
11495 ae_int_t numberofchangedconstraints(/* Real    */ ae_vector* x,
11496      /* Real    */ ae_vector* xprev,
11497      /* Real    */ ae_vector* bndl,
11498      /* Boolean */ ae_vector* havebndl,
11499      /* Real    */ ae_vector* bndu,
11500      /* Boolean */ ae_vector* havebndu,
11501      ae_int_t nmain,
11502      ae_int_t nslack,
11503      ae_state *_state);
11504 ae_bool findfeasiblepoint(/* Real    */ ae_vector* x,
11505      /* Real    */ ae_vector* bndl,
11506      /* Boolean */ ae_vector* havebndl,
11507      /* Real    */ ae_vector* bndu,
11508      /* Boolean */ ae_vector* havebndu,
11509      ae_int_t nmain,
11510      ae_int_t nslack,
11511      /* Real    */ ae_matrix* ce,
11512      ae_int_t k,
11513      double epsi,
11514      ae_int_t* qpits,
11515      ae_int_t* gpaits,
11516      ae_state *_state);
11517 ae_bool derivativecheck(double f0,
11518      double df0,
11519      double f1,
11520      double df1,
11521      double f,
11522      double df,
11523      double width,
11524      ae_state *_state);
11525 void estimateparabolicmodel(double absasum,
11526      double absasum2,
11527      double mx,
11528      double mb,
11529      double md,
11530      double d1,
11531      double d2,
11532      ae_int_t* d1est,
11533      ae_int_t* d2est,
11534      ae_state *_state);
11535 void inexactlbfgspreconditioner(/* Real    */ ae_vector* s,
11536      ae_int_t n,
11537      /* Real    */ ae_vector* d,
11538      /* Real    */ ae_vector* c,
11539      /* Real    */ ae_matrix* w,
11540      ae_int_t k,
11541      precbuflbfgs* buf,
11542      ae_state *_state);
11543 void preparelowrankpreconditioner(/* Real    */ ae_vector* d,
11544      /* Real    */ ae_vector* c,
11545      /* Real    */ ae_matrix* w,
11546      ae_int_t n,
11547      ae_int_t k,
11548      precbuflowrank* buf,
11549      ae_state *_state);
11550 void applylowrankpreconditioner(/* Real    */ ae_vector* s,
11551      precbuflowrank* buf,
11552      ae_state *_state);
11553 void smoothnessmonitorinit(smoothnessmonitor* monitor,
11554      /* Real    */ ae_vector* s,
11555      ae_int_t n,
11556      ae_int_t k,
11557      ae_bool checksmoothness,
11558      ae_state *_state);
11559 void smoothnessmonitorstartlinesearch(smoothnessmonitor* monitor,
11560      /* Real    */ ae_vector* x,
11561      /* Real    */ ae_vector* fi,
11562      /* Real    */ ae_matrix* jac,
11563      ae_state *_state);
11564 void smoothnessmonitorstartlinesearch1u(smoothnessmonitor* monitor,
11565      /* Real    */ ae_vector* s,
11566      /* Real    */ ae_vector* invs,
11567      /* Real    */ ae_vector* x,
11568      double f0,
11569      /* Real    */ ae_vector* j0,
11570      ae_state *_state);
11571 void smoothnessmonitorenqueuepoint(smoothnessmonitor* monitor,
11572      /* Real    */ ae_vector* d,
11573      double stp,
11574      /* Real    */ ae_vector* x,
11575      /* Real    */ ae_vector* fi,
11576      /* Real    */ ae_matrix* jac,
11577      ae_state *_state);
11578 void smoothnessmonitorenqueuepoint1u(smoothnessmonitor* monitor,
11579      /* Real    */ ae_vector* s,
11580      /* Real    */ ae_vector* invs,
11581      /* Real    */ ae_vector* d,
11582      double stp,
11583      /* Real    */ ae_vector* x,
11584      double f0,
11585      /* Real    */ ae_vector* j0,
11586      ae_state *_state);
11587 void smoothnessmonitorfinalizelinesearch(smoothnessmonitor* monitor,
11588      ae_state *_state);
11589 void smoothnessmonitorstartprobing(smoothnessmonitor* monitor,
11590      double stpmax,
11591      ae_int_t nvalues,
11592      double stepscale,
11593      ae_state *_state);
11594 ae_bool smoothnessmonitorprobe(smoothnessmonitor* monitor,
11595      ae_state *_state);
11596 void smoothnessmonitortraceprobingresults(smoothnessmonitor* monitor,
11597      ae_state *_state);
11598 void smoothnessmonitortracestatus(smoothnessmonitor* monitor,
11599      ae_bool callersuggeststrace,
11600      ae_state *_state);
11601 void smoothnessmonitorexportreport(smoothnessmonitor* monitor,
11602      optguardreport* rep,
11603      ae_state *_state);
11604 ae_bool smoothnessmonitorcheckgradientatx0(smoothnessmonitor* monitor,
11605      /* Real    */ ae_vector* unscaledx0,
11606      /* Real    */ ae_vector* s,
11607      /* Real    */ ae_vector* bndl,
11608      /* Real    */ ae_vector* bndu,
11609      ae_bool hasboxconstraints,
11610      double teststep,
11611      ae_state *_state);
11612 void _precbuflbfgs_init(void* _p, ae_state *_state, ae_bool make_automatic);
11613 void _precbuflbfgs_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11614 void _precbuflbfgs_clear(void* _p);
11615 void _precbuflbfgs_destroy(void* _p);
11616 void _precbuflowrank_init(void* _p, ae_state *_state, ae_bool make_automatic);
11617 void _precbuflowrank_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11618 void _precbuflowrank_clear(void* _p);
11619 void _precbuflowrank_destroy(void* _p);
11620 void _smoothnessmonitor_init(void* _p, ae_state *_state, ae_bool make_automatic);
11621 void _smoothnessmonitor_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11622 void _smoothnessmonitor_clear(void* _p);
11623 void _smoothnessmonitor_destroy(void* _p);
11624 #endif
11625 #if defined(AE_COMPILE_MINLBFGS) || !defined(AE_PARTIAL_BUILD)
11626 void minlbfgscreate(ae_int_t n,
11627      ae_int_t m,
11628      /* Real    */ ae_vector* x,
11629      minlbfgsstate* state,
11630      ae_state *_state);
11631 void minlbfgscreatef(ae_int_t n,
11632      ae_int_t m,
11633      /* Real    */ ae_vector* x,
11634      double diffstep,
11635      minlbfgsstate* state,
11636      ae_state *_state);
11637 void minlbfgssetcond(minlbfgsstate* state,
11638      double epsg,
11639      double epsf,
11640      double epsx,
11641      ae_int_t maxits,
11642      ae_state *_state);
11643 void minlbfgssetxrep(minlbfgsstate* state,
11644      ae_bool needxrep,
11645      ae_state *_state);
11646 void minlbfgssetstpmax(minlbfgsstate* state,
11647      double stpmax,
11648      ae_state *_state);
11649 void minlbfgssetscale(minlbfgsstate* state,
11650      /* Real    */ ae_vector* s,
11651      ae_state *_state);
11652 void minlbfgscreatex(ae_int_t n,
11653      ae_int_t m,
11654      /* Real    */ ae_vector* x,
11655      ae_int_t flags,
11656      double diffstep,
11657      minlbfgsstate* state,
11658      ae_state *_state);
11659 void minlbfgssetprecdefault(minlbfgsstate* state, ae_state *_state);
11660 void minlbfgssetpreccholesky(minlbfgsstate* state,
11661      /* Real    */ ae_matrix* p,
11662      ae_bool isupper,
11663      ae_state *_state);
11664 void minlbfgssetprecdiag(minlbfgsstate* state,
11665      /* Real    */ ae_vector* d,
11666      ae_state *_state);
11667 void minlbfgssetprecscale(minlbfgsstate* state, ae_state *_state);
11668 void minlbfgssetprecrankklbfgsfast(minlbfgsstate* state,
11669      /* Real    */ ae_vector* d,
11670      /* Real    */ ae_vector* c,
11671      /* Real    */ ae_matrix* w,
11672      ae_int_t cnt,
11673      ae_state *_state);
11674 void minlbfgssetpreclowrankexact(minlbfgsstate* state,
11675      /* Real    */ ae_vector* d,
11676      /* Real    */ ae_vector* c,
11677      /* Real    */ ae_matrix* w,
11678      ae_int_t cnt,
11679      ae_state *_state);
11680 ae_bool minlbfgsiteration(minlbfgsstate* state, ae_state *_state);
11681 void minlbfgsoptguardgradient(minlbfgsstate* state,
11682      double teststep,
11683      ae_state *_state);
11684 void minlbfgsoptguardsmoothness(minlbfgsstate* state,
11685      ae_int_t level,
11686      ae_state *_state);
11687 void minlbfgsoptguardresults(minlbfgsstate* state,
11688      optguardreport* rep,
11689      ae_state *_state);
11690 void minlbfgsoptguardnonc1test0results(minlbfgsstate* state,
11691      optguardnonc1test0report* strrep,
11692      optguardnonc1test0report* lngrep,
11693      ae_state *_state);
11694 void minlbfgsoptguardnonc1test1results(minlbfgsstate* state,
11695      optguardnonc1test1report* strrep,
11696      optguardnonc1test1report* lngrep,
11697      ae_state *_state);
11698 void minlbfgsresults(minlbfgsstate* state,
11699      /* Real    */ ae_vector* x,
11700      minlbfgsreport* rep,
11701      ae_state *_state);
11702 void minlbfgsresultsbuf(minlbfgsstate* state,
11703      /* Real    */ ae_vector* x,
11704      minlbfgsreport* rep,
11705      ae_state *_state);
11706 void minlbfgsrestartfrom(minlbfgsstate* state,
11707      /* Real    */ ae_vector* x,
11708      ae_state *_state);
11709 void minlbfgsrequesttermination(minlbfgsstate* state, ae_state *_state);
11710 void _minlbfgsstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
11711 void _minlbfgsstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11712 void _minlbfgsstate_clear(void* _p);
11713 void _minlbfgsstate_destroy(void* _p);
11714 void _minlbfgsreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
11715 void _minlbfgsreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11716 void _minlbfgsreport_clear(void* _p);
11717 void _minlbfgsreport_destroy(void* _p);
11718 #endif
11719 #if defined(AE_COMPILE_CQMODELS) || !defined(AE_PARTIAL_BUILD)
11720 void cqminit(ae_int_t n, convexquadraticmodel* s, ae_state *_state);
11721 void cqmseta(convexquadraticmodel* s,
11722      /* Real    */ ae_matrix* a,
11723      ae_bool isupper,
11724      double alpha,
11725      ae_state *_state);
11726 void cqmgeta(convexquadraticmodel* s,
11727      /* Real    */ ae_matrix* a,
11728      ae_state *_state);
11729 void cqmrewritedensediagonal(convexquadraticmodel* s,
11730      /* Real    */ ae_vector* z,
11731      ae_state *_state);
11732 void cqmsetd(convexquadraticmodel* s,
11733      /* Real    */ ae_vector* d,
11734      double tau,
11735      ae_state *_state);
11736 void cqmdropa(convexquadraticmodel* s, ae_state *_state);
11737 void cqmsetb(convexquadraticmodel* s,
11738      /* Real    */ ae_vector* b,
11739      ae_state *_state);
11740 void cqmsetq(convexquadraticmodel* s,
11741      /* Real    */ ae_matrix* q,
11742      /* Real    */ ae_vector* r,
11743      ae_int_t k,
11744      double theta,
11745      ae_state *_state);
11746 void cqmsetactiveset(convexquadraticmodel* s,
11747      /* Real    */ ae_vector* x,
11748      /* Boolean */ ae_vector* activeset,
11749      ae_state *_state);
11750 double cqmeval(convexquadraticmodel* s,
11751      /* Real    */ ae_vector* x,
11752      ae_state *_state);
11753 void cqmevalx(convexquadraticmodel* s,
11754      /* Real    */ ae_vector* x,
11755      double* r,
11756      double* noise,
11757      ae_state *_state);
11758 void cqmgradunconstrained(convexquadraticmodel* s,
11759      /* Real    */ ae_vector* x,
11760      /* Real    */ ae_vector* g,
11761      ae_state *_state);
11762 double cqmxtadx2(convexquadraticmodel* s,
11763      /* Real    */ ae_vector* x,
11764      /* Real    */ ae_vector* tmp,
11765      ae_state *_state);
11766 void cqmadx(convexquadraticmodel* s,
11767      /* Real    */ ae_vector* x,
11768      /* Real    */ ae_vector* y,
11769      ae_state *_state);
11770 ae_bool cqmconstrainedoptimum(convexquadraticmodel* s,
11771      /* Real    */ ae_vector* x,
11772      ae_state *_state);
11773 void cqmscalevector(convexquadraticmodel* s,
11774      /* Real    */ ae_vector* x,
11775      ae_state *_state);
11776 void cqmgetdiaga(convexquadraticmodel* s,
11777      /* Real    */ ae_vector* x,
11778      ae_state *_state);
11779 double cqmdebugconstrainedevalt(convexquadraticmodel* s,
11780      /* Real    */ ae_vector* x,
11781      ae_state *_state);
11782 double cqmdebugconstrainedevale(convexquadraticmodel* s,
11783      /* Real    */ ae_vector* x,
11784      ae_state *_state);
11785 void _convexquadraticmodel_init(void* _p, ae_state *_state, ae_bool make_automatic);
11786 void _convexquadraticmodel_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11787 void _convexquadraticmodel_clear(void* _p);
11788 void _convexquadraticmodel_destroy(void* _p);
11789 #endif
11790 #if defined(AE_COMPILE_LPQPSERV) || !defined(AE_PARTIAL_BUILD)
11791 void scaleshiftbcinplace(/* Real    */ ae_vector* s,
11792      /* Real    */ ae_vector* xorigin,
11793      /* Real    */ ae_vector* bndl,
11794      /* Real    */ ae_vector* bndu,
11795      ae_int_t n,
11796      ae_state *_state);
11797 void scaleshiftdensebrlcinplace(/* Real    */ ae_vector* s,
11798      /* Real    */ ae_vector* xorigin,
11799      ae_int_t n,
11800      /* Real    */ ae_matrix* densea,
11801      /* Real    */ ae_vector* ab,
11802      /* Real    */ ae_vector* ar,
11803      ae_int_t m,
11804      ae_state *_state);
11805 void scaleshiftmixedbrlcinplace(/* Real    */ ae_vector* s,
11806      /* Real    */ ae_vector* xorigin,
11807      ae_int_t n,
11808      sparsematrix* sparsea,
11809      ae_int_t msparse,
11810      /* Real    */ ae_matrix* densea,
11811      ae_int_t mdense,
11812      /* Real    */ ae_vector* ab,
11813      /* Real    */ ae_vector* ar,
11814      ae_state *_state);
11815 void scaledenseqpinplace(/* Real    */ ae_matrix* densea,
11816      ae_bool isupper,
11817      ae_int_t nmain,
11818      /* Real    */ ae_vector* denseb,
11819      ae_int_t ntotal,
11820      /* Real    */ ae_vector* s,
11821      ae_state *_state);
11822 void scalesparseqpinplace(/* Real    */ ae_vector* s,
11823      ae_int_t n,
11824      sparsematrix* sparsea,
11825      /* Real    */ ae_vector* denseb,
11826      ae_state *_state);
11827 void normalizedensebrlcinplace(/* Real    */ ae_matrix* densea,
11828      /* Real    */ ae_vector* ab,
11829      /* Real    */ ae_vector* ar,
11830      ae_int_t n,
11831      ae_int_t m,
11832      /* Real    */ ae_vector* rownorms,
11833      ae_bool neednorms,
11834      ae_state *_state);
11835 void normalizemixedbrlcinplace(sparsematrix* sparsea,
11836      ae_int_t msparse,
11837      /* Real    */ ae_matrix* densea,
11838      ae_int_t mdense,
11839      /* Real    */ ae_vector* ab,
11840      /* Real    */ ae_vector* ar,
11841      ae_int_t n,
11842      ae_bool limitedamplification,
11843      /* Real    */ ae_vector* rownorms,
11844      ae_bool neednorms,
11845      ae_state *_state);
11846 double normalizedenseqpinplace(/* Real    */ ae_matrix* densea,
11847      ae_bool isupper,
11848      ae_int_t nmain,
11849      /* Real    */ ae_vector* denseb,
11850      ae_int_t ntotal,
11851      ae_state *_state);
11852 double normalizesparseqpinplace(sparsematrix* sparsea,
11853      ae_bool isupper,
11854      /* Real    */ ae_vector* denseb,
11855      ae_int_t n,
11856      ae_state *_state);
11857 void unscaleunshiftpointbc(/* Real    */ ae_vector* s,
11858      /* Real    */ ae_vector* xorigin,
11859      /* Real    */ ae_vector* rawbndl,
11860      /* Real    */ ae_vector* rawbndu,
11861      /* Real    */ ae_vector* sclsftbndl,
11862      /* Real    */ ae_vector* sclsftbndu,
11863      /* Boolean */ ae_vector* hasbndl,
11864      /* Boolean */ ae_vector* hasbndu,
11865      /* Real    */ ae_vector* x,
11866      ae_int_t n,
11867      ae_state *_state);
11868 #endif
11869 #if defined(AE_COMPILE_SNNLS) || !defined(AE_PARTIAL_BUILD)
11870 void snnlsinit(ae_int_t nsmax,
11871      ae_int_t ndmax,
11872      ae_int_t nrmax,
11873      snnlssolver* s,
11874      ae_state *_state);
11875 void snnlssetproblem(snnlssolver* s,
11876      /* Real    */ ae_matrix* a,
11877      /* Real    */ ae_vector* b,
11878      ae_int_t ns,
11879      ae_int_t nd,
11880      ae_int_t nr,
11881      ae_state *_state);
11882 void snnlsdropnnc(snnlssolver* s, ae_int_t idx, ae_state *_state);
11883 void snnlssolve(snnlssolver* s,
11884      /* Real    */ ae_vector* x,
11885      ae_state *_state);
11886 void _snnlssolver_init(void* _p, ae_state *_state, ae_bool make_automatic);
11887 void _snnlssolver_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11888 void _snnlssolver_clear(void* _p);
11889 void _snnlssolver_destroy(void* _p);
11890 #endif
11891 #if defined(AE_COMPILE_SACTIVESETS) || !defined(AE_PARTIAL_BUILD)
11892 void sasinit(ae_int_t n, sactiveset* s, ae_state *_state);
11893 void sassetscale(sactiveset* state,
11894      /* Real    */ ae_vector* s,
11895      ae_state *_state);
11896 void sassetprecdiag(sactiveset* state,
11897      /* Real    */ ae_vector* d,
11898      ae_state *_state);
11899 void sassetbc(sactiveset* state,
11900      /* Real    */ ae_vector* bndl,
11901      /* Real    */ ae_vector* bndu,
11902      ae_state *_state);
11903 void sassetlc(sactiveset* state,
11904      /* Real    */ ae_matrix* c,
11905      /* Integer */ ae_vector* ct,
11906      ae_int_t k,
11907      ae_state *_state);
11908 void sassetlcx(sactiveset* state,
11909      /* Real    */ ae_matrix* cleic,
11910      ae_int_t nec,
11911      ae_int_t nic,
11912      ae_state *_state);
11913 ae_bool sasstartoptimization(sactiveset* state,
11914      /* Real    */ ae_vector* x,
11915      ae_state *_state);
11916 void sasexploredirection(sactiveset* state,
11917      /* Real    */ ae_vector* d,
11918      double* stpmax,
11919      ae_int_t* cidx,
11920      double* vval,
11921      ae_state *_state);
11922 ae_int_t sasmoveto(sactiveset* state,
11923      /* Real    */ ae_vector* xn,
11924      ae_bool needact,
11925      ae_int_t cidx,
11926      double cval,
11927      ae_state *_state);
11928 void sasimmediateactivation(sactiveset* state,
11929      ae_int_t cidx,
11930      double cval,
11931      ae_state *_state);
11932 void sasconstraineddescent(sactiveset* state,
11933      /* Real    */ ae_vector* g,
11934      /* Real    */ ae_vector* d,
11935      ae_state *_state);
11936 void sasconstraineddescentprec(sactiveset* state,
11937      /* Real    */ ae_vector* g,
11938      /* Real    */ ae_vector* d,
11939      ae_state *_state);
11940 void sasconstraineddirection(sactiveset* state,
11941      /* Real    */ ae_vector* d,
11942      ae_state *_state);
11943 void sasconstraineddirectionprec(sactiveset* state,
11944      /* Real    */ ae_vector* d,
11945      ae_state *_state);
11946 void sascorrection(sactiveset* state,
11947      /* Real    */ ae_vector* x,
11948      double* penalty,
11949      ae_state *_state);
11950 double sasactivelcpenalty1(sactiveset* state,
11951      /* Real    */ ae_vector* x,
11952      ae_state *_state);
11953 double sasscaledconstrainednorm(sactiveset* state,
11954      /* Real    */ ae_vector* d,
11955      ae_state *_state);
11956 void sasstopoptimization(sactiveset* state, ae_state *_state);
11957 void sasreactivateconstraints(sactiveset* state,
11958      /* Real    */ ae_vector* gc,
11959      ae_state *_state);
11960 void sasreactivateconstraintsprec(sactiveset* state,
11961      /* Real    */ ae_vector* gc,
11962      ae_state *_state);
11963 void sasrebuildbasis(sactiveset* state, ae_state *_state);
11964 void sasappendtobasis(sactiveset* state,
11965      /* Boolean */ ae_vector* newentries,
11966      ae_state *_state);
11967 void _sactiveset_init(void* _p, ae_state *_state, ae_bool make_automatic);
11968 void _sactiveset_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11969 void _sactiveset_clear(void* _p);
11970 void _sactiveset_destroy(void* _p);
11971 #endif
11972 #if defined(AE_COMPILE_QQPSOLVER) || !defined(AE_PARTIAL_BUILD)
11973 void qqploaddefaults(ae_int_t n, qqpsettings* s, ae_state *_state);
11974 void qqpcopysettings(qqpsettings* src, qqpsettings* dst, ae_state *_state);
11975 void qqppreallocategrowdense(qqpbuffers* sstate,
11976      ae_int_t nexpected,
11977      ae_int_t ngrowto,
11978      ae_state *_state);
11979 void qqpoptimize(convexquadraticmodel* cqmac,
11980      sparsematrix* sparseac,
11981      /* Real    */ ae_matrix* denseac,
11982      ae_int_t akind,
11983      ae_bool isupper,
11984      /* Real    */ ae_vector* bc,
11985      /* Real    */ ae_vector* bndlc,
11986      /* Real    */ ae_vector* bnduc,
11987      /* Real    */ ae_vector* sc,
11988      /* Real    */ ae_vector* xoriginc,
11989      ae_int_t nc,
11990      qqpsettings* settings,
11991      qqpbuffers* sstate,
11992      /* Real    */ ae_vector* xs,
11993      ae_int_t* terminationtype,
11994      ae_state *_state);
11995 void _qqpsettings_init(void* _p, ae_state *_state, ae_bool make_automatic);
11996 void _qqpsettings_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
11997 void _qqpsettings_clear(void* _p);
11998 void _qqpsettings_destroy(void* _p);
11999 void _qqpbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic);
12000 void _qqpbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
12001 void _qqpbuffers_clear(void* _p);
12002 void _qqpbuffers_destroy(void* _p);
12003 #endif
12004 #if defined(AE_COMPILE_QPDENSEAULSOLVER) || !defined(AE_PARTIAL_BUILD)
12005 void qpdenseaulloaddefaults(ae_int_t nmain,
12006      qpdenseaulsettings* s,
12007      ae_state *_state);
12008 void qpdenseauloptimize(convexquadraticmodel* a,
12009      sparsematrix* sparsea,
12010      ae_int_t akind,
12011      ae_bool sparseaupper,
12012      /* Real    */ ae_vector* b,
12013      /* Real    */ ae_vector* bndl,
12014      /* Real    */ ae_vector* bndu,
12015      /* Real    */ ae_vector* s,
12016      /* Real    */ ae_vector* xorigin,
12017      ae_int_t nn,
12018      /* Real    */ ae_matrix* cleic,
12019      ae_int_t dnec,
12020      ae_int_t dnic,
12021      sparsematrix* scleic,
12022      ae_int_t snec,
12023      ae_int_t snic,
12024      ae_bool renormlc,
12025      qpdenseaulsettings* settings,
12026      qpdenseaulbuffers* state,
12027      /* Real    */ ae_vector* xs,
12028      /* Real    */ ae_vector* lagbc,
12029      /* Real    */ ae_vector* laglc,
12030      ae_int_t* terminationtype,
12031      ae_state *_state);
12032 void _qpdenseaulsettings_init(void* _p, ae_state *_state, ae_bool make_automatic);
12033 void _qpdenseaulsettings_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
12034 void _qpdenseaulsettings_clear(void* _p);
12035 void _qpdenseaulsettings_destroy(void* _p);
12036 void _qpdenseaulbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic);
12037 void _qpdenseaulbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
12038 void _qpdenseaulbuffers_clear(void* _p);
12039 void _qpdenseaulbuffers_destroy(void* _p);
12040 #endif
12041 #if defined(AE_COMPILE_MINBLEIC) || !defined(AE_PARTIAL_BUILD)
12042 void minbleiccreate(ae_int_t n,
12043      /* Real    */ ae_vector* x,
12044      minbleicstate* state,
12045      ae_state *_state);
12046 void minbleiccreatef(ae_int_t n,
12047      /* Real    */ ae_vector* x,
12048      double diffstep,
12049      minbleicstate* state,
12050      ae_state *_state);
12051 void minbleicsetbc(minbleicstate* state,
12052      /* Real    */ ae_vector* bndl,
12053      /* Real    */ ae_vector* bndu,
12054      ae_state *_state);
12055 void minbleicsetlc(minbleicstate* state,
12056      /* Real    */ ae_matrix* c,
12057      /* Integer */ ae_vector* ct,
12058      ae_int_t k,
12059      ae_state *_state);
12060 void minbleicsetcond(minbleicstate* state,
12061      double epsg,
12062      double epsf,
12063      double epsx,
12064      ae_int_t maxits,
12065      ae_state *_state);
12066 void minbleicsetscale(minbleicstate* state,
12067      /* Real    */ ae_vector* s,
12068      ae_state *_state);
12069 void minbleicsetprecdefault(minbleicstate* state, ae_state *_state);
12070 void minbleicsetprecdiag(minbleicstate* state,
12071      /* Real    */ ae_vector* d,
12072      ae_state *_state);
12073 void minbleicsetprecscale(minbleicstate* state, ae_state *_state);
12074 void minbleicsetxrep(minbleicstate* state,
12075      ae_bool needxrep,
12076      ae_state *_state);
12077 void minbleicsetdrep(minbleicstate* state,
12078      ae_bool needdrep,
12079      ae_state *_state);
12080 void minbleicsetstpmax(minbleicstate* state,
12081      double stpmax,
12082      ae_state *_state);
12083 ae_bool minbleiciteration(minbleicstate* state, ae_state *_state);
12084 void minbleicoptguardgradient(minbleicstate* state,
12085      double teststep,
12086      ae_state *_state);
12087 void minbleicoptguardsmoothness(minbleicstate* state,
12088      ae_int_t level,
12089      ae_state *_state);
12090 void minbleicoptguardresults(minbleicstate* state,
12091      optguardreport* rep,
12092      ae_state *_state);
12093 void minbleicoptguardnonc1test0results(minbleicstate* state,
12094      optguardnonc1test0report* strrep,
12095      optguardnonc1test0report* lngrep,
12096      ae_state *_state);
12097 void minbleicoptguardnonc1test1results(minbleicstate* state,
12098      optguardnonc1test1report* strrep,
12099      optguardnonc1test1report* lngrep,
12100      ae_state *_state);
12101 void minbleicresults(minbleicstate* state,
12102      /* Real    */ ae_vector* x,
12103      minbleicreport* rep,
12104      ae_state *_state);
12105 void minbleicresultsbuf(minbleicstate* state,
12106      /* Real    */ ae_vector* x,
12107      minbleicreport* rep,
12108      ae_state *_state);
12109 void minbleicrestartfrom(minbleicstate* state,
12110      /* Real    */ ae_vector* x,
12111      ae_state *_state);
12112 void minbleicrequesttermination(minbleicstate* state, ae_state *_state);
12113 void minbleicemergencytermination(minbleicstate* state, ae_state *_state);
12114 void _minbleicstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
12115 void _minbleicstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
12116 void _minbleicstate_clear(void* _p);
12117 void _minbleicstate_destroy(void* _p);
12118 void _minbleicreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
12119 void _minbleicreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
12120 void _minbleicreport_clear(void* _p);
12121 void _minbleicreport_destroy(void* _p);
12122 #endif
12123 #if defined(AE_COMPILE_QPBLEICSOLVER) || !defined(AE_PARTIAL_BUILD)
12124 void qpbleicloaddefaults(ae_int_t nmain,
12125      qpbleicsettings* s,
12126      ae_state *_state);
12127 void qpbleiccopysettings(qpbleicsettings* src,
12128      qpbleicsettings* dst,
12129      ae_state *_state);
12130 void qpbleicoptimize(convexquadraticmodel* a,
12131      sparsematrix* sparsea,
12132      ae_int_t akind,
12133      ae_bool sparseaupper,
12134      double absasum,
12135      double absasum2,
12136      /* Real    */ ae_vector* b,
12137      /* Real    */ ae_vector* bndl,
12138      /* Real    */ ae_vector* bndu,
12139      /* Real    */ ae_vector* s,
12140      /* Real    */ ae_vector* xorigin,
12141      ae_int_t n,
12142      /* Real    */ ae_matrix* cleic,
12143      ae_int_t nec,
12144      ae_int_t nic,
12145      qpbleicsettings* settings,
12146      qpbleicbuffers* sstate,
12147      ae_bool* firstcall,
12148      /* Real    */ ae_vector* xs,
12149      ae_int_t* terminationtype,
12150      ae_state *_state);
12151 void _qpbleicsettings_init(void* _p, ae_state *_state, ae_bool make_automatic);
12152 void _qpbleicsettings_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
12153 void _qpbleicsettings_clear(void* _p);
12154 void _qpbleicsettings_destroy(void* _p);
12155 void _qpbleicbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic);
12156 void _qpbleicbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
12157 void _qpbleicbuffers_clear(void* _p);
12158 void _qpbleicbuffers_destroy(void* _p);
12159 #endif
12160 #if defined(AE_COMPILE_VIPMSOLVER) || !defined(AE_PARTIAL_BUILD)
12161 void vipminitdense(vipmstate* state,
12162      /* Real    */ ae_vector* s,
12163      /* Real    */ ae_vector* xorigin,
12164      ae_int_t n,
12165      ae_state *_state);
12166 void vipminitdensewithslacks(vipmstate* state,
12167      /* Real    */ ae_vector* s,
12168      /* Real    */ ae_vector* xorigin,
12169      ae_int_t nmain,
12170      ae_int_t n,
12171      ae_state *_state);
12172 void vipminitsparse(vipmstate* state,
12173      /* Real    */ ae_vector* s,
12174      /* Real    */ ae_vector* xorigin,
12175      ae_int_t n,
12176      ae_state *_state);
12177 void vipmsetquadraticlinear(vipmstate* state,
12178      /* Real    */ ae_matrix* denseh,
12179      sparsematrix* sparseh,
12180      ae_int_t hkind,
12181      ae_bool isupper,
12182      /* Real    */ ae_vector* c,
12183      ae_state *_state);
12184 void vipmsetconstraints(vipmstate* state,
12185      /* Real    */ ae_vector* bndl,
12186      /* Real    */ ae_vector* bndu,
12187      sparsematrix* sparsea,
12188      ae_int_t msparse,
12189      /* Real    */ ae_matrix* densea,
12190      ae_int_t mdense,
12191      /* Real    */ ae_vector* cl,
12192      /* Real    */ ae_vector* cu,
12193      ae_state *_state);
12194 void vipmsetcond(vipmstate* state,
12195      double epsp,
12196      double epsd,
12197      double epsgap,
12198      ae_state *_state);
12199 void vipmoptimize(vipmstate* state,
12200      ae_bool dropbigbounds,
12201      /* Real    */ ae_vector* xs,
12202      /* Real    */ ae_vector* lagbc,
12203      /* Real    */ ae_vector* laglc,
12204      ae_int_t* terminationtype,
12205      ae_state *_state);
12206 void _vipmvars_init(void* _p, ae_state *_state, ae_bool make_automatic);
12207 void _vipmvars_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
12208 void _vipmvars_clear(void* _p);
12209 void _vipmvars_destroy(void* _p);
12210 void _vipmrighthandside_init(void* _p, ae_state *_state, ae_bool make_automatic);
12211 void _vipmrighthandside_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
12212 void _vipmrighthandside_clear(void* _p);
12213 void _vipmrighthandside_destroy(void* _p);
12214 void _vipmstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
12215 void _vipmstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
12216 void _vipmstate_clear(void* _p);
12217 void _vipmstate_destroy(void* _p);
12218 #endif
#if defined(AE_COMPILE_MINQP) || !defined(AE_PARTIAL_BUILD)
void minqpcreate(ae_int_t n, minqpstate* state, ae_state *_state);
void minqpsetlinearterm(minqpstate* state,
     /* Real    */ ae_vector* b,
     ae_state *_state);
void minqpsetquadraticterm(minqpstate* state,
     /* Real    */ ae_matrix* a,
     ae_bool isupper,
     ae_state *_state);
void minqpsetquadratictermsparse(minqpstate* state,
     sparsematrix* a,
     ae_bool isupper,
     ae_state *_state);
void minqpsetstartingpoint(minqpstate* state,
     /* Real    */ ae_vector* x,
     ae_state *_state);
void minqpsetorigin(minqpstate* state,
     /* Real    */ ae_vector* xorigin,
     ae_state *_state);
void minqpsetscale(minqpstate* state,
     /* Real    */ ae_vector* s,
     ae_state *_state);
void minqpsetscaleautodiag(minqpstate* state, ae_state *_state);
void minqpsetalgobleic(minqpstate* state,
     double epsg,
     double epsf,
     double epsx,
     ae_int_t maxits,
     ae_state *_state);
void minqpsetalgodenseaul(minqpstate* state,
     double epsx,
     double rho,
     ae_int_t itscnt,
     ae_state *_state);
void minqpsetalgodenseipm(minqpstate* state, double eps, ae_state *_state);
void minqpsetalgosparseipm(minqpstate* state,
     double eps,
     ae_state *_state);
void minqpsetalgoquickqp(minqpstate* state,
     double epsg,
     double epsf,
     double epsx,
     ae_int_t maxouterits,
     ae_bool usenewton,
     ae_state *_state);
void minqpsetbc(minqpstate* state,
     /* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     ae_state *_state);
void minqpsetbcall(minqpstate* state,
     double bndl,
     double bndu,
     ae_state *_state);
void minqpsetbci(minqpstate* state,
     ae_int_t i,
     double bndl,
     double bndu,
     ae_state *_state);
void minqpsetlc(minqpstate* state,
     /* Real    */ ae_matrix* c,
     /* Integer */ ae_vector* ct,
     ae_int_t k,
     ae_state *_state);
void minqpsetlcsparse(minqpstate* state,
     sparsematrix* c,
     /* Integer */ ae_vector* ct,
     ae_int_t k,
     ae_state *_state);
void minqpsetlcmixed(minqpstate* state,
     sparsematrix* sparsec,
     /* Integer */ ae_vector* sparsect,
     ae_int_t sparsek,
     /* Real    */ ae_matrix* densec,
     /* Integer */ ae_vector* densect,
     ae_int_t densek,
     ae_state *_state);
void minqpsetlcmixedlegacy(minqpstate* state,
     /* Real    */ ae_matrix* densec,
     /* Integer */ ae_vector* densect,
     ae_int_t densek,
     sparsematrix* sparsec,
     /* Integer */ ae_vector* sparsect,
     ae_int_t sparsek,
     ae_state *_state);
void minqpsetlc2dense(minqpstate* state,
     /* Real    */ ae_matrix* a,
     /* Real    */ ae_vector* al,
     /* Real    */ ae_vector* au,
     ae_int_t k,
     ae_state *_state);
void minqpsetlc2(minqpstate* state,
     sparsematrix* a,
     /* Real    */ ae_vector* al,
     /* Real    */ ae_vector* au,
     ae_int_t k,
     ae_state *_state);
void minqpsetlc2mixed(minqpstate* state,
     sparsematrix* sparsea,
     ae_int_t ksparse,
     /* Real    */ ae_matrix* densea,
     ae_int_t kdense,
     /* Real    */ ae_vector* al,
     /* Real    */ ae_vector* au,
     ae_state *_state);
void minqpaddlc2dense(minqpstate* state,
     /* Real    */ ae_vector* a,
     double al,
     double au,
     ae_state *_state);
void minqpaddlc2(minqpstate* state,
     /* Integer */ ae_vector* idxa,
     /* Real    */ ae_vector* vala,
     ae_int_t nnz,
     double al,
     double au,
     ae_state *_state);
void minqpaddlc2sparsefromdense(minqpstate* state,
     /* Real    */ ae_vector* da,
     double al,
     double au,
     ae_state *_state);
void minqpoptimize(minqpstate* state, ae_state *_state);
void minqpresults(minqpstate* state,
     /* Real    */ ae_vector* x,
     minqpreport* rep,
     ae_state *_state);
void minqpresultsbuf(minqpstate* state,
     /* Real    */ ae_vector* x,
     minqpreport* rep,
     ae_state *_state);
void minqpsetlineartermfast(minqpstate* state,
     /* Real    */ ae_vector* b,
     ae_state *_state);
void minqpsetquadratictermfast(minqpstate* state,
     /* Real    */ ae_matrix* a,
     ae_bool isupper,
     double s,
     ae_state *_state);
void minqprewritediagonal(minqpstate* state,
     /* Real    */ ae_vector* s,
     ae_state *_state);
void minqpsetstartingpointfast(minqpstate* state,
     /* Real    */ ae_vector* x,
     ae_state *_state);
void minqpsetoriginfast(minqpstate* state,
     /* Real    */ ae_vector* xorigin,
     ae_state *_state);
void _minqpstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minqpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minqpstate_clear(void* _p);
void _minqpstate_destroy(void* _p);
void _minqpreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minqpreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minqpreport_clear(void* _p);
void _minqpreport_destroy(void* _p);
#endif
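/*************************************************************************
MINLM subpackage: Levenberg-Marquardt optimizer for nonlinear least squares
problems, with optional box/linear constraints and OptGuard gradient
verification.
*************************************************************************/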
#if defined(AE_COMPILE_MINLM) || !defined(AE_PARTIAL_BUILD)
void minlmcreatevj(ae_int_t n,
     ae_int_t m,
     /* Real    */ ae_vector* x,
     minlmstate* state,
     ae_state *_state);
void minlmcreatev(ae_int_t n,
     ae_int_t m,
     /* Real    */ ae_vector* x,
     double diffstep,
     minlmstate* state,
     ae_state *_state);
void minlmcreatefgh(ae_int_t n,
     /* Real    */ ae_vector* x,
     minlmstate* state,
     ae_state *_state);
void minlmsetcond(minlmstate* state,
     double epsx,
     ae_int_t maxits,
     ae_state *_state);
void minlmsetxrep(minlmstate* state, ae_bool needxrep, ae_state *_state);
void minlmsetstpmax(minlmstate* state, double stpmax, ae_state *_state);
void minlmsetscale(minlmstate* state,
     /* Real    */ ae_vector* s,
     ae_state *_state);
void minlmsetbc(minlmstate* state,
     /* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     ae_state *_state);
void minlmsetlc(minlmstate* state,
     /* Real    */ ae_matrix* c,
     /* Integer */ ae_vector* ct,
     ae_int_t k,
     ae_state *_state);
void minlmsetacctype(minlmstate* state,
     ae_int_t acctype,
     ae_state *_state);
ae_bool minlmiteration(minlmstate* state, ae_state *_state);
void minlmoptguardgradient(minlmstate* state,
     double teststep,
     ae_state *_state);
void minlmoptguardresults(minlmstate* state,
     optguardreport* rep,
     ae_state *_state);
void minlmresults(minlmstate* state,
     /* Real    */ ae_vector* x,
     minlmreport* rep,
     ae_state *_state);
void minlmresultsbuf(minlmstate* state,
     /* Real    */ ae_vector* x,
     minlmreport* rep,
     ae_state *_state);
void minlmrestartfrom(minlmstate* state,
     /* Real    */ ae_vector* x,
     ae_state *_state);
void minlmrequesttermination(minlmstate* state, ae_state *_state);
void minlmcreatevgj(ae_int_t n,
     ae_int_t m,
     /* Real    */ ae_vector* x,
     minlmstate* state,
     ae_state *_state);
void minlmcreatefgj(ae_int_t n,
     ae_int_t m,
     /* Real    */ ae_vector* x,
     minlmstate* state,
     ae_state *_state);
void minlmcreatefj(ae_int_t n,
     ae_int_t m,
     /* Real    */ ae_vector* x,
     minlmstate* state,
     ae_state *_state);
void _minlmstepfinder_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minlmstepfinder_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minlmstepfinder_clear(void* _p);
void _minlmstepfinder_destroy(void* _p);
void _minlmstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minlmstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minlmstate_clear(void* _p);
void _minlmstate_destroy(void* _p);
void _minlmreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minlmreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minlmreport_clear(void* _p);
void _minlmreport_destroy(void* _p);
#endif
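/*************************************************************************
MINCG subpackage: nonlinear conjugate gradient optimizer with diagonal and
low-rank preconditioning options and OptGuard smoothness/gradient checks.
*************************************************************************/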
#if defined(AE_COMPILE_MINCG) || !defined(AE_PARTIAL_BUILD)
void mincgcreate(ae_int_t n,
     /* Real    */ ae_vector* x,
     mincgstate* state,
     ae_state *_state);
void mincgcreatef(ae_int_t n,
     /* Real    */ ae_vector* x,
     double diffstep,
     mincgstate* state,
     ae_state *_state);
void mincgsetcond(mincgstate* state,
     double epsg,
     double epsf,
     double epsx,
     ae_int_t maxits,
     ae_state *_state);
void mincgsetscale(mincgstate* state,
     /* Real    */ ae_vector* s,
     ae_state *_state);
void mincgsetxrep(mincgstate* state, ae_bool needxrep, ae_state *_state);
void mincgsetdrep(mincgstate* state, ae_bool needdrep, ae_state *_state);
void mincgsetcgtype(mincgstate* state, ae_int_t cgtype, ae_state *_state);
void mincgsetstpmax(mincgstate* state, double stpmax, ae_state *_state);
void mincgsuggeststep(mincgstate* state, double stp, ae_state *_state);
double mincglastgoodstep(mincgstate* state, ae_state *_state);
void mincgsetprecdefault(mincgstate* state, ae_state *_state);
void mincgsetprecdiag(mincgstate* state,
     /* Real    */ ae_vector* d,
     ae_state *_state);
void mincgsetprecscale(mincgstate* state, ae_state *_state);
ae_bool mincgiteration(mincgstate* state, ae_state *_state);
void mincgoptguardgradient(mincgstate* state,
     double teststep,
     ae_state *_state);
void mincgoptguardsmoothness(mincgstate* state,
     ae_int_t level,
     ae_state *_state);
void mincgoptguardresults(mincgstate* state,
     optguardreport* rep,
     ae_state *_state);
void mincgoptguardnonc1test0results(mincgstate* state,
     optguardnonc1test0report* strrep,
     optguardnonc1test0report* lngrep,
     ae_state *_state);
void mincgoptguardnonc1test1results(mincgstate* state,
     optguardnonc1test1report* strrep,
     optguardnonc1test1report* lngrep,
     ae_state *_state);
void mincgresults(mincgstate* state,
     /* Real    */ ae_vector* x,
     mincgreport* rep,
     ae_state *_state);
void mincgresultsbuf(mincgstate* state,
     /* Real    */ ae_vector* x,
     mincgreport* rep,
     ae_state *_state);
void mincgrestartfrom(mincgstate* state,
     /* Real    */ ae_vector* x,
     ae_state *_state);
void mincgrequesttermination(mincgstate* state, ae_state *_state);
void mincgsetprecdiagfast(mincgstate* state,
     /* Real    */ ae_vector* d,
     ae_state *_state);
void mincgsetpreclowrankfast(mincgstate* state,
     /* Real    */ ae_vector* d1,
     /* Real    */ ae_vector* c,
     /* Real    */ ae_matrix* v,
     ae_int_t vcnt,
     ae_state *_state);
void mincgsetprecvarpart(mincgstate* state,
     /* Real    */ ae_vector* d2,
     ae_state *_state);
void _mincgstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _mincgstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mincgstate_clear(void* _p);
void _mincgstate_destroy(void* _p);
void _mincgreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _mincgreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _mincgreport_clear(void* _p);
void _mincgreport_destroy(void* _p);
#endif
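/*************************************************************************
NLCSQP subpackage: reverse-communication core of the SQP (sequential
quadratic programming) algorithm for nonlinearly constrained optimization.
*************************************************************************/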
#if defined(AE_COMPILE_NLCSQP) || !defined(AE_PARTIAL_BUILD)
void minsqpinitbuf(/* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     /* Real    */ ae_vector* s,
     /* Real    */ ae_vector* x0,
     ae_int_t n,
     /* Real    */ ae_matrix* cleic,
     /* Integer */ ae_vector* lcsrcidx,
     ae_int_t nec,
     ae_int_t nic,
     ae_int_t nlec,
     ae_int_t nlic,
     double epsx,
     ae_int_t maxits,
     minsqpstate* state,
     ae_state *_state);
ae_bool minsqpiteration(minsqpstate* state,
     smoothnessmonitor* smonitor,
     ae_bool userterminationneeded,
     ae_state *_state);
void _minsqpsubsolver_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minsqpsubsolver_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minsqpsubsolver_clear(void* _p);
void _minsqpsubsolver_destroy(void* _p);
void _minsqptmplagrangian_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minsqptmplagrangian_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minsqptmplagrangian_clear(void* _p);
void _minsqptmplagrangian_destroy(void* _p);
void _minsqptmpmerit_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minsqptmpmerit_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minsqptmpmerit_clear(void* _p);
void _minsqptmpmerit_destroy(void* _p);
void _minsqpmeritphasestate_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minsqpmeritphasestate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minsqpmeritphasestate_clear(void* _p);
void _minsqpmeritphasestate_destroy(void* _p);
void _minsqpstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minsqpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minsqpstate_clear(void* _p);
void _minsqpstate_destroy(void* _p);
#endif
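/*************************************************************************
LPQPPRESOLVE subpackage: presolve layer for LP/QP problems; the forward
routine prepares the user problem, and presolvebwd() maps the solution,
status codes and Lagrange multipliers back to the original variables.
*************************************************************************/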
#if defined(AE_COMPILE_LPQPPRESOLVE) || !defined(AE_PARTIAL_BUILD)
void presolvenonescaleuser(/* Real    */ ae_vector* s,
     /* Real    */ ae_vector* c,
     /* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     ae_int_t n,
     sparsematrix* sparsea,
     /* Real    */ ae_vector* al,
     /* Real    */ ae_vector* au,
     ae_int_t k,
     presolveinfo* info,
     ae_state *_state);
void presolvebwd(presolveinfo* info,
     /* Real    */ ae_vector* x,
     /* Integer */ ae_vector* stats,
     /* Real    */ ae_vector* lagbc,
     /* Real    */ ae_vector* laglc,
     ae_state *_state);
void _presolveinfo_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _presolveinfo_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _presolveinfo_clear(void* _p);
void _presolveinfo_destroy(void* _p);
#endif
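/*************************************************************************
REVISEDDUALSIMPLEX subpackage: revised dual simplex LP solver with
configurable settings and basis import/export.
*************************************************************************/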
#if defined(AE_COMPILE_REVISEDDUALSIMPLEX) || !defined(AE_PARTIAL_BUILD)
void dsssettingsinit(dualsimplexsettings* settings, ae_state *_state);
void dssinit(ae_int_t n, dualsimplexstate* s, ae_state *_state);
void dsssetproblem(dualsimplexstate* state,
     /* Real    */ ae_vector* c,
     /* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     /* Real    */ ae_matrix* densea,
     sparsematrix* sparsea,
     ae_int_t akind,
     /* Real    */ ae_vector* al,
     /* Real    */ ae_vector* au,
     ae_int_t k,
     dualsimplexbasis* proposedbasis,
     ae_int_t basisinittype,
     dualsimplexsettings* settings,
     ae_state *_state);
void dssexportbasis(dualsimplexstate* state,
     dualsimplexbasis* basis,
     ae_state *_state);
void dssoptimize(dualsimplexstate* state,
     dualsimplexsettings* settings,
     ae_state *_state);
void _dualsimplexsettings_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _dualsimplexsettings_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _dualsimplexsettings_clear(void* _p);
void _dualsimplexsettings_destroy(void* _p);
void _dssvector_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _dssvector_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _dssvector_clear(void* _p);
void _dssvector_destroy(void* _p);
void _dualsimplexbasis_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _dualsimplexbasis_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _dualsimplexbasis_clear(void* _p);
void _dualsimplexbasis_destroy(void* _p);
void _dualsimplexsubproblem_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _dualsimplexsubproblem_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _dualsimplexsubproblem_clear(void* _p);
void _dualsimplexsubproblem_destroy(void* _p);
void _dualsimplexstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _dualsimplexstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _dualsimplexstate_clear(void* _p);
void _dualsimplexstate_destroy(void* _p);
#endif
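/*************************************************************************
MINLP subpackage: linear programming solver; the cost vector, box
constraints and one/two-sided linear constraints are set here, and either
the dual simplex (DSS) or the interior point (IPM) algorithm is selected.
*************************************************************************/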
#if defined(AE_COMPILE_MINLP) || !defined(AE_PARTIAL_BUILD)
void minlpcreate(ae_int_t n, minlpstate* state, ae_state *_state);
void minlpsetalgodss(minlpstate* state, double eps, ae_state *_state);
void minlpsetalgoipm(minlpstate* state, double eps, ae_state *_state);
void minlpsetcost(minlpstate* state,
     /* Real    */ ae_vector* c,
     ae_state *_state);
void minlpsetscale(minlpstate* state,
     /* Real    */ ae_vector* s,
     ae_state *_state);
void minlpsetbc(minlpstate* state,
     /* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     ae_state *_state);
void minlpsetbcall(minlpstate* state,
     double bndl,
     double bndu,
     ae_state *_state);
void minlpsetbci(minlpstate* state,
     ae_int_t i,
     double bndl,
     double bndu,
     ae_state *_state);
void minlpsetlc(minlpstate* state,
     /* Real    */ ae_matrix* a,
     /* Integer */ ae_vector* ct,
     ae_int_t k,
     ae_state *_state);
void minlpsetlc2dense(minlpstate* state,
     /* Real    */ ae_matrix* a,
     /* Real    */ ae_vector* al,
     /* Real    */ ae_vector* au,
     ae_int_t k,
     ae_state *_state);
void minlpsetlc2(minlpstate* state,
     sparsematrix* a,
     /* Real    */ ae_vector* al,
     /* Real    */ ae_vector* au,
     ae_int_t k,
     ae_state *_state);
void minlpaddlc2dense(minlpstate* state,
     /* Real    */ ae_vector* a,
     double al,
     double au,
     ae_state *_state);
void minlpaddlc2(minlpstate* state,
     /* Integer */ ae_vector* idxa,
     /* Real    */ ae_vector* vala,
     ae_int_t nnz,
     double al,
     double au,
     ae_state *_state);
void minlpoptimize(minlpstate* state, ae_state *_state);
void minlpresults(minlpstate* state,
     /* Real    */ ae_vector* x,
     minlpreport* rep,
     ae_state *_state);
void minlpresultsbuf(minlpstate* state,
     /* Real    */ ae_vector* x,
     minlpreport* rep,
     ae_state *_state);
void _minlpstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minlpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minlpstate_clear(void* _p);
void _minlpstate_destroy(void* _p);
void _minlpreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minlpreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minlpreport_clear(void* _p);
void _minlpreport_destroy(void* _p);
#endif
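/*************************************************************************
NLCSLP subpackage: reverse-communication core of the SLP (successive linear
programming) algorithm for nonlinearly constrained optimization; its
interface mirrors the SQP core above.
*************************************************************************/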
#if defined(AE_COMPILE_NLCSLP) || !defined(AE_PARTIAL_BUILD)
void minslpinitbuf(/* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     /* Real    */ ae_vector* s,
     /* Real    */ ae_vector* x0,
     ae_int_t n,
     /* Real    */ ae_matrix* cleic,
     /* Integer */ ae_vector* lcsrcidx,
     ae_int_t nec,
     ae_int_t nic,
     ae_int_t nlec,
     ae_int_t nlic,
     double epsx,
     ae_int_t maxits,
     minslpstate* state,
     ae_state *_state);
ae_bool minslpiteration(minslpstate* state,
     smoothnessmonitor* smonitor,
     ae_bool userterminationneeded,
     ae_state *_state);
void _minslpsubsolver_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minslpsubsolver_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minslpsubsolver_clear(void* _p);
void _minslpsubsolver_destroy(void* _p);
void _minslptmplagrangian_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minslptmplagrangian_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minslptmplagrangian_clear(void* _p);
void _minslptmplagrangian_destroy(void* _p);
void _minslptmpmerit_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minslptmpmerit_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minslptmpmerit_clear(void* _p);
void _minslptmpmerit_destroy(void* _p);
void _minslpphase13state_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minslpphase13state_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minslpphase13state_clear(void* _p);
void _minslpphase13state_destroy(void* _p);
void _minslpphase2state_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minslpphase2state_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minslpphase2state_clear(void* _p);
void _minslpphase2state_destroy(void* _p);
void _minslpstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minslpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minslpstate_clear(void* _p);
void _minslpstate_destroy(void* _p);
#endif
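/*************************************************************************
MINNLC subpackage: nonlinearly constrained optimizer supporting box, linear
and nonlinear constraints; the AUL (augmented Lagrangian), SLP and SQP
algorithms are selected via the minnlcsetalgo*() functions.
*************************************************************************/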
#if defined(AE_COMPILE_MINNLC) || !defined(AE_PARTIAL_BUILD)
void minnlccreate(ae_int_t n,
     /* Real    */ ae_vector* x,
     minnlcstate* state,
     ae_state *_state);
void minnlccreatef(ae_int_t n,
     /* Real    */ ae_vector* x,
     double diffstep,
     minnlcstate* state,
     ae_state *_state);
void minnlcsetbc(minnlcstate* state,
     /* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     ae_state *_state);
void minnlcsetlc(minnlcstate* state,
     /* Real    */ ae_matrix* c,
     /* Integer */ ae_vector* ct,
     ae_int_t k,
     ae_state *_state);
void minnlcsetnlc(minnlcstate* state,
     ae_int_t nlec,
     ae_int_t nlic,
     ae_state *_state);
void minnlcsetcond(minnlcstate* state,
     double epsx,
     ae_int_t maxits,
     ae_state *_state);
void minnlcsetscale(minnlcstate* state,
     /* Real    */ ae_vector* s,
     ae_state *_state);
void minnlcsetprecinexact(minnlcstate* state, ae_state *_state);
void minnlcsetprecexactlowrank(minnlcstate* state,
     ae_int_t updatefreq,
     ae_state *_state);
void minnlcsetprecexactrobust(minnlcstate* state,
     ae_int_t updatefreq,
     ae_state *_state);
void minnlcsetprecnone(minnlcstate* state, ae_state *_state);
void minnlcsetstpmax(minnlcstate* state, double stpmax, ae_state *_state);
void minnlcsetalgoaul(minnlcstate* state,
     double rho,
     ae_int_t itscnt,
     ae_state *_state);
void minnlcsetalgoslp(minnlcstate* state, ae_state *_state);
void minnlcsetalgosqp(minnlcstate* state, ae_state *_state);
void minnlcsetxrep(minnlcstate* state, ae_bool needxrep, ae_state *_state);
ae_bool minnlciteration(minnlcstate* state, ae_state *_state);
void minnlcoptguardgradient(minnlcstate* state,
     double teststep,
     ae_state *_state);
void minnlcoptguardsmoothness(minnlcstate* state,
     ae_int_t level,
     ae_state *_state);
void minnlcoptguardresults(minnlcstate* state,
     optguardreport* rep,
     ae_state *_state);
void minnlcoptguardnonc1test0results(minnlcstate* state,
     optguardnonc1test0report* strrep,
     optguardnonc1test0report* lngrep,
     ae_state *_state);
void minnlcoptguardnonc1test1results(minnlcstate* state,
     optguardnonc1test1report* strrep,
     optguardnonc1test1report* lngrep,
     ae_state *_state);
void minnlcresults(minnlcstate* state,
     /* Real    */ ae_vector* x,
     minnlcreport* rep,
     ae_state *_state);
void minnlcresultsbuf(minnlcstate* state,
     /* Real    */ ae_vector* x,
     minnlcreport* rep,
     ae_state *_state);
void minnlcrequesttermination(minnlcstate* state, ae_state *_state);
void minnlcrestartfrom(minnlcstate* state,
     /* Real    */ ae_vector* x,
     ae_state *_state);
void minnlcequalitypenaltyfunction(double alpha,
     double* f,
     double* df,
     double* d2f,
     ae_state *_state);
void minnlcinequalitypenaltyfunction(double alpha,
     double stabilizingpoint,
     double* f,
     double* df,
     double* d2f,
     ae_state *_state);
void minnlcinequalityshiftfunction(double alpha,
     double* f,
     double* df,
     double* d2f,
     ae_state *_state);
void _minnlcstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minnlcstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minnlcstate_clear(void* _p);
void _minnlcstate_destroy(void* _p);
void _minnlcreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minnlcreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minnlcreport_clear(void* _p);
void _minnlcreport_destroy(void* _p);
#endif
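/*************************************************************************
MINNS subpackage: nonsmooth constrained optimizer based on the AGS
(adaptive gradient sampling) algorithm configured by minnssetalgoags().
*************************************************************************/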
#if defined(AE_COMPILE_MINNS) || !defined(AE_PARTIAL_BUILD)
void minnscreate(ae_int_t n,
     /* Real    */ ae_vector* x,
     minnsstate* state,
     ae_state *_state);
void minnscreatef(ae_int_t n,
     /* Real    */ ae_vector* x,
     double diffstep,
     minnsstate* state,
     ae_state *_state);
void minnssetbc(minnsstate* state,
     /* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     ae_state *_state);
void minnssetlc(minnsstate* state,
     /* Real    */ ae_matrix* c,
     /* Integer */ ae_vector* ct,
     ae_int_t k,
     ae_state *_state);
void minnssetnlc(minnsstate* state,
     ae_int_t nlec,
     ae_int_t nlic,
     ae_state *_state);
void minnssetcond(minnsstate* state,
     double epsx,
     ae_int_t maxits,
     ae_state *_state);
void minnssetscale(minnsstate* state,
     /* Real    */ ae_vector* s,
     ae_state *_state);
void minnssetalgoags(minnsstate* state,
     double radius,
     double penalty,
     ae_state *_state);
void minnssetxrep(minnsstate* state, ae_bool needxrep, ae_state *_state);
void minnsrequesttermination(minnsstate* state, ae_state *_state);
ae_bool minnsiteration(minnsstate* state, ae_state *_state);
void minnsresults(minnsstate* state,
     /* Real    */ ae_vector* x,
     minnsreport* rep,
     ae_state *_state);
void minnsresultsbuf(minnsstate* state,
     /* Real    */ ae_vector* x,
     minnsreport* rep,
     ae_state *_state);
void minnsrestartfrom(minnsstate* state,
     /* Real    */ ae_vector* x,
     ae_state *_state);
void _minnsqp_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minnsqp_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minnsqp_clear(void* _p);
void _minnsqp_destroy(void* _p);
void _minnsstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minnsstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minnsstate_clear(void* _p);
void _minnsstate_destroy(void* _p);
void _minnsreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minnsreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minnsreport_clear(void* _p);
void _minnsreport_destroy(void* _p);
#endif
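/*************************************************************************
MINCOMP subpackage: backward-compatibility declarations (MinASA and legacy
tuning functions for MinLBFGS/MinBLEIC) retained for code written against
older ALGLIB releases.
*************************************************************************/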
#if defined(AE_COMPILE_MINCOMP) || !defined(AE_PARTIAL_BUILD)
void minlbfgssetdefaultpreconditioner(minlbfgsstate* state,
     ae_state *_state);
void minlbfgssetcholeskypreconditioner(minlbfgsstate* state,
     /* Real    */ ae_matrix* p,
     ae_bool isupper,
     ae_state *_state);
void minbleicsetbarrierwidth(minbleicstate* state,
     double mu,
     ae_state *_state);
void minbleicsetbarrierdecay(minbleicstate* state,
     double mudecay,
     ae_state *_state);
void minasacreate(ae_int_t n,
     /* Real    */ ae_vector* x,
     /* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     minasastate* state,
     ae_state *_state);
void minasasetcond(minasastate* state,
     double epsg,
     double epsf,
     double epsx,
     ae_int_t maxits,
     ae_state *_state);
void minasasetxrep(minasastate* state, ae_bool needxrep, ae_state *_state);
void minasasetalgorithm(minasastate* state,
     ae_int_t algotype,
     ae_state *_state);
void minasasetstpmax(minasastate* state, double stpmax, ae_state *_state);
ae_bool minasaiteration(minasastate* state, ae_state *_state);
void minasaresults(minasastate* state,
     /* Real    */ ae_vector* x,
     minasareport* rep,
     ae_state *_state);
void minasaresultsbuf(minasastate* state,
     /* Real    */ ae_vector* x,
     minasareport* rep,
     ae_state *_state);
void minasarestartfrom(minasastate* state,
     /* Real    */ ae_vector* x,
     /* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     ae_state *_state);
void _minasastate_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minasastate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minasastate_clear(void* _p);
void _minasastate_destroy(void* _p);
void _minasareport_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minasareport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minasareport_clear(void* _p);
void _minasareport_destroy(void* _p);
#endif
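/*************************************************************************
MINBC subpackage: optimizer for box-constrained problems, with
preconditioning options and OptGuard integrity checks.
*************************************************************************/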
#if defined(AE_COMPILE_MINBC) || !defined(AE_PARTIAL_BUILD)
void minbccreate(ae_int_t n,
     /* Real    */ ae_vector* x,
     minbcstate* state,
     ae_state *_state);
void minbccreatef(ae_int_t n,
     /* Real    */ ae_vector* x,
     double diffstep,
     minbcstate* state,
     ae_state *_state);
void minbcsetbc(minbcstate* state,
     /* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     ae_state *_state);
void minbcsetcond(minbcstate* state,
     double epsg,
     double epsf,
     double epsx,
     ae_int_t maxits,
     ae_state *_state);
void minbcsetscale(minbcstate* state,
     /* Real    */ ae_vector* s,
     ae_state *_state);
void minbcsetprecdefault(minbcstate* state, ae_state *_state);
void minbcsetprecdiag(minbcstate* state,
     /* Real    */ ae_vector* d,
     ae_state *_state);
void minbcsetprecscale(minbcstate* state, ae_state *_state);
void minbcsetxrep(minbcstate* state, ae_bool needxrep, ae_state *_state);
void minbcsetstpmax(minbcstate* state, double stpmax, ae_state *_state);
ae_bool minbciteration(minbcstate* state, ae_state *_state);
void minbcoptguardgradient(minbcstate* state,
     double teststep,
     ae_state *_state);
void minbcoptguardsmoothness(minbcstate* state,
     ae_int_t level,
     ae_state *_state);
void minbcoptguardresults(minbcstate* state,
     optguardreport* rep,
     ae_state *_state);
void minbcoptguardnonc1test0results(minbcstate* state,
     optguardnonc1test0report* strrep,
     optguardnonc1test0report* lngrep,
     ae_state *_state);
void minbcoptguardnonc1test1results(minbcstate* state,
     optguardnonc1test1report* strrep,
     optguardnonc1test1report* lngrep,
     ae_state *_state);
void minbcresults(minbcstate* state,
     /* Real    */ ae_vector* x,
     minbcreport* rep,
     ae_state *_state);
void minbcresultsbuf(minbcstate* state,
     /* Real    */ ae_vector* x,
     minbcreport* rep,
     ae_state *_state);
void minbcrestartfrom(minbcstate* state,
     /* Real    */ ae_vector* x,
     ae_state *_state);
void minbcrequesttermination(minbcstate* state, ae_state *_state);
void _minbcstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minbcstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minbcstate_clear(void* _p);
void _minbcstate_destroy(void* _p);
void _minbcreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _minbcreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _minbcreport_clear(void* _p);
void _minbcreport_destroy(void* _p);
#endif
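/*************************************************************************
OPTS subpackage: utilities for creating, serializing and unserializing LP
test problems, plus the debug helper xdbgminlpcreatefromtestproblem().
*************************************************************************/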
#if defined(AE_COMPILE_OPTS) || !defined(AE_PARTIAL_BUILD)
void lptestproblemcreate(ae_int_t n,
     ae_bool hasknowntarget,
     double targetf,
     lptestproblem* p,
     ae_state *_state);
void lptestproblemsetscale(lptestproblem* p,
     /* Real    */ ae_vector* s,
     ae_state *_state);
void lptestproblemsetcost(lptestproblem* p,
     /* Real    */ ae_vector* c,
     ae_state *_state);
void lptestproblemsetbc(lptestproblem* p,
     /* Real    */ ae_vector* bndl,
     /* Real    */ ae_vector* bndu,
     ae_state *_state);
void lptestproblemsetlc2(lptestproblem* p,
     sparsematrix* a,
     /* Real    */ ae_vector* al,
     /* Real    */ ae_vector* au,
     ae_int_t m,
     ae_state *_state);
void lptestproblemalloc(ae_serializer* s,
     lptestproblem* p,
     ae_state *_state);
void lptestproblemserialize(ae_serializer* s,
     lptestproblem* p,
     ae_state *_state);
void lptestproblemunserialize(ae_serializer* s,
     lptestproblem* p,
     ae_state *_state);
void xdbgminlpcreatefromtestproblem(lptestproblem* p,
     minlpstate* state,
     ae_state *_state);
void _lptestproblem_init(void* _p, ae_state *_state, ae_bool make_automatic);
void _lptestproblem_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
void _lptestproblem_clear(void* _p);
void _lptestproblem_destroy(void* _p);
#endif

}
#endif