1 /*************************************************************************
2 ALGLIB 3.15.0 (source code generated 2019-02-20)
3 Copyright (c) Sergey Bochkanov (ALGLIB project).
4 
5 >>> SOURCE LICENSE >>>
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation (www.fsf.org); either version 2 of the
9 License, or (at your option) any later version.
10 
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 A copy of the GNU General Public License is available at
17 http://www.fsf.org/licensing/licenses
18 >>> END OF LICENSE >>>
19 *************************************************************************/
20 #ifndef _dataanalysis_pkg_h
21 #define _dataanalysis_pkg_h
22 #include "ap.h"
23 #include "alglibinternal.h"
24 #include "alglibmisc.h"
25 #include "linalg.h"
26 #include "statistics.h"
27 #include "specialfunctions.h"
28 #include "solvers.h"
29 #include "optimization.h"
30 
31 /////////////////////////////////////////////////////////////////////////
32 //
33 // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES)
34 //
35 /////////////////////////////////////////////////////////////////////////
36 namespace alglib_impl
37 {
38 #if defined(AE_COMPILE_PCA) || !defined(AE_PARTIAL_BUILD)
39 #endif
40 #if defined(AE_COMPILE_BDSS) || !defined(AE_PARTIAL_BUILD)
41 typedef struct
42 {
43     double relclserror;
44     double avgce;
45     double rmserror;
46     double avgerror;
47     double avgrelerror;
48 } cvreport;
49 #endif
50 #if defined(AE_COMPILE_MLPBASE) || !defined(AE_PARTIAL_BUILD)
51 typedef struct
52 {
53     double relclserror;
54     double avgce;
55     double rmserror;
56     double avgerror;
57     double avgrelerror;
58 } modelerrors;
59 typedef struct
60 {
61     double f;
62     ae_vector g;
63 } smlpgrad;
64 typedef struct
65 {
66     ae_int_t hlnetworktype;
67     ae_int_t hlnormtype;
68     ae_vector hllayersizes;
69     ae_vector hlconnections;
70     ae_vector hlneurons;
71     ae_vector structinfo;
72     ae_vector weights;
73     ae_vector columnmeans;
74     ae_vector columnsigmas;
75     ae_vector neurons;
76     ae_vector dfdnet;
77     ae_vector derror;
78     ae_vector x;
79     ae_vector y;
80     ae_matrix xy;
81     ae_vector xyrow;
82     ae_vector nwbuf;
83     ae_vector integerbuf;
84     modelerrors err;
85     ae_vector rndbuf;
86     ae_shared_pool buf;
87     ae_shared_pool gradbuf;
88     ae_matrix dummydxy;
89     sparsematrix dummysxy;
90     ae_vector dummyidx;
91     ae_shared_pool dummypool;
92 } multilayerperceptron;
93 #endif
94 #if defined(AE_COMPILE_LDA) || !defined(AE_PARTIAL_BUILD)
95 #endif
96 #if defined(AE_COMPILE_SSA) || !defined(AE_PARTIAL_BUILD)
97 typedef struct
98 {
99     ae_int_t nsequences;
100     ae_vector sequenceidx;
101     ae_vector sequencedata;
102     ae_int_t algotype;
103     ae_int_t windowwidth;
104     ae_int_t rtpowerup;
105     ae_int_t topk;
106     ae_int_t precomputedwidth;
107     ae_int_t precomputednbasis;
108     ae_matrix precomputedbasis;
109     ae_int_t defaultsubspaceits;
110     ae_int_t memorylimit;
111     ae_bool arebasisandsolvervalid;
112     ae_matrix basis;
113     ae_matrix basist;
114     ae_vector sv;
115     ae_vector forecasta;
116     ae_int_t nbasis;
117     eigsubspacestate solver;
118     ae_matrix xxt;
119     hqrndstate rs;
120     ae_int_t rngseed;
121     ae_vector rtqueue;
122     ae_int_t rtqueuecnt;
123     ae_int_t rtqueuechunk;
124     ae_int_t dbgcntevd;
125     ae_vector tmp0;
126     ae_vector tmp1;
127     eigsubspacereport solverrep;
128     ae_vector alongtrend;
129     ae_vector alongnoise;
130     ae_matrix aseqtrajectory;
131     ae_matrix aseqtbproduct;
132     ae_vector aseqcounts;
133     ae_vector fctrend;
134     ae_vector fcnoise;
135     ae_matrix fctrendm;
136     ae_matrix uxbatch;
137     ae_int_t uxbatchwidth;
138     ae_int_t uxbatchsize;
139     ae_int_t uxbatchlimit;
140 } ssamodel;
141 #endif
142 #if defined(AE_COMPILE_LINREG) || !defined(AE_PARTIAL_BUILD)
143 typedef struct
144 {
145     ae_vector w;
146 } linearmodel;
147 typedef struct
148 {
149     ae_matrix c;
150     double rmserror;
151     double avgerror;
152     double avgrelerror;
153     double cvrmserror;
154     double cvavgerror;
155     double cvavgrelerror;
156     ae_int_t ncvdefects;
157     ae_vector cvdefects;
158 } lrreport;
159 #endif
160 #if defined(AE_COMPILE_FILTERS) || !defined(AE_PARTIAL_BUILD)
161 #endif
162 #if defined(AE_COMPILE_LOGIT) || !defined(AE_PARTIAL_BUILD)
163 typedef struct
164 {
165     ae_vector w;
166 } logitmodel;
167 typedef struct
168 {
169     ae_bool brackt;
170     ae_bool stage1;
171     ae_int_t infoc;
172     double dg;
173     double dgm;
174     double dginit;
175     double dgtest;
176     double dgx;
177     double dgxm;
178     double dgy;
179     double dgym;
180     double finit;
181     double ftest1;
182     double fm;
183     double fx;
184     double fxm;
185     double fy;
186     double fym;
187     double stx;
188     double sty;
189     double stmin;
190     double stmax;
191     double width;
192     double width1;
193     double xtrapf;
194 } logitmcstate;
195 typedef struct
196 {
197     ae_int_t ngrad;
198     ae_int_t nhess;
199 } mnlreport;
200 #endif
201 #if defined(AE_COMPILE_MCPD) || !defined(AE_PARTIAL_BUILD)
202 typedef struct
203 {
204     ae_int_t n;
205     ae_vector states;
206     ae_int_t npairs;
207     ae_matrix data;
208     ae_matrix ec;
209     ae_matrix bndl;
210     ae_matrix bndu;
211     ae_matrix c;
212     ae_vector ct;
213     ae_int_t ccnt;
214     ae_vector pw;
215     ae_matrix priorp;
216     double regterm;
217     minbleicstate bs;
218     ae_int_t repinneriterationscount;
219     ae_int_t repouteriterationscount;
220     ae_int_t repnfev;
221     ae_int_t repterminationtype;
222     minbleicreport br;
223     ae_vector tmpp;
224     ae_vector effectivew;
225     ae_vector effectivebndl;
226     ae_vector effectivebndu;
227     ae_matrix effectivec;
228     ae_vector effectivect;
229     ae_vector h;
230     ae_matrix p;
231 } mcpdstate;
232 typedef struct
233 {
234     ae_int_t inneriterationscount;
235     ae_int_t outeriterationscount;
236     ae_int_t nfev;
237     ae_int_t terminationtype;
238 } mcpdreport;
239 #endif
240 #if defined(AE_COMPILE_MLPE) || !defined(AE_PARTIAL_BUILD)
241 typedef struct
242 {
243     ae_int_t ensemblesize;
244     ae_vector weights;
245     ae_vector columnmeans;
246     ae_vector columnsigmas;
247     multilayerperceptron network;
248     ae_vector y;
249 } mlpensemble;
250 #endif
251 #if defined(AE_COMPILE_MLPTRAIN) || !defined(AE_PARTIAL_BUILD)
252 typedef struct
253 {
254     double relclserror;
255     double avgce;
256     double rmserror;
257     double avgerror;
258     double avgrelerror;
259     ae_int_t ngrad;
260     ae_int_t nhess;
261     ae_int_t ncholesky;
262 } mlpreport;
263 typedef struct
264 {
265     double relclserror;
266     double avgce;
267     double rmserror;
268     double avgerror;
269     double avgrelerror;
270 } mlpcvreport;
271 typedef struct
272 {
273     ae_vector bestparameters;
274     double bestrmserror;
275     ae_bool randomizenetwork;
276     multilayerperceptron network;
277     minlbfgsstate optimizer;
278     minlbfgsreport optimizerrep;
279     ae_vector wbuf0;
280     ae_vector wbuf1;
281     ae_vector allminibatches;
282     ae_vector currentminibatch;
283     rcommstate rstate;
284     ae_int_t algoused;
285     ae_int_t minibatchsize;
286     hqrndstate generator;
287 } smlptrnsession;
288 typedef struct
289 {
290     ae_vector trnsubset;
291     ae_vector valsubset;
292     ae_shared_pool mlpsessions;
293     mlpreport mlprep;
294     multilayerperceptron network;
295 } mlpetrnsession;
296 typedef struct
297 {
298     ae_int_t nin;
299     ae_int_t nout;
300     ae_bool rcpar;
301     ae_int_t lbfgsfactor;
302     double decay;
303     double wstep;
304     ae_int_t maxits;
305     ae_int_t datatype;
306     ae_int_t npoints;
307     ae_matrix densexy;
308     sparsematrix sparsexy;
309     smlptrnsession session;
310     ae_int_t ngradbatch;
311     ae_vector subset;
312     ae_int_t subsetsize;
313     ae_vector valsubset;
314     ae_int_t valsubsetsize;
315     ae_int_t algokind;
316     ae_int_t minibatchsize;
317 } mlptrainer;
318 typedef struct
319 {
320     multilayerperceptron network;
321     mlpreport rep;
322     ae_vector subset;
323     ae_int_t subsetsize;
324     ae_vector xyrow;
325     ae_vector y;
326     ae_int_t ngrad;
327     ae_shared_pool trnpool;
328 } mlpparallelizationcv;
329 #endif
330 #if defined(AE_COMPILE_CLUSTERING) || !defined(AE_PARTIAL_BUILD)
331 typedef struct
332 {
333     ae_matrix ct;
334     ae_matrix ctbest;
335     ae_vector xycbest;
336     ae_vector xycprev;
337     ae_vector d2;
338     ae_vector csizes;
339     apbuffers initbuf;
340     ae_shared_pool updatepool;
341 } kmeansbuffers;
342 typedef struct
343 {
344     ae_int_t npoints;
345     ae_int_t nfeatures;
346     ae_int_t disttype;
347     ae_matrix xy;
348     ae_matrix d;
349     ae_int_t ahcalgo;
350     ae_int_t kmeansrestarts;
351     ae_int_t kmeansmaxits;
352     ae_int_t kmeansinitalgo;
353     ae_bool kmeansdbgnoits;
354     ae_int_t seed;
355     ae_matrix tmpd;
356     apbuffers distbuf;
357     kmeansbuffers kmeanstmp;
358 } clusterizerstate;
359 typedef struct
360 {
361     ae_int_t terminationtype;
362     ae_int_t npoints;
363     ae_vector p;
364     ae_matrix z;
365     ae_matrix pz;
366     ae_matrix pm;
367     ae_vector mergedist;
368 } ahcreport;
369 typedef struct
370 {
371     ae_int_t npoints;
372     ae_int_t nfeatures;
373     ae_int_t terminationtype;
374     ae_int_t iterationscount;
375     double energy;
376     ae_int_t k;
377     ae_matrix c;
378     ae_vector cidx;
379 } kmeansreport;
380 #endif
381 #if defined(AE_COMPILE_DFOREST) || !defined(AE_PARTIAL_BUILD)
382 typedef struct
383 {
384     ae_int_t dstype;
385     ae_int_t npoints;
386     ae_int_t nvars;
387     ae_int_t nclasses;
388     ae_vector dsdata;
389     ae_vector dsrval;
390     ae_vector dsival;
391     ae_int_t rdfalgo;
392     double rdfratio;
393     double rdfvars;
394     ae_int_t rdfglobalseed;
395     ae_int_t rdfsplitstrength;
396     ae_vector dsmin;
397     ae_vector dsmax;
398     ae_vector dsbinary;
399     double dsravg;
400     ae_vector dsctotals;
401     ae_int_t rdfprogress;
402     ae_int_t rdftotal;
403     ae_shared_pool workpool;
404     ae_shared_pool votepool;
405     ae_shared_pool treepool;
406     ae_shared_pool treefactory;
407 } decisionforestbuilder;
408 typedef struct
409 {
410     ae_vector classpriors;
411     ae_vector varpool;
412     ae_int_t varpoolsize;
413     ae_vector trnset;
414     ae_int_t trnsize;
415     ae_vector trnlabelsr;
416     ae_vector trnlabelsi;
417     ae_vector oobset;
418     ae_int_t oobsize;
419     ae_vector treebuf;
420     ae_vector curvals;
421     ae_vector bestvals;
422     ae_vector tmp0i;
423     ae_vector tmp1i;
424     ae_vector tmp0r;
425     ae_vector tmp1r;
426     ae_vector tmp2r;
427     ae_vector tmp3r;
428     ae_vector classtotals0;
429     ae_vector classtotals1;
430     ae_vector classtotals01;
431 } dfworkbuf;
432 typedef struct
433 {
434     ae_vector trntotals;
435     ae_vector oobtotals;
436     ae_vector trncounts;
437     ae_vector oobcounts;
438 } dfvotebuf;
439 typedef struct
440 {
441     ae_vector treebuf;
442 } dftreebuf;
443 typedef struct
444 {
445     ae_vector x;
446     ae_vector y;
447 } decisionforestbuffer;
448 typedef struct
449 {
450     ae_int_t nvars;
451     ae_int_t nclasses;
452     ae_int_t ntrees;
453     ae_int_t bufsize;
454     ae_vector trees;
455     decisionforestbuffer buffer;
456 } decisionforest;
457 typedef struct
458 {
459     double relclserror;
460     double avgce;
461     double rmserror;
462     double avgerror;
463     double avgrelerror;
464     double oobrelclserror;
465     double oobavgce;
466     double oobrmserror;
467     double oobavgerror;
468     double oobavgrelerror;
469 } dfreport;
470 typedef struct
471 {
472     ae_vector treebuf;
473     ae_vector idxbuf;
474     ae_vector tmpbufr;
475     ae_vector tmpbufr2;
476     ae_vector tmpbufi;
477     ae_vector classibuf;
478     ae_vector sortrbuf;
479     ae_vector sortrbuf2;
480     ae_vector sortibuf;
481     ae_vector varpool;
482     ae_vector evsbin;
483     ae_vector evssplits;
484 } dfinternalbuffers;
485 #endif
486 #if defined(AE_COMPILE_KNN) || !defined(AE_PARTIAL_BUILD)
487 typedef struct
488 {
489     kdtreerequestbuffer treebuf;
490     ae_vector x;
491     ae_vector y;
492     ae_vector tags;
493     ae_matrix xy;
494 } knnbuffer;
495 typedef struct
496 {
497     ae_int_t dstype;
498     ae_int_t npoints;
499     ae_int_t nvars;
500     ae_bool iscls;
501     ae_int_t nout;
502     ae_matrix dsdata;
503     ae_vector dsrval;
504     ae_vector dsival;
505     ae_int_t knnnrm;
506 } knnbuilder;
507 typedef struct
508 {
509     ae_int_t nvars;
510     ae_int_t nout;
511     ae_int_t k;
512     double eps;
513     ae_bool iscls;
514     ae_bool isdummy;
515     kdtree tree;
516     knnbuffer buffer;
517 } knnmodel;
518 typedef struct
519 {
520     double relclserror;
521     double avgce;
522     double rmserror;
523     double avgerror;
524     double avgrelerror;
525 } knnreport;
526 #endif
527 #if defined(AE_COMPILE_DATACOMP) || !defined(AE_PARTIAL_BUILD)
528 #endif
529 
530 }
531 
532 /////////////////////////////////////////////////////////////////////////
533 //
534 // THIS SECTION CONTAINS C++ INTERFACE
535 //
536 /////////////////////////////////////////////////////////////////////////
537 namespace alglib
538 {
539 
540 #if defined(AE_COMPILE_PCA) || !defined(AE_PARTIAL_BUILD)
541 
542 #endif
543 
544 #if defined(AE_COMPILE_BDSS) || !defined(AE_PARTIAL_BUILD)
545 
546 #endif
547 
548 #if defined(AE_COMPILE_MLPBASE) || !defined(AE_PARTIAL_BUILD)
549 /*************************************************************************
550 Model's errors:
551     * RelCLSError   -   fraction of misclassified cases.
    * AvgCE         -   average cross-entropy
553     * RMSError      -   root-mean-square error
554     * AvgError      -   average error
555     * AvgRelError   -   average relative error
556 
557 NOTE 1: RelCLSError/AvgCE are zero on regression problems.
558 
559 NOTE 2: on classification problems  RMSError/AvgError/AvgRelError  contain
560         errors in prediction of posterior probabilities
561 *************************************************************************/
562 class _modelerrors_owner
563 {
564 public:
565     _modelerrors_owner();
566     _modelerrors_owner(const _modelerrors_owner &rhs);
567     _modelerrors_owner& operator=(const _modelerrors_owner &rhs);
568     virtual ~_modelerrors_owner();
569     alglib_impl::modelerrors* c_ptr();
570     alglib_impl::modelerrors* c_ptr() const;
571 protected:
572     alglib_impl::modelerrors *p_struct;
573 };
574 class modelerrors : public _modelerrors_owner
575 {
576 public:
577     modelerrors();
578     modelerrors(const modelerrors &rhs);
579     modelerrors& operator=(const modelerrors &rhs);
580     virtual ~modelerrors();
581     double &relclserror;
582     double &avgce;
583     double &rmserror;
584     double &avgerror;
585     double &avgrelerror;
586 
587 };
588 
589 
590 /*************************************************************************
591 
592 *************************************************************************/
593 class _multilayerperceptron_owner
594 {
595 public:
596     _multilayerperceptron_owner();
597     _multilayerperceptron_owner(const _multilayerperceptron_owner &rhs);
598     _multilayerperceptron_owner& operator=(const _multilayerperceptron_owner &rhs);
599     virtual ~_multilayerperceptron_owner();
600     alglib_impl::multilayerperceptron* c_ptr();
601     alglib_impl::multilayerperceptron* c_ptr() const;
602 protected:
603     alglib_impl::multilayerperceptron *p_struct;
604 };
605 class multilayerperceptron : public _multilayerperceptron_owner
606 {
607 public:
608     multilayerperceptron();
609     multilayerperceptron(const multilayerperceptron &rhs);
610     multilayerperceptron& operator=(const multilayerperceptron &rhs);
611     virtual ~multilayerperceptron();
612 
613 };
614 #endif
615 
616 #if defined(AE_COMPILE_LDA) || !defined(AE_PARTIAL_BUILD)
617 
618 #endif
619 
620 #if defined(AE_COMPILE_SSA) || !defined(AE_PARTIAL_BUILD)
621 /*************************************************************************
622 This object stores state of the SSA model.
623 
624 You should use ALGLIB functions to work with this object.
625 *************************************************************************/
626 class _ssamodel_owner
627 {
628 public:
629     _ssamodel_owner();
630     _ssamodel_owner(const _ssamodel_owner &rhs);
631     _ssamodel_owner& operator=(const _ssamodel_owner &rhs);
632     virtual ~_ssamodel_owner();
633     alglib_impl::ssamodel* c_ptr();
634     alglib_impl::ssamodel* c_ptr() const;
635 protected:
636     alglib_impl::ssamodel *p_struct;
637 };
638 class ssamodel : public _ssamodel_owner
639 {
640 public:
641     ssamodel();
642     ssamodel(const ssamodel &rhs);
643     ssamodel& operator=(const ssamodel &rhs);
644     virtual ~ssamodel();
645 
646 };
647 #endif
648 
649 #if defined(AE_COMPILE_LINREG) || !defined(AE_PARTIAL_BUILD)
650 /*************************************************************************
651 
652 *************************************************************************/
653 class _linearmodel_owner
654 {
655 public:
656     _linearmodel_owner();
657     _linearmodel_owner(const _linearmodel_owner &rhs);
658     _linearmodel_owner& operator=(const _linearmodel_owner &rhs);
659     virtual ~_linearmodel_owner();
660     alglib_impl::linearmodel* c_ptr();
661     alglib_impl::linearmodel* c_ptr() const;
662 protected:
663     alglib_impl::linearmodel *p_struct;
664 };
665 class linearmodel : public _linearmodel_owner
666 {
667 public:
668     linearmodel();
669     linearmodel(const linearmodel &rhs);
670     linearmodel& operator=(const linearmodel &rhs);
671     virtual ~linearmodel();
672 
673 };
674 
675 
676 /*************************************************************************
677 LRReport structure contains additional information about linear model:
* C             -   covariance matrix,  array[0..NVars,0..NVars].
679                     C[i,j] = Cov(A[i],A[j])
680 * RMSError      -   root mean square error on a training set
681 * AvgError      -   average error on a training set
682 * AvgRelError   -   average relative error on a training set (excluding
683                     observations with zero function value).
684 * CVRMSError    -   leave-one-out cross-validation estimate of
685                     generalization error. Calculated using fast algorithm
686                     with O(NVars*NPoints) complexity.
687 * CVAvgError    -   cross-validation estimate of average error
688 * CVAvgRelError -   cross-validation estimate of average relative error
689 
690 All other fields of the structure are intended for internal use and should
691 not be used outside ALGLIB.
692 *************************************************************************/
693 class _lrreport_owner
694 {
695 public:
696     _lrreport_owner();
697     _lrreport_owner(const _lrreport_owner &rhs);
698     _lrreport_owner& operator=(const _lrreport_owner &rhs);
699     virtual ~_lrreport_owner();
700     alglib_impl::lrreport* c_ptr();
701     alglib_impl::lrreport* c_ptr() const;
702 protected:
703     alglib_impl::lrreport *p_struct;
704 };
705 class lrreport : public _lrreport_owner
706 {
707 public:
708     lrreport();
709     lrreport(const lrreport &rhs);
710     lrreport& operator=(const lrreport &rhs);
711     virtual ~lrreport();
712     real_2d_array c;
713     double &rmserror;
714     double &avgerror;
715     double &avgrelerror;
716     double &cvrmserror;
717     double &cvavgerror;
718     double &cvavgrelerror;
719     ae_int_t &ncvdefects;
720     integer_1d_array cvdefects;
721 
722 };
723 #endif
724 
725 #if defined(AE_COMPILE_FILTERS) || !defined(AE_PARTIAL_BUILD)
726 
727 #endif
728 
729 #if defined(AE_COMPILE_LOGIT) || !defined(AE_PARTIAL_BUILD)
730 /*************************************************************************
731 
732 *************************************************************************/
733 class _logitmodel_owner
734 {
735 public:
736     _logitmodel_owner();
737     _logitmodel_owner(const _logitmodel_owner &rhs);
738     _logitmodel_owner& operator=(const _logitmodel_owner &rhs);
739     virtual ~_logitmodel_owner();
740     alglib_impl::logitmodel* c_ptr();
741     alglib_impl::logitmodel* c_ptr() const;
742 protected:
743     alglib_impl::logitmodel *p_struct;
744 };
745 class logitmodel : public _logitmodel_owner
746 {
747 public:
748     logitmodel();
749     logitmodel(const logitmodel &rhs);
750     logitmodel& operator=(const logitmodel &rhs);
751     virtual ~logitmodel();
752 
753 };
754 
755 
756 /*************************************************************************
757 MNLReport structure contains information about training process:
758 * NGrad     -   number of gradient calculations
759 * NHess     -   number of Hessian calculations
760 *************************************************************************/
761 class _mnlreport_owner
762 {
763 public:
764     _mnlreport_owner();
765     _mnlreport_owner(const _mnlreport_owner &rhs);
766     _mnlreport_owner& operator=(const _mnlreport_owner &rhs);
767     virtual ~_mnlreport_owner();
768     alglib_impl::mnlreport* c_ptr();
769     alglib_impl::mnlreport* c_ptr() const;
770 protected:
771     alglib_impl::mnlreport *p_struct;
772 };
773 class mnlreport : public _mnlreport_owner
774 {
775 public:
776     mnlreport();
777     mnlreport(const mnlreport &rhs);
778     mnlreport& operator=(const mnlreport &rhs);
779     virtual ~mnlreport();
780     ae_int_t &ngrad;
781     ae_int_t &nhess;
782 
783 };
784 #endif
785 
786 #if defined(AE_COMPILE_MCPD) || !defined(AE_PARTIAL_BUILD)
787 /*************************************************************************
This structure is an MCPD (Markov Chains for Population Data) solver.
789 
790 You should use ALGLIB functions in order to work with this object.
791 
792   -- ALGLIB --
793      Copyright 23.05.2010 by Bochkanov Sergey
794 *************************************************************************/
795 class _mcpdstate_owner
796 {
797 public:
798     _mcpdstate_owner();
799     _mcpdstate_owner(const _mcpdstate_owner &rhs);
800     _mcpdstate_owner& operator=(const _mcpdstate_owner &rhs);
801     virtual ~_mcpdstate_owner();
802     alglib_impl::mcpdstate* c_ptr();
803     alglib_impl::mcpdstate* c_ptr() const;
804 protected:
805     alglib_impl::mcpdstate *p_struct;
806 };
807 class mcpdstate : public _mcpdstate_owner
808 {
809 public:
810     mcpdstate();
811     mcpdstate(const mcpdstate &rhs);
812     mcpdstate& operator=(const mcpdstate &rhs);
813     virtual ~mcpdstate();
814 
815 };
816 
817 
818 /*************************************************************************
This structure is an MCPD training report:
820     InnerIterationsCount    -   number of inner iterations of the
821                                 underlying optimization algorithm
822     OuterIterationsCount    -   number of outer iterations of the
823                                 underlying optimization algorithm
824     NFEV                    -   number of merit function evaluations
825     TerminationType         -   termination type
826                                 (same as for MinBLEIC optimizer, positive
827                                 values denote success, negative ones -
828                                 failure)
829 
830   -- ALGLIB --
831      Copyright 23.05.2010 by Bochkanov Sergey
832 *************************************************************************/
833 class _mcpdreport_owner
834 {
835 public:
836     _mcpdreport_owner();
837     _mcpdreport_owner(const _mcpdreport_owner &rhs);
838     _mcpdreport_owner& operator=(const _mcpdreport_owner &rhs);
839     virtual ~_mcpdreport_owner();
840     alglib_impl::mcpdreport* c_ptr();
841     alglib_impl::mcpdreport* c_ptr() const;
842 protected:
843     alglib_impl::mcpdreport *p_struct;
844 };
845 class mcpdreport : public _mcpdreport_owner
846 {
847 public:
848     mcpdreport();
849     mcpdreport(const mcpdreport &rhs);
850     mcpdreport& operator=(const mcpdreport &rhs);
851     virtual ~mcpdreport();
852     ae_int_t &inneriterationscount;
853     ae_int_t &outeriterationscount;
854     ae_int_t &nfev;
855     ae_int_t &terminationtype;
856 
857 };
858 #endif
859 
860 #if defined(AE_COMPILE_MLPE) || !defined(AE_PARTIAL_BUILD)
861 /*************************************************************************
Neural network ensemble
863 *************************************************************************/
864 class _mlpensemble_owner
865 {
866 public:
867     _mlpensemble_owner();
868     _mlpensemble_owner(const _mlpensemble_owner &rhs);
869     _mlpensemble_owner& operator=(const _mlpensemble_owner &rhs);
870     virtual ~_mlpensemble_owner();
871     alglib_impl::mlpensemble* c_ptr();
872     alglib_impl::mlpensemble* c_ptr() const;
873 protected:
874     alglib_impl::mlpensemble *p_struct;
875 };
876 class mlpensemble : public _mlpensemble_owner
877 {
878 public:
879     mlpensemble();
880     mlpensemble(const mlpensemble &rhs);
881     mlpensemble& operator=(const mlpensemble &rhs);
882     virtual ~mlpensemble();
883 
884 };
885 #endif
886 
887 #if defined(AE_COMPILE_MLPTRAIN) || !defined(AE_PARTIAL_BUILD)
888 /*************************************************************************
889 Training report:
890     * RelCLSError   -   fraction of misclassified cases.
    * AvgCE         -   average cross-entropy
892     * RMSError      -   root-mean-square error
893     * AvgError      -   average error
894     * AvgRelError   -   average relative error
895     * NGrad         -   number of gradient calculations
896     * NHess         -   number of Hessian calculations
897     * NCholesky     -   number of Cholesky decompositions
898 
899 NOTE 1: RelCLSError/AvgCE are zero on regression problems.
900 
901 NOTE 2: on classification problems  RMSError/AvgError/AvgRelError  contain
902         errors in prediction of posterior probabilities
903 *************************************************************************/
904 class _mlpreport_owner
905 {
906 public:
907     _mlpreport_owner();
908     _mlpreport_owner(const _mlpreport_owner &rhs);
909     _mlpreport_owner& operator=(const _mlpreport_owner &rhs);
910     virtual ~_mlpreport_owner();
911     alglib_impl::mlpreport* c_ptr();
912     alglib_impl::mlpreport* c_ptr() const;
913 protected:
914     alglib_impl::mlpreport *p_struct;
915 };
916 class mlpreport : public _mlpreport_owner
917 {
918 public:
919     mlpreport();
920     mlpreport(const mlpreport &rhs);
921     mlpreport& operator=(const mlpreport &rhs);
922     virtual ~mlpreport();
923     double &relclserror;
924     double &avgce;
925     double &rmserror;
926     double &avgerror;
927     double &avgrelerror;
928     ae_int_t &ngrad;
929     ae_int_t &nhess;
930     ae_int_t &ncholesky;
931 
932 };
933 
934 
935 /*************************************************************************
936 Cross-validation estimates of generalization error
937 *************************************************************************/
938 class _mlpcvreport_owner
939 {
940 public:
941     _mlpcvreport_owner();
942     _mlpcvreport_owner(const _mlpcvreport_owner &rhs);
943     _mlpcvreport_owner& operator=(const _mlpcvreport_owner &rhs);
944     virtual ~_mlpcvreport_owner();
945     alglib_impl::mlpcvreport* c_ptr();
946     alglib_impl::mlpcvreport* c_ptr() const;
947 protected:
948     alglib_impl::mlpcvreport *p_struct;
949 };
950 class mlpcvreport : public _mlpcvreport_owner
951 {
952 public:
953     mlpcvreport();
954     mlpcvreport(const mlpcvreport &rhs);
955     mlpcvreport& operator=(const mlpcvreport &rhs);
956     virtual ~mlpcvreport();
957     double &relclserror;
958     double &avgce;
959     double &rmserror;
960     double &avgerror;
961     double &avgrelerror;
962 
963 };
964 
965 
966 /*************************************************************************
967 Trainer object for neural network.
968 
969 You should not try to access fields of this object directly -  use  ALGLIB
970 functions to work with this object.
971 *************************************************************************/
972 class _mlptrainer_owner
973 {
974 public:
975     _mlptrainer_owner();
976     _mlptrainer_owner(const _mlptrainer_owner &rhs);
977     _mlptrainer_owner& operator=(const _mlptrainer_owner &rhs);
978     virtual ~_mlptrainer_owner();
979     alglib_impl::mlptrainer* c_ptr();
980     alglib_impl::mlptrainer* c_ptr() const;
981 protected:
982     alglib_impl::mlptrainer *p_struct;
983 };
984 class mlptrainer : public _mlptrainer_owner
985 {
986 public:
987     mlptrainer();
988     mlptrainer(const mlptrainer &rhs);
989     mlptrainer& operator=(const mlptrainer &rhs);
990     virtual ~mlptrainer();
991 
992 };
993 #endif
994 
995 #if defined(AE_COMPILE_CLUSTERING) || !defined(AE_PARTIAL_BUILD)
996 /*************************************************************************
This structure is a clustering engine.
998 
999 You should not try to access its fields directly.
1000 Use ALGLIB functions in order to work with this object.
1001 
1002   -- ALGLIB --
1003      Copyright 10.07.2012 by Bochkanov Sergey
1004 *************************************************************************/
1005 class _clusterizerstate_owner
1006 {
1007 public:
1008     _clusterizerstate_owner();
1009     _clusterizerstate_owner(const _clusterizerstate_owner &rhs);
1010     _clusterizerstate_owner& operator=(const _clusterizerstate_owner &rhs);
1011     virtual ~_clusterizerstate_owner();
1012     alglib_impl::clusterizerstate* c_ptr();
1013     alglib_impl::clusterizerstate* c_ptr() const;
1014 protected:
1015     alglib_impl::clusterizerstate *p_struct;
1016 };
1017 class clusterizerstate : public _clusterizerstate_owner
1018 {
1019 public:
1020     clusterizerstate();
1021     clusterizerstate(const clusterizerstate &rhs);
1022     clusterizerstate& operator=(const clusterizerstate &rhs);
1023     virtual ~clusterizerstate();
1024 
1025 };
1026 
1027 
1028 /*************************************************************************
1029 This structure  is used to store results of the agglomerative hierarchical
1030 clustering (AHC).
1031 
1032 Following information is returned:
1033 
1034 * TerminationType - completion code:
1035   * 1   for successful completion of algorithm
1036   * -5  inappropriate combination of  clustering  algorithm  and  distance
        function was used. At present this can happen  only  when  Ward's
        method is called for a dataset with a  non-Euclidean  distance
        function.
1039   In case negative completion code is returned,  other  fields  of  report
1040   structure are invalid and should not be used.
1041 
1042 * NPoints contains number of points in the original dataset
1043 
* Z contains information about merges performed  (see below).  Z  contains
  indexes from the original (unsorted) dataset and it can be used when you
  need to know what points were merged. However, it is not convenient when
  you want to build a dendrogram (see below).

* if you want to build a dendrogram, you can use Z, but it is not  a  good
  option, because Z contains indexes from the unsorted  dataset,  and  a
  dendrogram built from such a dataset is likely to have  intersections.
  So, you have to reorder your points before building the dendrogram.
  The permutation which reorders the points is returned  in  P.  Another
  representation of merges, which is more convenient  for  dendrogram
  construction, is returned in PM.
1056 
1057 * more information on format of Z, P and PM can be found below and in the
1058   examples from ALGLIB Reference Manual.
1059 
1060 FORMAL DESCRIPTION OF FIELDS:
1061     NPoints         number of points
1062     Z               array[NPoints-1,2],  contains   indexes   of  clusters
1063                     linked in pairs to  form  clustering  tree.  I-th  row
1064                     corresponds to I-th merge:
1065                     * Z[I,0] - index of the first cluster to merge
1066                     * Z[I,1] - index of the second cluster to merge
1067                     * Z[I,0]<Z[I,1]
1068                     * clusters are  numbered  from 0 to 2*NPoints-2,  with
1069                       indexes from 0 to NPoints-1 corresponding to  points
1070                       of the original dataset, and indexes from NPoints to
1071                       2*NPoints-2  correspond  to  clusters  generated  by
1072                       subsequent  merges  (I-th  row  of Z creates cluster
1073                       with index NPoints+I).
1074 
1075                     IMPORTANT: indexes in Z[] are indexes in the ORIGINAL,
1076                     unsorted dataset. In addition to  Z algorithm  outputs
1077                     permutation which rearranges points in such  way  that
1078                     subsequent merges are  performed  on  adjacent  points
1079                     (such order is needed if you want to build dendrogram).
1080                     However,  indexes  in  Z  are  related  to   original,
1081                     unrearranged sequence of points.
1082 
1083     P               array[NPoints], permutation which reorders points  for
1084                     dendrogram  construction.  P[i] contains  index of the
1085                     position  where  we  should  move  I-th  point  of the
1086                     original dataset in order to apply merges PZ/PM.
1087 
    PZ              same as Z, but for the permutation of points given  by
                    P. Only the indexes of the  original  points  change;
                    cluster indexes remain the same.
1091 
1092     MergeDist       array[NPoints-1], contains distances between  clusters
                    being merged (MergeDist[i] corresponds to  the  merge
                    stored in Z[i,...]):
1095                     * CLINK, SLINK and  average  linkage algorithms report
1096                       "raw", unmodified distance metric.
1097                     * Ward's   method   reports   weighted   intra-cluster
1098                       variance, which is equal to ||Ca-Cb||^2 * Sa*Sb/(Sa+Sb).
1099                       Here  A  and  B  are  clusters being merged, Ca is a
1100                       center of A, Cb is a center of B, Sa is a size of A,
1101                       Sb is a size of B.
1102 
1103     PM              array[NPoints-1,6], another representation of  merges,
1104                     which is suited for dendrogram construction. It  deals
1105                     with rearranged points (permutation P is applied)  and
                    represents merges in a form which differs from the one
                    used by Z.
1108                     For each I from 0 to NPoints-2, I-th row of PM represents
1109                     merge performed on two clusters C0 and C1. Here:
1110                     * C0 contains points with indexes PM[I,0]...PM[I,1]
1111                     * C1 contains points with indexes PM[I,2]...PM[I,3]
1112                     * indexes stored in PM are given for dataset sorted
1113                       according to permutation P
1114                     * PM[I,1]=PM[I,2]-1 (only adjacent clusters are merged)
1115                     * PM[I,0]<=PM[I,1], PM[I,2]<=PM[I,3], i.e. both
1116                       clusters contain at least one point
1117                     * heights of "subdendrograms" corresponding  to  C0/C1
1118                       are stored in PM[I,4]  and  PM[I,5].  Subdendrograms
1119                       corresponding   to   single-point   clusters    have
1120                       height=0. Dendrogram of the merge result has  height
1121                       H=max(H0,H1)+1.
1122 
1123 NOTE: there is one-to-one correspondence between merges described by Z and
1124       PM. I-th row of Z describes same merge of clusters as I-th row of PM,
1125       with "left" cluster from Z corresponding to the "left" one from PM.
1126 
1127   -- ALGLIB --
1128      Copyright 10.07.2012 by Bochkanov Sergey
1129 *************************************************************************/
1130 class _ahcreport_owner
1131 {
1132 public:
1133     _ahcreport_owner();
1134     _ahcreport_owner(const _ahcreport_owner &rhs);
1135     _ahcreport_owner& operator=(const _ahcreport_owner &rhs);
1136     virtual ~_ahcreport_owner();
1137     alglib_impl::ahcreport* c_ptr();
1138     alglib_impl::ahcreport* c_ptr() const;
1139 protected:
1140     alglib_impl::ahcreport *p_struct;
1141 };
1142 class ahcreport : public _ahcreport_owner
1143 {
1144 public:
1145     ahcreport();
1146     ahcreport(const ahcreport &rhs);
1147     ahcreport& operator=(const ahcreport &rhs);
1148     virtual ~ahcreport();
1149     ae_int_t &terminationtype;
1150     ae_int_t &npoints;
1151     integer_1d_array p;
1152     integer_2d_array z;
1153     integer_2d_array pz;
1154     integer_2d_array pm;
1155     real_1d_array mergedist;
1156 
1157 };
1158 
1159 
1160 /*************************************************************************
1161 This  structure   is  used  to  store  results of the  k-means  clustering
1162 algorithm.
1163 
1164 Following information is always returned:
1165 * NPoints contains number of points in the original dataset
1166 * TerminationType contains completion code, negative on failure, positive
1167   on success
1168 * K contains number of clusters
1169 
1170 For positive TerminationType we return:
1171 * NFeatures contains number of variables in the original dataset
1172 * C, which contains centers found by algorithm
1173 * CIdx, which maps points of the original dataset to clusters
1174 
1175 FORMAL DESCRIPTION OF FIELDS:
1176     NPoints         number of points, >=0
1177     NFeatures       number of variables, >=1
1178     TerminationType completion code:
1179                     * -5 if  distance  type  is  anything  different  from
1180                          Euclidean metric
                    * -3 for degenerate dataset: a) fewer than K  distinct
                         points, b) K=0 for a non-empty dataset.
1183                     * +1 for successful completion
1184     K               number of clusters
1185     C               array[K,NFeatures], rows of the array store centers
1186     CIdx            array[NPoints], which contains cluster indexes
1187     IterationsCount actual number of iterations performed by clusterizer.
1188                     If algorithm performed more than one random restart,
1189                     total number of iterations is returned.
1190     Energy          merit function, "energy", sum  of  squared  deviations
1191                     from cluster centers
1192 
1193   -- ALGLIB --
1194      Copyright 27.11.2012 by Bochkanov Sergey
1195 *************************************************************************/
1196 class _kmeansreport_owner
1197 {
1198 public:
1199     _kmeansreport_owner();
1200     _kmeansreport_owner(const _kmeansreport_owner &rhs);
1201     _kmeansreport_owner& operator=(const _kmeansreport_owner &rhs);
1202     virtual ~_kmeansreport_owner();
1203     alglib_impl::kmeansreport* c_ptr();
1204     alglib_impl::kmeansreport* c_ptr() const;
1205 protected:
1206     alglib_impl::kmeansreport *p_struct;
1207 };
1208 class kmeansreport : public _kmeansreport_owner
1209 {
1210 public:
1211     kmeansreport();
1212     kmeansreport(const kmeansreport &rhs);
1213     kmeansreport& operator=(const kmeansreport &rhs);
1214     virtual ~kmeansreport();
1215     ae_int_t &npoints;
1216     ae_int_t &nfeatures;
1217     ae_int_t &terminationtype;
1218     ae_int_t &iterationscount;
1219     double &energy;
1220     ae_int_t &k;
1221     real_2d_array c;
1222     integer_1d_array cidx;
1223 
1224 };
1225 #endif
1226 
1227 #if defined(AE_COMPILE_DFOREST) || !defined(AE_PARTIAL_BUILD)
1228 /*************************************************************************
1229 A random forest (decision forest) builder object.
1230 
1231 Used to store dataset and specify random forest training algorithm settings.
1232 *************************************************************************/
1233 class _decisionforestbuilder_owner
1234 {
1235 public:
1236     _decisionforestbuilder_owner();
1237     _decisionforestbuilder_owner(const _decisionforestbuilder_owner &rhs);
1238     _decisionforestbuilder_owner& operator=(const _decisionforestbuilder_owner &rhs);
1239     virtual ~_decisionforestbuilder_owner();
1240     alglib_impl::decisionforestbuilder* c_ptr();
1241     alglib_impl::decisionforestbuilder* c_ptr() const;
1242 protected:
1243     alglib_impl::decisionforestbuilder *p_struct;
1244 };
1245 class decisionforestbuilder : public _decisionforestbuilder_owner
1246 {
1247 public:
1248     decisionforestbuilder();
1249     decisionforestbuilder(const decisionforestbuilder &rhs);
1250     decisionforestbuilder& operator=(const decisionforestbuilder &rhs);
1251     virtual ~decisionforestbuilder();
1252 
1253 };
1254 
1255 
1256 /*************************************************************************
1257 Buffer object which is used to perform  various  requests  (usually  model
1258 inference) in the multithreaded mode (multiple threads working  with  same
1259 DF object).
1260 
1261 This object should be created with DFCreateBuffer().
1262 *************************************************************************/
1263 class _decisionforestbuffer_owner
1264 {
1265 public:
1266     _decisionforestbuffer_owner();
1267     _decisionforestbuffer_owner(const _decisionforestbuffer_owner &rhs);
1268     _decisionforestbuffer_owner& operator=(const _decisionforestbuffer_owner &rhs);
1269     virtual ~_decisionforestbuffer_owner();
1270     alglib_impl::decisionforestbuffer* c_ptr();
1271     alglib_impl::decisionforestbuffer* c_ptr() const;
1272 protected:
1273     alglib_impl::decisionforestbuffer *p_struct;
1274 };
1275 class decisionforestbuffer : public _decisionforestbuffer_owner
1276 {
1277 public:
1278     decisionforestbuffer();
1279     decisionforestbuffer(const decisionforestbuffer &rhs);
1280     decisionforestbuffer& operator=(const decisionforestbuffer &rhs);
1281     virtual ~decisionforestbuffer();
1282 
1283 };
1284 
1285 
1286 /*************************************************************************
1287 Decision forest (random forest) model.
1288 *************************************************************************/
1289 class _decisionforest_owner
1290 {
1291 public:
1292     _decisionforest_owner();
1293     _decisionforest_owner(const _decisionforest_owner &rhs);
1294     _decisionforest_owner& operator=(const _decisionforest_owner &rhs);
1295     virtual ~_decisionforest_owner();
1296     alglib_impl::decisionforest* c_ptr();
1297     alglib_impl::decisionforest* c_ptr() const;
1298 protected:
1299     alglib_impl::decisionforest *p_struct;
1300 };
1301 class decisionforest : public _decisionforest_owner
1302 {
1303 public:
1304     decisionforest();
1305     decisionforest(const decisionforest &rhs);
1306     decisionforest& operator=(const decisionforest &rhs);
1307     virtual ~decisionforest();
1308 
1309 };
1310 
1311 
1312 /*************************************************************************
1313 Decision forest training report.
1314 
1315 Following fields store training set errors:
1316 * relclserror       -   fraction of misclassified cases, [0,1]
1317 * avgce             -   average cross-entropy in bits per symbol
1318 * rmserror          -   root-mean-square error
1319 * avgerror          -   average error
1320 * avgrelerror       -   average relative error
1321 
Out-of-bag estimates are stored in fields with the same names, but with an
"oob" prefix.
1323 
1324 For classification problems:
1325 * RMS, AVG and AVGREL errors are calculated for posterior probabilities
1326 
1327 For regression problems:
1328 * RELCLS and AVGCE errors are zero
1329 *************************************************************************/
1330 class _dfreport_owner
1331 {
1332 public:
1333     _dfreport_owner();
1334     _dfreport_owner(const _dfreport_owner &rhs);
1335     _dfreport_owner& operator=(const _dfreport_owner &rhs);
1336     virtual ~_dfreport_owner();
1337     alglib_impl::dfreport* c_ptr();
1338     alglib_impl::dfreport* c_ptr() const;
1339 protected:
1340     alglib_impl::dfreport *p_struct;
1341 };
1342 class dfreport : public _dfreport_owner
1343 {
1344 public:
1345     dfreport();
1346     dfreport(const dfreport &rhs);
1347     dfreport& operator=(const dfreport &rhs);
1348     virtual ~dfreport();
1349     double &relclserror;
1350     double &avgce;
1351     double &rmserror;
1352     double &avgerror;
1353     double &avgrelerror;
1354     double &oobrelclserror;
1355     double &oobavgce;
1356     double &oobrmserror;
1357     double &oobavgerror;
1358     double &oobavgrelerror;
1359 
1360 };
1361 #endif
1362 
1363 #if defined(AE_COMPILE_KNN) || !defined(AE_PARTIAL_BUILD)
1364 /*************************************************************************
1365 Buffer object which is used to perform  various  requests  (usually  model
1366 inference) in the multithreaded mode (multiple threads working  with  same
1367 KNN object).
1368 
1369 This object should be created with KNNCreateBuffer().
1370 *************************************************************************/
1371 class _knnbuffer_owner
1372 {
1373 public:
1374     _knnbuffer_owner();
1375     _knnbuffer_owner(const _knnbuffer_owner &rhs);
1376     _knnbuffer_owner& operator=(const _knnbuffer_owner &rhs);
1377     virtual ~_knnbuffer_owner();
1378     alglib_impl::knnbuffer* c_ptr();
1379     alglib_impl::knnbuffer* c_ptr() const;
1380 protected:
1381     alglib_impl::knnbuffer *p_struct;
1382 };
1383 class knnbuffer : public _knnbuffer_owner
1384 {
1385 public:
1386     knnbuffer();
1387     knnbuffer(const knnbuffer &rhs);
1388     knnbuffer& operator=(const knnbuffer &rhs);
1389     virtual ~knnbuffer();
1390 
1391 };
1392 
1393 
1394 /*************************************************************************
A KNN builder object; this object encapsulates a dataset and all  related
settings, and is used to create an actual instance of the KNN model.
1397 *************************************************************************/
1398 class _knnbuilder_owner
1399 {
1400 public:
1401     _knnbuilder_owner();
1402     _knnbuilder_owner(const _knnbuilder_owner &rhs);
1403     _knnbuilder_owner& operator=(const _knnbuilder_owner &rhs);
1404     virtual ~_knnbuilder_owner();
1405     alglib_impl::knnbuilder* c_ptr();
1406     alglib_impl::knnbuilder* c_ptr() const;
1407 protected:
1408     alglib_impl::knnbuilder *p_struct;
1409 };
1410 class knnbuilder : public _knnbuilder_owner
1411 {
1412 public:
1413     knnbuilder();
1414     knnbuilder(const knnbuilder &rhs);
1415     knnbuilder& operator=(const knnbuilder &rhs);
1416     virtual ~knnbuilder();
1417 
1418 };
1419 
1420 
1421 /*************************************************************************
1422 KNN model, can be used for classification or regression
1423 *************************************************************************/
1424 class _knnmodel_owner
1425 {
1426 public:
1427     _knnmodel_owner();
1428     _knnmodel_owner(const _knnmodel_owner &rhs);
1429     _knnmodel_owner& operator=(const _knnmodel_owner &rhs);
1430     virtual ~_knnmodel_owner();
1431     alglib_impl::knnmodel* c_ptr();
1432     alglib_impl::knnmodel* c_ptr() const;
1433 protected:
1434     alglib_impl::knnmodel *p_struct;
1435 };
1436 class knnmodel : public _knnmodel_owner
1437 {
1438 public:
1439     knnmodel();
1440     knnmodel(const knnmodel &rhs);
1441     knnmodel& operator=(const knnmodel &rhs);
1442     virtual ~knnmodel();
1443 
1444 };
1445 
1446 
1447 /*************************************************************************
1448 KNN training report.
1449 
1450 Following fields store training set errors:
1451 * relclserror       -   fraction of misclassified cases, [0,1]
1452 * avgce             -   average cross-entropy in bits per symbol
1453 * rmserror          -   root-mean-square error
1454 * avgerror          -   average error
1455 * avgrelerror       -   average relative error
1456 
1457 For classification problems:
1458 * RMS, AVG and AVGREL errors are calculated for posterior probabilities
1459 
1460 For regression problems:
1461 * RELCLS and AVGCE errors are zero
1462 *************************************************************************/
1463 class _knnreport_owner
1464 {
1465 public:
1466     _knnreport_owner();
1467     _knnreport_owner(const _knnreport_owner &rhs);
1468     _knnreport_owner& operator=(const _knnreport_owner &rhs);
1469     virtual ~_knnreport_owner();
1470     alglib_impl::knnreport* c_ptr();
1471     alglib_impl::knnreport* c_ptr() const;
1472 protected:
1473     alglib_impl::knnreport *p_struct;
1474 };
1475 class knnreport : public _knnreport_owner
1476 {
1477 public:
1478     knnreport();
1479     knnreport(const knnreport &rhs);
1480     knnreport& operator=(const knnreport &rhs);
1481     virtual ~knnreport();
1482     double &relclserror;
1483     double &avgce;
1484     double &rmserror;
1485     double &avgerror;
1486     double &avgrelerror;
1487 
1488 };
1489 #endif
1490 
1491 #if defined(AE_COMPILE_DATACOMP) || !defined(AE_PARTIAL_BUILD)
1492 
1493 #endif
1494 
1495 #if defined(AE_COMPILE_PCA) || !defined(AE_PARTIAL_BUILD)
1496 /*************************************************************************
1497 Principal components analysis
1498 
1499 This function builds orthogonal basis  where  first  axis  corresponds  to
1500 direction with maximum variance, second axis  maximizes  variance  in  the
1501 subspace orthogonal to first axis and so on.
1502 
1503 This function builds FULL basis, i.e. returns N vectors  corresponding  to
1504 ALL directions, no matter how informative. If you need  just a  few  (say,
1505 10 or 50) of the most important directions, you may find it faster to  use
1506 one of the reduced versions:
1507 * pcatruncatedsubspace() - for subspace iteration based method
1508 
1509 It should be noted that, unlike LDA, PCA does not use class labels.
1510 
1511   ! COMMERCIAL EDITION OF ALGLIB:
1512   !
1513   ! Commercial Edition of ALGLIB includes following important improvements
1514   ! of this function:
1515   ! * high-performance native backend with same C# interface (C# version)
1516   ! * multithreading support (C++ and C# versions)
1517   ! * hardware vendor (Intel) implementations of linear algebra primitives
1518   !   (C++ and C# versions, x86/x64 platform)
1519   !
1520   ! We recommend you to read 'Working with commercial version' section  of
1521   ! ALGLIB Reference Manual in order to find out how to  use  performance-
1522   ! related features provided by commercial edition of ALGLIB.
1523 
1524 INPUT PARAMETERS:
1525     X           -   dataset, array[0..NPoints-1,0..NVars-1].
1526                     matrix contains ONLY INDEPENDENT VARIABLES.
1527     NPoints     -   dataset size, NPoints>=0
1528     NVars       -   number of independent variables, NVars>=1
1529 
1530 OUTPUT PARAMETERS:
1531     Info        -   return code:
                    * -4, if the SVD subroutine hasn't converged
                    * -1, if wrong parameters have been passed (NPoints<0,
                          NVars<1)
1535                     *  1, if task is solved
1536     S2          -   array[0..NVars-1]. variance values corresponding
1537                     to basis vectors.
1538     V           -   array[0..NVars-1,0..NVars-1]
1539                     matrix, whose columns store basis vectors.
1540 
1541   -- ALGLIB --
1542      Copyright 25.08.2008 by Bochkanov Sergey
1543 *************************************************************************/
1544 void pcabuildbasis(const real_2d_array &x, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, real_1d_array &s2, real_2d_array &v, const xparams _xparams = alglib::xdefault);
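
/*************************************************************************
A minimal call sketch for pcabuildbasis(); the 4x2 dataset is illustrative
only:

    real_2d_array x = "[[1.0,1.0],[2.0,2.1],[3.0,2.9],[4.0,4.2]]";
    ae_int_t info;
    real_1d_array s2;
    real_2d_array v;
    pcabuildbasis(x, 4, 2, info, s2, v);  // npoints=4, nvars=2
    // on success info=1; s2 holds variances in descending order and
    // the columns of v hold the corresponding basis vectors
*************************************************************************/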
1545 
1546 
1547 /*************************************************************************
1548 Principal components analysis
1549 
This function performs truncated PCA, i.e. returns just a few of the  most
important directions.
1552 
1553 Internally it uses iterative eigensolver which is very efficient when only
1554 a minor fraction of full basis is required. Thus, if you need full  basis,
1555 it is better to use pcabuildbasis() function.
1556 
1557 It should be noted that, unlike LDA, PCA does not use class labels.
1558 
1559   ! COMMERCIAL EDITION OF ALGLIB:
1560   !
1561   ! Commercial Edition of ALGLIB includes following important improvements
1562   ! of this function:
1563   ! * high-performance native backend with same C# interface (C# version)
1564   ! * multithreading support (C++ and C# versions)
1565   ! * hardware vendor (Intel) implementations of linear algebra primitives
1566   !   (C++ and C# versions, x86/x64 platform)
1567   !
1568   ! We recommend you to read 'Working with commercial version' section  of
1569   ! ALGLIB Reference Manual in order to find out how to  use  performance-
1570   ! related features provided by commercial edition of ALGLIB.
1571 
1572 INPUT PARAMETERS:
1573     X           -   dataset, array[0..NPoints-1,0..NVars-1].
1574                     matrix contains ONLY INDEPENDENT VARIABLES.
1575     NPoints     -   dataset size, NPoints>=0
1576     NVars       -   number of independent variables, NVars>=1
1577     NNeeded     -   number of requested components, in [1,NVars] range;
1578                     this function is efficient only for NNeeded<<NVars.
1579     Eps         -   desired  precision  of  vectors  returned;  underlying
1580                     solver will stop iterations as soon as absolute  error
1581                     in corresponding singular values  reduces  to  roughly
1582                     eps*MAX(lambda[]), with lambda[] being array of  eigen
1583                     values.
1584                     Zero value means that  algorithm  performs  number  of
1585                     iterations  specified  by  maxits  parameter,  without
1586                     paying attention to precision.
1587     MaxIts      -   number of iterations performed by  subspace  iteration
1588                     method. Zero value means that no  limit  on  iteration
1589                     count is placed (eps-based stopping condition is used).
1590 
1591 
1592 OUTPUT PARAMETERS:
1593     S2          -   array[NNeeded]. Variance values corresponding
1594                     to basis vectors.
1595     V           -   array[NVars,NNeeded]
1596                     matrix, whose columns store basis vectors.
1597 
NOTE: passing eps=0 and maxits=0 results in small eps  being  selected  as
      a stopping condition. Exact value of automatically selected  eps  is
      version-dependent.
1601 
1602   -- ALGLIB --
1603      Copyright 10.01.2017 by Bochkanov Sergey
1604 *************************************************************************/
1605 void pcatruncatedsubspace(const real_2d_array &x, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nneeded, const double eps, const ae_int_t maxits, real_1d_array &s2, real_2d_array &v, const xparams _xparams = alglib::xdefault);
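
/*************************************************************************
A minimal call sketch for pcatruncatedsubspace(); nneeded=2  with  eps=0
and maxits=0 (automatic stopping, see the note above)  is  illustrative
only:

    real_1d_array s2;
    real_2d_array v;
    pcatruncatedsubspace(x, npoints, nvars, 2, 0.0, 0, s2, v);
    // s2: array[2] of variances, v: array[nvars,2] of basis vectors
*************************************************************************/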
1606 
1607 
1608 /*************************************************************************
1609 Sparse truncated principal components analysis
1610 
This function performs sparse truncated PCA, i.e. returns just a  few  of
the most important principal components for a sparse input X.
1613 
1614 Internally it uses iterative eigensolver which is very efficient when only
1615 a minor fraction of full basis is required.
1616 
1617 It should be noted that, unlike LDA, PCA does not use class labels.
1618 
1619   ! COMMERCIAL EDITION OF ALGLIB:
1620   !
1621   ! Commercial Edition of ALGLIB includes following important improvements
1622   ! of this function:
1623   ! * high-performance native backend with same C# interface (C# version)
1624   ! * multithreading support (C++ and C# versions)
1625   ! * hardware vendor (Intel) implementations of linear algebra primitives
1626   !   (C++ and C# versions, x86/x64 platform)
1627   !
1628   ! We recommend you to read 'Working with commercial version' section  of
1629   ! ALGLIB Reference Manual in order to find out how to  use  performance-
1630   ! related features provided by commercial edition of ALGLIB.
1631 
1632 INPUT PARAMETERS:
1633     X           -   sparse dataset, sparse  npoints*nvars  matrix.  It  is
1634                     recommended to use CRS sparse storage format;  non-CRS
1635                     input will be internally converted to CRS.
1636                     Matrix contains ONLY INDEPENDENT VARIABLES,  and  must
1637                     be EXACTLY npoints*nvars.
1638     NPoints     -   dataset size, NPoints>=0
1639     NVars       -   number of independent variables, NVars>=1
1640     NNeeded     -   number of requested components, in [1,NVars] range;
1641                     this function is efficient only for NNeeded<<NVars.
1642     Eps         -   desired  precision  of  vectors  returned;  underlying
1643                     solver will stop iterations as soon as absolute  error
1644                     in corresponding singular values  reduces  to  roughly
1645                     eps*MAX(lambda[]), with lambda[] being array of  eigen
1646                     values.
1647                     Zero value means that  algorithm  performs  number  of
1648                     iterations  specified  by  maxits  parameter,  without
1649                     paying attention to precision.
1650     MaxIts      -   number of iterations performed by  subspace  iteration
1651                     method. Zero value means that no  limit  on  iteration
1652                     count is placed (eps-based stopping condition is used).
1653 
1654 
1655 OUTPUT PARAMETERS:
1656     S2          -   array[NNeeded]. Variance values corresponding
1657                     to basis vectors.
1658     V           -   array[NVars,NNeeded]
1659                     matrix, whose columns store basis vectors.
1660 
1661 NOTE: passing eps=0 and maxits=0 results in small eps  being  selected  as
1662       a stopping condition. Exact value of automatically selected  eps  is
1663       version-dependent.
1664 
1665 NOTE: zero  MaxIts  is  silently  replaced  by some reasonable value which
1666       prevents eternal loops (possible when inputs are degenerate and  too
1667       stringent stopping criteria are specified). In  current  version  it
1668       is 50+2*NVars.
1669 
1670   -- ALGLIB --
1671      Copyright 10.01.2017 by Bochkanov Sergey
1672 *************************************************************************/
1673 void pcatruncatedsubspacesparse(const sparsematrix &x, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nneeded, const double eps, const ae_int_t maxits, real_1d_array &s2, real_2d_array &v, const xparams _xparams = alglib::xdefault);
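

/*************************************************************************
USAGE SKETCH: building a tiny sparse dataset, converting it to CRS (as the
XY description above recommends), and extracting one principal component.
The matrix contents are illustrative assumptions.

    #include "dataanalysis.h"
    using namespace alglib;

    void truncated_pca_sparse_demo()
    {
        sparsematrix xs;
        sparsecreate(4, 3, xs);          // 4 points, 3 variables
        sparseset(xs, 0, 0, 1.0);
        sparseset(xs, 1, 1, 2.0);
        sparseset(xs, 2, 2, 3.0);
        sparseset(xs, 3, 0, -1.0);
        sparseconverttocrs(xs);          // CRS avoids internal conversion
        real_1d_array s2;
        real_2d_array v;
        pcatruncatedsubspacesparse(xs, 4, 3, 1, 0.0, 0, s2, v);
    }
*************************************************************************/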
1674 #endif
1675 
1676 #if defined(AE_COMPILE_BDSS) || !defined(AE_PARTIAL_BUILD)
1677 /*************************************************************************
1678 Optimal binary classification
1679 
Algorithm finds the optimal (=with minimal cross-entropy) binary partition.
Internal subroutine.
1682 
1683 INPUT PARAMETERS:
1684     A       -   array[0..N-1], variable
1685     C       -   array[0..N-1], class numbers (0 or 1).
1686     N       -   array size
1687 
1688 OUTPUT PARAMETERS:
    Info    -   completion code:
                * -3, all values of A[] are same (partition is impossible)
                * -2, one of C[] is incorrect (<0, >1)
                * -1, incorrect parameters were passed (N<=0).
                *  1, OK
    Threshold-  partition boundary. Left part contains values which are
1695                 strictly less than Threshold. Right part contains values
1696                 which are greater than or equal to Threshold.
1697     PAL, PBL-   probabilities P(0|v<Threshold) and P(1|v<Threshold)
1698     PAR, PBR-   probabilities P(0|v>=Threshold) and P(1|v>=Threshold)
1699     CVE     -   cross-validation estimate of cross-entropy
1700 
1701   -- ALGLIB --
1702      Copyright 22.05.2008 by Bochkanov Sergey
1703 *************************************************************************/
1704 void dsoptimalsplit2(const real_1d_array &a, const integer_1d_array &c, const ae_int_t n, ae_int_t &info, double &threshold, double &pal, double &pbl, double &par, double &pbr, double &cve, const xparams _xparams = alglib::xdefault);
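

/*************************************************************************
USAGE SKETCH: splitting a scalar attribute into two parts  with  the  two
classes well separated. The toy data below is an illustrative assumption;
for it Info=1 is expected, with a threshold somewhere between 0.2 and 0.8.

    #include "dataanalysis.h"
    using namespace alglib;

    void optimal_split_demo()
    {
        real_1d_array    a = "[0.1, 0.2, 0.8, 0.9]";
        integer_1d_array c = "[0, 0, 1, 1]";
        ae_int_t info;
        double threshold, pal, pbl, par, pbr, cve;
        dsoptimalsplit2(a, c, 4, info, threshold, pal, pbl, par, pbr, cve);
    }
*************************************************************************/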
1705 
1706 
1707 /*************************************************************************
1708 Optimal partition, internal subroutine. Fast version.
1709 
1710 Accepts:
    A       array[0..N-1]       array of attributes
1712     C       array[0..N-1]       array of class labels
1713     TiesBuf array[0..N]         temporaries (ties)
1714     CntBuf  array[0..2*NC-1]    temporaries (counts)
1715     Alpha                       centering factor (0<=alpha<=1, recommended value - 0.05)
1716     BufR    array[0..N-1]       temporaries
1717     BufI    array[0..N-1]       temporaries
1718 
Output:
    Info        error code (">0"=OK, "<0"=bad)
    Threshold   partition boundary
    RMS         training set RMS error
    CVRMS       leave-one-out RMS error
1723 
1724 Note:
    content of all arrays is changed by the subroutine;
    it doesn't allocate temporaries - they are provided by the caller.
1727 
1728   -- ALGLIB --
1729      Copyright 11.12.2008 by Bochkanov Sergey
1730 *************************************************************************/
1731 void dsoptimalsplit2fast(real_1d_array &a, integer_1d_array &c, integer_1d_array &tiesbuf, integer_1d_array &cntbuf, real_1d_array &bufr, integer_1d_array &bufi, const ae_int_t n, const ae_int_t nc, const double alpha, ae_int_t &info, double &threshold, double &rms, double &cvrms, const xparams _xparams = alglib::xdefault);
1732 #endif
1733 
1734 #if defined(AE_COMPILE_MLPBASE) || !defined(AE_PARTIAL_BUILD)
1735 /*************************************************************************
1736 This function serializes data structure to string.
1737 
1738 Important properties of s_out:
1739 * it contains alphanumeric characters, dots, underscores, minus signs
1740 * these symbols are grouped into words, which are separated by spaces
1741   and Windows-style (CR+LF) newlines
1742 * although  serializer  uses  spaces and CR+LF as separators, you can
1743   replace any separator character by arbitrary combination of spaces,
  tabs, Windows or Unix newlines. It allows flexible reformatting  of
  the string in case you want to include it into a text or  XML  file.
  But you should not insert separators into the middle of the "words",
  nor should you change the case of letters.
1748 * s_out can be freely moved between 32-bit and 64-bit systems, little
1749   and big endian machines, and so on. You can serialize structure  on
1750   32-bit machine and unserialize it on 64-bit one (or vice versa), or
1751   serialize  it  on  SPARC  and  unserialize  on  x86.  You  can also
1752   serialize  it  in  C++ version of ALGLIB and unserialize in C# one,
1753   and vice versa.
1754 *************************************************************************/
1755 void mlpserialize(multilayerperceptron &obj, std::string &s_out);
1756 
1757 
1758 /*************************************************************************
1759 This function unserializes data structure from string.
1760 *************************************************************************/
1761 void mlpunserialize(const std::string &s_in, multilayerperceptron &obj);
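

/*************************************************************************
USAGE SKETCH: a serialize/unserialize round trip through std::string using
only the two functions declared above. The network geometry (2-8-1) is an
illustrative assumption.

    #include "dataanalysis.h"
    #include <string>
    using namespace alglib;

    void serialization_demo()
    {
        multilayerperceptron net, restored;
        mlpcreate1(2, 8, 1, net);        // 2 inputs, 8 hidden, 1 output
        std::string s;
        mlpserialize(net, s);            // portable text representation
        mlpunserialize(s, restored);     // restored behaves like net
    }
*************************************************************************/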
1762 
1763 
1764 
1765 
1766 /*************************************************************************
1767 This function serializes data structure to C++ stream.
1768 
1769 Data stream generated by this function is same as  string  representation
1770 generated  by  string  version  of  serializer - alphanumeric characters,
1771 dots, underscores, minus signs, which are grouped into words separated by
1772 spaces and CR+LF.
1773 
We recommend you to read comments on string version of serializer to find
out more about serialization of ALGLIB objects.
1776 *************************************************************************/
1777 void mlpserialize(multilayerperceptron &obj, std::ostream &s_out);
1778 
1779 
1780 /*************************************************************************
1781 This function unserializes data structure from stream.
1782 *************************************************************************/
1783 void mlpunserialize(const std::istream &s_in, multilayerperceptron &obj);
1784 
1785 
1786 /*************************************************************************
1787 Creates  neural  network  with  NIn  inputs,  NOut outputs, without hidden
1788 layers, with linear output layer. Network weights are  filled  with  small
1789 random values.
1790 
1791   -- ALGLIB --
1792      Copyright 04.11.2007 by Bochkanov Sergey
1793 *************************************************************************/
1794 void mlpcreate0(const ae_int_t nin, const ae_int_t nout, multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
1795 
1796 
1797 /*************************************************************************
1798 Same  as  MLPCreate0,  but  with  one  hidden  layer  (NHid  neurons) with
1799 non-linear activation function. Output layer is linear.
1800 
1801   -- ALGLIB --
1802      Copyright 04.11.2007 by Bochkanov Sergey
1803 *************************************************************************/
1804 void mlpcreate1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
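

/*************************************************************************
USAGE SKETCH: creating a 2-10-1 regression network with  mlpcreate1()  and
evaluating it on one input vector. Input values are illustrative;  weights
are random on creation, so the output is not meaningful until trained.

    #include "dataanalysis.h"
    using namespace alglib;

    void create_and_process_demo()
    {
        multilayerperceptron net;
        mlpcreate1(2, 10, 1, net);       // NIn=2, NHid=10, NOut=1
        real_1d_array x = "[0.5, -0.5]";
        real_1d_array y;
        mlpprocessi(net, x, y);          // allocates y, see MLPProcessI
    }
*************************************************************************/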
1805 
1806 
1807 /*************************************************************************
1808 Same as MLPCreate0, but with two hidden layers (NHid1 and  NHid2  neurons)
1809 with non-linear activation function. Output layer is linear.
1811 
1812   -- ALGLIB --
1813      Copyright 04.11.2007 by Bochkanov Sergey
1814 *************************************************************************/
1815 void mlpcreate2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
1816 
1817 
1818 /*************************************************************************
Creates  neural  network  with  NIn  inputs,  NOut outputs, without hidden
layers, with non-linear output layer. Network weights are filled with small
1821 random values.
1822 
1823 Activation function of the output layer takes values:
1824 
1825     (B, +INF), if D>=0
1826 
1827 or
1828 
1829     (-INF, B), if D<0.
1830 
1831 
1832   -- ALGLIB --
1833      Copyright 30.03.2008 by Bochkanov Sergey
1834 *************************************************************************/
1835 void mlpcreateb0(const ae_int_t nin, const ae_int_t nout, const double b, const double d, multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
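

/*************************************************************************
USAGE SKETCH: a network whose single output is constrained to  (B, +INF),
as described above. The parameter values are illustrative assumptions.

    #include "dataanalysis.h"
    using namespace alglib;

    void bounded_output_demo()
    {
        multilayerperceptron net;
        // B=1.0, D=+1 => output activation takes values in (1.0, +INF)
        mlpcreateb0(3, 1, 1.0, 1.0, net);
    }
*************************************************************************/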
1836 
1837 
1838 /*************************************************************************
1839 Same as MLPCreateB0 but with non-linear hidden layer.
1840 
1841   -- ALGLIB --
1842      Copyright 30.03.2008 by Bochkanov Sergey
1843 *************************************************************************/
1844 void mlpcreateb1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double b, const double d, multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
1845 
1846 
1847 /*************************************************************************
1848 Same as MLPCreateB0 but with two non-linear hidden layers.
1849 
1850   -- ALGLIB --
1851      Copyright 30.03.2008 by Bochkanov Sergey
1852 *************************************************************************/
1853 void mlpcreateb2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double b, const double d, multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
1854 
1855 
1856 /*************************************************************************
Creates  neural  network  with  NIn  inputs,  NOut outputs, without hidden
layers, with non-linear output layer. Network weights are filled with small
1859 random values. Activation function of the output layer takes values [A,B].
1860 
1861   -- ALGLIB --
1862      Copyright 30.03.2008 by Bochkanov Sergey
1863 *************************************************************************/
1864 void mlpcreater0(const ae_int_t nin, const ae_int_t nout, const double a, const double b, multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
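

/*************************************************************************
USAGE SKETCH: a network whose output is squashed into the range [A,B], as
described above. The [0,1] range is an illustrative assumption.

    #include "dataanalysis.h"
    using namespace alglib;

    void range_output_demo()
    {
        multilayerperceptron net;
        mlpcreater0(2, 1, 0.0, 1.0, net);   // output stays within [0,1]
    }
*************************************************************************/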
1865 
1866 
1867 /*************************************************************************
1868 Same as MLPCreateR0, but with non-linear hidden layer.
1869 
1870   -- ALGLIB --
1871      Copyright 30.03.2008 by Bochkanov Sergey
1872 *************************************************************************/
1873 void mlpcreater1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double a, const double b, multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
1874 
1875 
1876 /*************************************************************************
1877 Same as MLPCreateR0, but with two non-linear hidden layers.
1878 
1879   -- ALGLIB --
1880      Copyright 30.03.2008 by Bochkanov Sergey
1881 *************************************************************************/
1882 void mlpcreater2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double a, const double b, multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
1883 
1884 
1885 /*************************************************************************
1886 Creates classifier network with NIn  inputs  and  NOut  possible  classes.
1887 Network contains no hidden layers and linear output  layer  with  SOFTMAX-
normalization  (so  outputs  sum  up  to  1.0  and  converge  to posterior
1889 probabilities).
1890 
1891   -- ALGLIB --
1892      Copyright 04.11.2007 by Bochkanov Sergey
1893 *************************************************************************/
1894 void mlpcreatec0(const ae_int_t nin, const ae_int_t nout, multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
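

/*************************************************************************
USAGE SKETCH: a SOFTMAX-normalized classifier created with  mlpcreatec0().
For any input the NOut outputs are non-negative and sum to 1 (up to round-
ing), which is what the description above means by posterior probabilities.
The input vector below is an illustrative assumption.

    #include "dataanalysis.h"
    using namespace alglib;

    void classifier_demo()
    {
        multilayerperceptron clf;
        mlpcreatec0(4, 3, clf);          // 4 inputs, 3 classes
        real_1d_array x = "[0.1, 0.2, 0.3, 0.4]";
        real_1d_array probs;
        mlpprocessi(clf, x, probs);      // probs[0..2] sum to 1
    }
*************************************************************************/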
1895 
1896 
1897 /*************************************************************************
1898 Same as MLPCreateC0, but with one non-linear hidden layer.
1899 
1900   -- ALGLIB --
1901      Copyright 04.11.2007 by Bochkanov Sergey
1902 *************************************************************************/
1903 void mlpcreatec1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
1904 
1905 
1906 /*************************************************************************
1907 Same as MLPCreateC0, but with two non-linear hidden layers.
1908 
1909   -- ALGLIB --
1910      Copyright 04.11.2007 by Bochkanov Sergey
1911 *************************************************************************/
1912 void mlpcreatec2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
1913 
1914 
1915 /*************************************************************************
1916 Copying of neural network
1917 
1918 INPUT PARAMETERS:
1919     Network1 -   original
1920 
1921 OUTPUT PARAMETERS:
1922     Network2 -   copy
1923 
1924   -- ALGLIB --
1925      Copyright 04.11.2007 by Bochkanov Sergey
1926 *************************************************************************/
1927 void mlpcopy(const multilayerperceptron &network1, multilayerperceptron &network2, const xparams _xparams = alglib::xdefault);
1928 
1929 
1930 /*************************************************************************
1931 This function copies tunable  parameters (weights/means/sigmas)  from  one
1932 network to another with same architecture. It  performs  some  rudimentary
1933 checks that architectures are same, and throws exception if check fails.
1934 
It is intended for fast copying of states between two networks  which  are
known to have same geometry.
1937 
1938 INPUT PARAMETERS:
1939     Network1 -   source, must be correctly initialized
1940     Network2 -   target, must have same architecture
1941 
1942 OUTPUT PARAMETERS:
1943     Network2 -   network state is copied from source to target
1944 
1945   -- ALGLIB --
1946      Copyright 20.06.2013 by Bochkanov Sergey
1947 *************************************************************************/
1948 void mlpcopytunableparameters(const multilayerperceptron &network1, const multilayerperceptron &network2, const xparams _xparams = alglib::xdefault);
1949 
1950 
1951 /*************************************************************************
1952 Randomization of neural network weights
1953 
1954   -- ALGLIB --
1955      Copyright 06.11.2007 by Bochkanov Sergey
1956 *************************************************************************/
1957 void mlprandomize(const multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
1958 
1959 
1960 /*************************************************************************
Randomization of neural network weights and standardizer
1962 
1963   -- ALGLIB --
1964      Copyright 10.03.2008 by Bochkanov Sergey
1965 *************************************************************************/
1966 void mlprandomizefull(const multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
1967 
1968 
1969 /*************************************************************************
1970 Internal subroutine.
1971 
1972   -- ALGLIB --
1973      Copyright 30.03.2008 by Bochkanov Sergey
1974 *************************************************************************/
1975 void mlpinitpreprocessor(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, const xparams _xparams = alglib::xdefault);
1976 
1977 
1978 /*************************************************************************
1979 Returns information about initialized network: number of inputs, outputs,
1980 weights.
1981 
1982   -- ALGLIB --
1983      Copyright 04.11.2007 by Bochkanov Sergey
1984 *************************************************************************/
1985 void mlpproperties(const multilayerperceptron &network, ae_int_t &nin, ae_int_t &nout, ae_int_t &wcount, const xparams _xparams = alglib::xdefault);
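

/*************************************************************************
USAGE SKETCH: querying the basic geometry of an initialized  network.  The
network shape is an illustrative assumption.

    #include "dataanalysis.h"
    using namespace alglib;

    void properties_demo()
    {
        multilayerperceptron net;
        mlpcreate1(3, 5, 2, net);
        ae_int_t nin, nout, wcount;
        mlpproperties(net, nin, nout, wcount);   // nin=3, nout=2
    }
*************************************************************************/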
1986 
1987 
1988 /*************************************************************************
1989 Returns number of inputs.
1990 
1991   -- ALGLIB --
1992      Copyright 19.10.2011 by Bochkanov Sergey
1993 *************************************************************************/
1994 ae_int_t mlpgetinputscount(const multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
1995 
1996 
1997 /*************************************************************************
1998 Returns number of outputs.
1999 
2000   -- ALGLIB --
2001      Copyright 19.10.2011 by Bochkanov Sergey
2002 *************************************************************************/
2003 ae_int_t mlpgetoutputscount(const multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
2004 
2005 
2006 /*************************************************************************
2007 Returns number of weights.
2008 
2009   -- ALGLIB --
2010      Copyright 19.10.2011 by Bochkanov Sergey
2011 *************************************************************************/
2012 ae_int_t mlpgetweightscount(const multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
2013 
2014 
2015 /*************************************************************************
2016 Tells whether network is SOFTMAX-normalized (i.e. classifier) or not.
2017 
2018   -- ALGLIB --
2019      Copyright 04.11.2007 by Bochkanov Sergey
2020 *************************************************************************/
2021 bool mlpissoftmax(const multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
2022 
2023 
2024 /*************************************************************************
2025 This function returns total number of layers (including input, hidden and
2026 output layers).
2027 
2028   -- ALGLIB --
2029      Copyright 25.03.2011 by Bochkanov Sergey
2030 *************************************************************************/
2031 ae_int_t mlpgetlayerscount(const multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
2032 
2033 
2034 /*************************************************************************
2035 This function returns size of K-th layer.
2036 
2037 K=0 corresponds to input layer, K=CNT-1 corresponds to output layer.
2038 
Size of the output layer is always equal to the number of outputs.  In  a
softmax-normalized network the last neuron doesn't have any connections -
it is just zero.
2042 
2043   -- ALGLIB --
2044      Copyright 25.03.2011 by Bochkanov Sergey
2045 *************************************************************************/
2046 ae_int_t mlpgetlayersize(const multilayerperceptron &network, const ae_int_t k, const xparams _xparams = alglib::xdefault);
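

/*************************************************************************
USAGE SKETCH: walking over all layers (input, hidden, output)  with  the
two functions declared above. The network shape is illustrative.

    #include "dataanalysis.h"
    #include <cstdio>
    using namespace alglib;

    void layers_demo()
    {
        multilayerperceptron net;
        mlpcreate2(4, 6, 5, 2, net);     // layers of size 4, 6, 5, 2
        ae_int_t cnt = mlpgetlayerscount(net);
        for(ae_int_t k = 0; k < cnt; k++)
            std::printf("layer %d: %d neurons\n",
                        int(k), int(mlpgetlayersize(net, k)));
    }
*************************************************************************/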
2047 
2048 
2049 /*************************************************************************
2050 This function returns offset/scaling coefficients for I-th input of the
2051 network.
2052 
2053 INPUT PARAMETERS:
2054     Network     -   network
2055     I           -   input index
2056 
2057 OUTPUT PARAMETERS:
2058     Mean        -   mean term
2059     Sigma       -   sigma term, guaranteed to be nonzero.
2060 
2061 I-th input is passed through linear transformation
2062     IN[i] = (IN[i]-Mean)/Sigma
2063 before feeding to the network
2064 
2065   -- ALGLIB --
2066      Copyright 25.03.2011 by Bochkanov Sergey
2067 *************************************************************************/
2068 void mlpgetinputscaling(const multilayerperceptron &network, const ae_int_t i, double &mean, double &sigma, const xparams _xparams = alglib::xdefault);
2069 
2070 
2071 /*************************************************************************
2072 This function returns offset/scaling coefficients for I-th output of the
2073 network.
2074 
2075 INPUT PARAMETERS:
2076     Network     -   network
    I           -   output index
2078 
2079 OUTPUT PARAMETERS:
2080     Mean        -   mean term
2081     Sigma       -   sigma term, guaranteed to be nonzero.
2082 
2083 I-th output is passed through linear transformation
2084     OUT[i] = OUT[i]*Sigma+Mean
2085 before returning it to user. In case we have SOFTMAX-normalized network,
2086 we return (Mean,Sigma)=(0.0,1.0).
2087 
2088   -- ALGLIB --
2089      Copyright 25.03.2011 by Bochkanov Sergey
2090 *************************************************************************/
2091 void mlpgetoutputscaling(const multilayerperceptron &network, const ae_int_t i, double &mean, double &sigma, const xparams _xparams = alglib::xdefault);
2092 
2093 
2094 /*************************************************************************
2095 This function returns information about Ith neuron of Kth layer
2096 
2097 INPUT PARAMETERS:
2098     Network     -   network
2099     K           -   layer index
2100     I           -   neuron index (within layer)
2101 
2102 OUTPUT PARAMETERS:
2103     FKind       -   activation function type (used by MLPActivationFunction())
2104                     this value is zero for input or linear neurons
2105     Threshold   -   also called offset, bias
2106                     zero for input neurons
2107 
NOTE: this function throws an exception if the layer or neuron with  given
index does not exist.
2110 
2111   -- ALGLIB --
2112      Copyright 25.03.2011 by Bochkanov Sergey
2113 *************************************************************************/
2114 void mlpgetneuroninfo(const multilayerperceptron &network, const ae_int_t k, const ae_int_t i, ae_int_t &fkind, double &threshold, const xparams _xparams = alglib::xdefault);
2115 
2116 
2117 /*************************************************************************
2118 This function returns information about connection from I0-th neuron of
2119 K0-th layer to I1-th neuron of K1-th layer.
2120 
2121 INPUT PARAMETERS:
2122     Network     -   network
2123     K0          -   layer index
2124     I0          -   neuron index (within layer)
2125     K1          -   layer index
2126     I1          -   neuron index (within layer)
2127 
2128 RESULT:
2129     connection weight (zero for non-existent connections)
2130 
2131 This function:
1. throws an exception if the layer or neuron with given index does not exist.
2133 2. returns zero if neurons exist, but there is no connection between them
2134 
2135   -- ALGLIB --
2136      Copyright 25.03.2011 by Bochkanov Sergey
2137 *************************************************************************/
2138 double mlpgetweight(const multilayerperceptron &network, const ae_int_t k0, const ae_int_t i0, const ae_int_t k1, const ae_int_t i1, const xparams _xparams = alglib::xdefault);
2139 
2140 
2141 /*************************************************************************
2142 This function sets offset/scaling coefficients for I-th input of the
2143 network.
2144 
2145 INPUT PARAMETERS:
2146     Network     -   network
2147     I           -   input index
2148     Mean        -   mean term
2149     Sigma       -   sigma term (if zero, will be replaced by 1.0)
2150 
NOTE: I-th input is passed through linear transformation
2152     IN[i] = (IN[i]-Mean)/Sigma
2153 before feeding to the network. This function sets Mean and Sigma.
2154 
2155   -- ALGLIB --
2156      Copyright 25.03.2011 by Bochkanov Sergey
2157 *************************************************************************/
2158 void mlpsetinputscaling(const multilayerperceptron &network, const ae_int_t i, const double mean, const double sigma, const xparams _xparams = alglib::xdefault);
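

/*************************************************************************
USAGE SKETCH: setting and reading back input standardization coefficients.
The mean/sigma values are illustrative assumptions.

    #include "dataanalysis.h"
    using namespace alglib;

    void input_scaling_demo()
    {
        multilayerperceptron net;
        mlpcreate0(2, 1, net);
        mlpsetinputscaling(net, 0, 10.0, 2.0);   // IN[0]=(IN[0]-10.0)/2.0
        double mean, sigma;
        mlpgetinputscaling(net, 0, mean, sigma); // mean=10.0, sigma=2.0
    }
*************************************************************************/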
2159 
2160 
2161 /*************************************************************************
2162 This function sets offset/scaling coefficients for I-th output of the
2163 network.
2164 
2165 INPUT PARAMETERS:
2166     Network     -   network
    I           -   output index
2168     Mean        -   mean term
2169     Sigma       -   sigma term (if zero, will be replaced by 1.0)
2170 
2173 NOTE: I-th output is passed through linear transformation
2174     OUT[i] = OUT[i]*Sigma+Mean
2175 before returning it to user. This function sets Sigma/Mean. In case we
have SOFTMAX-normalized network, you cannot set (Sigma,Mean)  to  anything
other than (0.0,1.0) - this function will throw an exception.
2178 
2179   -- ALGLIB --
2180      Copyright 25.03.2011 by Bochkanov Sergey
2181 *************************************************************************/
2182 void mlpsetoutputscaling(const multilayerperceptron &network, const ae_int_t i, const double mean, const double sigma, const xparams _xparams = alglib::xdefault);
2183 
2184 
2185 /*************************************************************************
2186 This function modifies information about Ith neuron of Kth layer
2187 
2188 INPUT PARAMETERS:
2189     Network     -   network
2190     K           -   layer index
2191     I           -   neuron index (within layer)
2192     FKind       -   activation function type (used by MLPActivationFunction())
2193                     this value must be zero for input neurons
2194                     (you can not set activation function for input neurons)
2195     Threshold   -   also called offset, bias
2196                     this value must be zero for input neurons
2197                     (you can not set threshold for input neurons)
2198 
2199 NOTES:
1. this function throws an exception if the layer or neuron with the given
   index does not exist.
2202 2. this function also throws exception when you try to set non-linear
2203    activation function for input neurons (any kind of network) or for output
2204    neurons of classifier network.
2205 3. this function throws exception when you try to set non-zero threshold for
2206    input neurons (any kind of network).
2207 
2208   -- ALGLIB --
2209      Copyright 25.03.2011 by Bochkanov Sergey
2210 *************************************************************************/
2211 void mlpsetneuroninfo(const multilayerperceptron &network, const ae_int_t k, const ae_int_t i, const ae_int_t fkind, const double threshold, const xparams _xparams = alglib::xdefault);
2212 
2213 
2214 /*************************************************************************
2215 This function modifies information about connection from I0-th neuron of
2216 K0-th layer to I1-th neuron of K1-th layer.
2217 
2218 INPUT PARAMETERS:
2219     Network     -   network
2220     K0          -   layer index
2221     I0          -   neuron index (within layer)
2222     K1          -   layer index
2223     I1          -   neuron index (within layer)
2224     W           -   connection weight (must be zero for non-existent
2225                     connections)
2226 
2227 This function:
1. throws an exception if the layer or neuron with given index does not exist.
2229 2. throws exception if you try to set non-zero weight for non-existent
2230    connection
2231 
2232   -- ALGLIB --
2233      Copyright 25.03.2011 by Bochkanov Sergey
2234 *************************************************************************/
2235 void mlpsetweight(const multilayerperceptron &network, const ae_int_t k0, const ae_int_t i0, const ae_int_t k1, const ae_int_t i1, const double w, const xparams _xparams = alglib::xdefault);
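

/*************************************************************************
USAGE SKETCH: writing and reading one connection weight. In a network with
no hidden layers, layer 0 is the input layer and layer 1  is  the  output
layer, so the connection below exists; the weight value is illustrative.

    #include "dataanalysis.h"
    using namespace alglib;

    void weight_demo()
    {
        multilayerperceptron net;
        mlpcreate0(2, 1, net);
        // neuron 0 of layer 0  ->  neuron 0 of layer 1
        mlpsetweight(net, 0, 0, 1, 0, 0.25);
        double w = mlpgetweight(net, 0, 0, 1, 0);   // w == 0.25
    }
*************************************************************************/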
2236 
2237 
2238 /*************************************************************************
2239 Neural network activation function
2240 
2241 INPUT PARAMETERS:
2242     NET         -   neuron input
2243     K           -   function index (zero for linear function)
2244 
2245 OUTPUT PARAMETERS:
2246     F           -   function
2247     DF          -   its derivative
2248     D2F         -   its second derivative
2249 
2250   -- ALGLIB --
2251      Copyright 04.11.2007 by Bochkanov Sergey
2252 *************************************************************************/
2253 void mlpactivationfunction(const double net, const ae_int_t k, double &f, double &df, double &d2f, const xparams _xparams = alglib::xdefault);
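

/*************************************************************************
USAGE SKETCH: evaluating an activation function together with  its  first
and second derivatives. K=0 is documented above as the  linear  function;
the mapping of non-zero K values to concrete non-linearities  is  version
specific, so inspect it via MLPGetNeuronInfo() on a created network.

    #include "dataanalysis.h"
    using namespace alglib;

    void activation_demo()
    {
        double f, df, d2f;
        // linear function: f equals the net input, df=1, d2f=0
        mlpactivationfunction(0.7, 0, f, df, d2f);
    }
*************************************************************************/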
2254 
2255 
2256 /*************************************************************************
Processing
2258 
2259 INPUT PARAMETERS:
2260     Network -   neural network
2261     X       -   input vector,  array[0..NIn-1].
2262 
2263 OUTPUT PARAMETERS:
2264     Y       -   result. Regression estimate when solving regression  task,
2265                 vector of posterior probabilities for classification task.
2266 
2267 See also MLPProcessI
2268 
2269   -- ALGLIB --
2270      Copyright 04.11.2007 by Bochkanov Sergey
2271 *************************************************************************/
2272 void mlpprocess(const multilayerperceptron &network, const real_1d_array &x, real_1d_array &y, const xparams _xparams = alglib::xdefault);
2273 
2274 
2275 /*************************************************************************
2276 'interactive'  variant  of  MLPProcess  for  languages  like  Python which
2277 support constructs like "Y = MLPProcess(NN,X)" and interactive mode of the
interpreter.
2279 
2280 This function allocates new array on each call,  so  it  is  significantly
2281 slower than its 'non-interactive' counterpart, but it is  more  convenient
2282 when you call it from command line.
2283 
2284   -- ALGLIB --
2285      Copyright 21.09.2010 by Bochkanov Sergey
2286 *************************************************************************/
2287 void mlpprocessi(const multilayerperceptron &network, const real_1d_array &x, real_1d_array &y, const xparams _xparams = alglib::xdefault);
2288 
2289 
2290 /*************************************************************************
2291 Error of the neural network on dataset.
2292 
2293   ! COMMERCIAL EDITION OF ALGLIB:
2294   !
2295   ! Commercial Edition of ALGLIB includes following important improvements
2296   ! of this function:
2297   ! * high-performance native backend with same C# interface (C# version)
2298   ! * multithreading support (C++ and C# versions)
2299   !
2300   ! We recommend you to read 'Working with commercial version' section  of
2301   ! ALGLIB Reference Manual in order to find out how to  use  performance-
2302   ! related features provided by commercial edition of ALGLIB.
2303 
2304 INPUT PARAMETERS:
2305     Network     -   neural network;
2306     XY          -   training  set,  see  below  for  information  on   the
2307                     training set format;
2308     NPoints     -   points count.
2309 
2310 RESULT:
2311     sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
2312 
2313 DATASET FORMAT:
2314 
2315 This  function  uses  two  different  dataset formats - one for regression
2316 networks, another one for classification networks.
2317 
2318 For regression networks with NIn inputs and NOut outputs following dataset
2319 format is used:
2320 * dataset is given by NPoints*(NIn+NOut) matrix
2321 * each row corresponds to one example
2322 * first NIn columns are inputs, next NOut columns are outputs
2323 
For classification networks with NIn inputs and NClasses classes following
2325 dataset format is used:
2326 * dataset is given by NPoints*(NIn+1) matrix
2327 * each row corresponds to one example
2328 * first NIn columns are inputs, last column stores class number (from 0 to
2329   NClasses-1).
2330 
2331   -- ALGLIB --
2332      Copyright 04.11.2007 by Bochkanov Sergey
2333 *************************************************************************/
2334 double mlperror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
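

/*************************************************************************
USAGE SKETCH: computing the sum-of-squares error of a regression  network
on a dataset laid out as described above (first NIn columns inputs,  next
NOut columns outputs). The data values are illustrative; the error  of  a
freshly created (random) network carries no meaning until training.

    #include "dataanalysis.h"
    using namespace alglib;

    void error_demo()
    {
        multilayerperceptron net;
        mlpcreate1(1, 5, 1, net);        // 1 input, 1 output
        // rows: [x, y]; NPoints=3, NIn+NOut=2 columns
        real_2d_array xy = "[[0.0,0.0],[0.5,0.25],[1.0,1.0]]";
        double e = mlperror(net, xy, 3); // SUM(sqr(y[i]-desired_y[i])/2)
    }
*************************************************************************/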
2335 
2336 
2337 /*************************************************************************
2338 Error of the neural network on dataset given by sparse matrix.
2339 
2340   ! COMMERCIAL EDITION OF ALGLIB:
2341   !
2342   ! Commercial Edition of ALGLIB includes following important improvements
2343   ! of this function:
2344   ! * high-performance native backend with same C# interface (C# version)
2345   ! * multithreading support (C++ and C# versions)
2346   !
2347   ! We recommend you to read 'Working with commercial version' section  of
2348   ! ALGLIB Reference Manual in order to find out how to  use  performance-
2349   ! related features provided by commercial edition of ALGLIB.
2350 
2351 INPUT PARAMETERS:
2352     Network     -   neural network
2353     XY          -   training  set,  see  below  for  information  on   the
2354                     training set format. This function checks  correctness
2355                     of  the  dataset  (no  NANs/INFs,  class  numbers  are
2356                     correct) and throws exception when  incorrect  dataset
2357                     is passed.  Sparse  matrix  must  use  CRS  format for
2358                     storage.
2359     NPoints     -   points count, >=0
2360 
2361 RESULT:
2362     sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
2363 
2364 DATASET FORMAT:
2365 
2366 This  function  uses  two  different  dataset formats - one for regression
2367 networks, another one for classification networks.
2368 
2369 For regression networks with NIn inputs and NOut outputs following dataset
2370 format is used:
2371 * dataset is given by NPoints*(NIn+NOut) matrix
2372 * each row corresponds to one example
2373 * first NIn columns are inputs, next NOut columns are outputs
2374 
For classification networks with NIn inputs and NClasses classes following
2376 dataset format is used:
2377 * dataset is given by NPoints*(NIn+1) matrix
2378 * each row corresponds to one example
2379 * first NIn columns are inputs, last column stores class number (from 0 to
2380   NClasses-1).
2381 
2382   -- ALGLIB --
2383      Copyright 23.07.2012 by Bochkanov Sergey
2384 *************************************************************************/
2385 double mlperrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
2386 
2387 
2388 /*************************************************************************
2389 Natural error function for neural network, internal subroutine.
2390 
NOTE: this function is single-threaded. Unlike other  error  functions, it
receives no speed-up from being executed in SMP mode.
2393 
2394   -- ALGLIB --
2395      Copyright 04.11.2007 by Bochkanov Sergey
2396 *************************************************************************/
2397 double mlperrorn(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, const xparams _xparams = alglib::xdefault);
2398 
2399 
2400 /*************************************************************************
2401 Classification error of the neural network on dataset.
2402 
2403   ! COMMERCIAL EDITION OF ALGLIB:
2404   !
2405   ! Commercial Edition of ALGLIB includes following important improvements
2406   ! of this function:
2407   ! * high-performance native backend with same C# interface (C# version)
2408   ! * multithreading support (C++ and C# versions)
2409   !
2410   ! We recommend you to read 'Working with commercial version' section  of
2411   ! ALGLIB Reference Manual in order to find out how to  use  performance-
2412   ! related features provided by commercial edition of ALGLIB.
2413 
2414 INPUT PARAMETERS:
2415     Network     -   neural network;
2416     XY          -   training  set,  see  below  for  information  on   the
2417                     training set format;
2418     NPoints     -   points count.
2419 
2420 RESULT:
2421     classification error (number of misclassified cases)
2422 
2423 DATASET FORMAT:
2424 
2425 This  function  uses  two  different  dataset formats - one for regression
2426 networks, another one for classification networks.
2427 
2428 For regression networks with NIn inputs and NOut outputs following dataset
2429 format is used:
2430 * dataset is given by NPoints*(NIn+NOut) matrix
2431 * each row corresponds to one example
2432 * first NIn columns are inputs, next NOut columns are outputs
2433 
For classification networks with NIn inputs and NClasses classes following
2435 dataset format is used:
2436 * dataset is given by NPoints*(NIn+1) matrix
2437 * each row corresponds to one example
2438 * first NIn columns are inputs, last column stores class number (from 0 to
2439   NClasses-1).
2440 
2441   -- ALGLIB --
2442      Copyright 04.11.2007 by Bochkanov Sergey
2443 *************************************************************************/
2444 ae_int_t mlpclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
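

/*************************************************************************
USAGE SKETCH: the classification dataset layout (first NIn columns inputs,
last column the class number) fed to two of the classification-error func-
tions declared nearby. Data values are illustrative assumptions.

    #include "dataanalysis.h"
    using namespace alglib;

    void cls_error_demo()
    {
        multilayerperceptron clf;
        mlpcreatec0(2, 2, clf);          // 2 inputs, 2 classes
        // rows: [in0, in1, class], class in {0,1}; NPoints=3
        real_2d_array xy = "[[0,0,0],[1,1,1],[0,1,0]]";
        ae_int_t nmis = mlpclserror(clf, xy, 3);    // misclassified count
        double  pct  = mlprelclserror(clf, xy, 3);  // relative error
    }
*************************************************************************/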
2445 
2446 
2447 /*************************************************************************
2448 Relative classification error on the test set.
2449 
2450   ! COMMERCIAL EDITION OF ALGLIB:
2451   !
2452   ! Commercial Edition of ALGLIB includes following important improvements
2453   ! of this function:
2454   ! * high-performance native backend with same C# interface (C# version)
2455   ! * multithreading support (C++ and C# versions)
2456   !
2457   ! We recommend you to read 'Working with commercial version' section  of
2458   ! ALGLIB Reference Manual in order to find out how to  use  performance-
2459   ! related features provided by commercial edition of ALGLIB.
2460 
2461 INPUT PARAMETERS:
2462     Network     -   neural network;
2463     XY          -   training  set,  see  below  for  information  on   the
2464                     training set format;
2465     NPoints     -   points count.
2466 
2467 RESULT:
2468 Percent   of incorrectly   classified  cases.  Works  both  for classifier
2469 networks and general purpose networks used as classifiers.
2470 
2471 DATASET FORMAT:
2472 
2473 This  function  uses  two  different  dataset formats - one for regression
2474 networks, another one for classification networks.
2475 
2476 For regression networks with NIn inputs and NOut outputs following dataset
2477 format is used:
2478 * dataset is given by NPoints*(NIn+NOut) matrix
2479 * each row corresponds to one example
2480 * first NIn columns are inputs, next NOut columns are outputs
2481 
For classification networks with NIn inputs and NClasses classes following
2483 dataset format is used:
2484 * dataset is given by NPoints*(NIn+1) matrix
2485 * each row corresponds to one example
2486 * first NIn columns are inputs, last column stores class number (from 0 to
2487   NClasses-1).
2488 
2489   -- ALGLIB --
2490      Copyright 25.12.2008 by Bochkanov Sergey
2491 *************************************************************************/
2492 double mlprelclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
2493 
2494 
2495 /*************************************************************************
2496 Relative classification error on the test set given by sparse matrix.
2497 
2498   ! COMMERCIAL EDITION OF ALGLIB:
2499   !
2500   ! Commercial Edition of ALGLIB includes following important improvements
2501   ! of this function:
2502   ! * high-performance native backend with same C# interface (C# version)
2503   ! * multithreading support (C++ and C# versions)
2504   !
2505   ! We recommend you to read 'Working with commercial version' section  of
2506   ! ALGLIB Reference Manual in order to find out how to  use  performance-
2507   ! related features provided by commercial edition of ALGLIB.
2508 
2509 INPUT PARAMETERS:
2510     Network     -   neural network;
2511     XY          -   training  set,  see  below  for  information  on   the
2512                     training set format. Sparse matrix must use CRS format
2513                     for storage.
2514     NPoints     -   points count, >=0.
2515 
2516 RESULT:
2517 Percent   of incorrectly   classified  cases.  Works  both  for classifier
2518 networks and general purpose networks used as classifiers.
2519 
2520 DATASET FORMAT:
2521 
2522 This  function  uses  two  different  dataset formats - one for regression
2523 networks, another one for classification networks.
2524 
2525 For regression networks with NIn inputs and NOut outputs following dataset
2526 format is used:
2527 * dataset is given by NPoints*(NIn+NOut) matrix
2528 * each row corresponds to one example
2529 * first NIn columns are inputs, next NOut columns are outputs
2530 
For classification networks with NIn inputs and NClasses classes following
2532 dataset format is used:
2533 * dataset is given by NPoints*(NIn+1) matrix
2534 * each row corresponds to one example
2535 * first NIn columns are inputs, last column stores class number (from 0 to
2536   NClasses-1).
2537 
2538   -- ALGLIB --
2539      Copyright 09.08.2012 by Bochkanov Sergey
2540 *************************************************************************/
2541 double mlprelclserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
2542 
2543 
2544 /*************************************************************************
2545 Average cross-entropy  (in bits  per element) on the test set.
2546 
2547   ! COMMERCIAL EDITION OF ALGLIB:
2548   !
2549   ! Commercial Edition of ALGLIB includes following important improvements
2550   ! of this function:
2551   ! * high-performance native backend with same C# interface (C# version)
2552   ! * multithreading support (C++ and C# versions)
2553   !
2554   ! We recommend you to read 'Working with commercial version' section  of
2555   ! ALGLIB Reference Manual in order to find out how to  use  performance-
2556   ! related features provided by commercial edition of ALGLIB.
2557 
2558 INPUT PARAMETERS:
2559     Network     -   neural network;
2560     XY          -   training  set,  see  below  for  information  on   the
2561                     training set format;
2562     NPoints     -   points count.
2563 
2564 RESULT:
2565 CrossEntropy/(NPoints*LN(2)).
2566 Zero if network solves regression task.
2567 
2568 DATASET FORMAT:
2569 
2570 This  function  uses  two  different  dataset formats - one for regression
2571 networks, another one for classification networks.
2572 
2573 For regression networks with NIn inputs and NOut outputs following dataset
2574 format is used:
2575 * dataset is given by NPoints*(NIn+NOut) matrix
2576 * each row corresponds to one example
2577 * first NIn columns are inputs, next NOut columns are outputs
2578 
For classification networks with NIn inputs and NClasses classes following
2580 dataset format is used:
2581 * dataset is given by NPoints*(NIn+1) matrix
2582 * each row corresponds to one example
2583 * first NIn columns are inputs, last column stores class number (from 0 to
2584   NClasses-1).
2585 
2586   -- ALGLIB --
2587      Copyright 08.01.2009 by Bochkanov Sergey
2588 *************************************************************************/
2589 double mlpavgce(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
2590 
2591 
2592 /*************************************************************************
2593 Average  cross-entropy  (in bits  per element)  on the  test set  given by
2594 sparse matrix.
2595 
2596   ! COMMERCIAL EDITION OF ALGLIB:
2597   !
2598   ! Commercial Edition of ALGLIB includes following important improvements
2599   ! of this function:
2600   ! * high-performance native backend with same C# interface (C# version)
2601   ! * multithreading support (C++ and C# versions)
2602   !
2603   ! We recommend you to read 'Working with commercial version' section  of
2604   ! ALGLIB Reference Manual in order to find out how to  use  performance-
2605   ! related features provided by commercial edition of ALGLIB.
2606 
2607 INPUT PARAMETERS:
2608     Network     -   neural network;
2609     XY          -   training  set,  see  below  for  information  on   the
2610                     training set format. This function checks  correctness
2611                     of  the  dataset  (no  NANs/INFs,  class  numbers  are
2612                     correct) and throws exception when  incorrect  dataset
2613                     is passed.  Sparse  matrix  must  use  CRS  format for
2614                     storage.
2615     NPoints     -   points count, >=0.
2616 
2617 RESULT:
2618 CrossEntropy/(NPoints*LN(2)).
2619 Zero if network solves regression task.
2620 
2621 DATASET FORMAT:
2622 
2623 This  function  uses  two  different  dataset formats - one for regression
2624 networks, another one for classification networks.
2625 
2626 For regression networks with NIn inputs and NOut outputs following dataset
2627 format is used:
2628 * dataset is given by NPoints*(NIn+NOut) matrix
2629 * each row corresponds to one example
2630 * first NIn columns are inputs, next NOut columns are outputs
2631 
For classification networks with NIn inputs and NClasses classes following
2633 dataset format is used:
2634 * dataset is given by NPoints*(NIn+1) matrix
2635 * each row corresponds to one example
2636 * first NIn columns are inputs, last column stores class number (from 0 to
2637   NClasses-1).
2638 
2639   -- ALGLIB --
     Copyright 09.08.2012 by Bochkanov Sergey
2641 *************************************************************************/
2642 double mlpavgcesparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
2643 
2644 
2645 /*************************************************************************
RMS error on the given test set.
2647 
2648   ! COMMERCIAL EDITION OF ALGLIB:
2649   !
2650   ! Commercial Edition of ALGLIB includes following important improvements
2651   ! of this function:
2652   ! * high-performance native backend with same C# interface (C# version)
2653   ! * multithreading support (C++ and C# versions)
2654   !
2655   ! We recommend you to read 'Working with commercial version' section  of
2656   ! ALGLIB Reference Manual in order to find out how to  use  performance-
2657   ! related features provided by commercial edition of ALGLIB.
2658 
2659 INPUT PARAMETERS:
2660     Network     -   neural network;
2661     XY          -   training  set,  see  below  for  information  on   the
2662                     training set format;
2663     NPoints     -   points count.
2664 
2665 RESULT:
2666 Root mean  square error. Its meaning for regression task is obvious. As for
2667 classification  task,  RMS  error  means  error  when estimating  posterior
2668 probabilities.
2669 
2670 DATASET FORMAT:
2671 
2672 This  function  uses  two  different  dataset formats - one for regression
2673 networks, another one for classification networks.
2674 
2675 For regression networks with NIn inputs and NOut outputs following dataset
2676 format is used:
2677 * dataset is given by NPoints*(NIn+NOut) matrix
2678 * each row corresponds to one example
2679 * first NIn columns are inputs, next NOut columns are outputs
2680 
For classification networks with NIn inputs and NClasses classes following
2682 dataset format is used:
2683 * dataset is given by NPoints*(NIn+1) matrix
2684 * each row corresponds to one example
2685 * first NIn columns are inputs, last column stores class number (from 0 to
2686   NClasses-1).
2687 
2688   -- ALGLIB --
2689      Copyright 04.11.2007 by Bochkanov Sergey
2690 *************************************************************************/
2691 double mlprmserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
2692 
2693 
2694 /*************************************************************************
2695 RMS error on the test set given by sparse matrix.
2696 
2697   ! COMMERCIAL EDITION OF ALGLIB:
2698   !
2699   ! Commercial Edition of ALGLIB includes following important improvements
2700   ! of this function:
2701   ! * high-performance native backend with same C# interface (C# version)
2702   ! * multithreading support (C++ and C# versions)
2703   !
2704   ! We recommend you to read 'Working with commercial version' section  of
2705   ! ALGLIB Reference Manual in order to find out how to  use  performance-
2706   ! related features provided by commercial edition of ALGLIB.
2707 
2708 INPUT PARAMETERS:
2709     Network     -   neural network;
2710     XY          -   training  set,  see  below  for  information  on   the
2711                     training set format. This function checks  correctness
2712                     of  the  dataset  (no  NANs/INFs,  class  numbers  are
2713                     correct) and throws exception when  incorrect  dataset
2714                     is passed.  Sparse  matrix  must  use  CRS  format for
2715                     storage.
2716     NPoints     -   points count, >=0.
2717 
2718 RESULT:
2719 Root mean  square error. Its meaning for regression task is obvious. As for
2720 classification  task,  RMS  error  means  error  when estimating  posterior
2721 probabilities.
2722 
2723 DATASET FORMAT:
2724 
2725 This  function  uses  two  different  dataset formats - one for regression
2726 networks, another one for classification networks.
2727 
2728 For regression networks with NIn inputs and NOut outputs following dataset
2729 format is used:
2730 * dataset is given by NPoints*(NIn+NOut) matrix
2731 * each row corresponds to one example
2732 * first NIn columns are inputs, next NOut columns are outputs
2733 
For classification networks with NIn inputs and NClasses classes following
2735 dataset format is used:
2736 * dataset is given by NPoints*(NIn+1) matrix
2737 * each row corresponds to one example
2738 * first NIn columns are inputs, last column stores class number (from 0 to
2739   NClasses-1).
2740 
2741   -- ALGLIB --
2742      Copyright 09.08.2012 by Bochkanov Sergey
2743 *************************************************************************/
2744 double mlprmserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
2745 
2746 
2747 /*************************************************************************
2748 Average absolute error on the test set.
2749 
2750   ! COMMERCIAL EDITION OF ALGLIB:
2751   !
2752   ! Commercial Edition of ALGLIB includes following important improvements
2753   ! of this function:
2754   ! * high-performance native backend with same C# interface (C# version)
2755   ! * multithreading support (C++ and C# versions)
2756   !
2757   ! We recommend you to read 'Working with commercial version' section  of
2758   ! ALGLIB Reference Manual in order to find out how to  use  performance-
2759   ! related features provided by commercial edition of ALGLIB.
2760 
2761 INPUT PARAMETERS:
2762     Network     -   neural network;
2763     XY          -   training  set,  see  below  for  information  on   the
2764                     training set format;
2765     NPoints     -   points count.
2766 
2767 RESULT:
2768 Its meaning for regression task is obvious. As for classification task, it
2769 means average error when estimating posterior probabilities.
2770 
2771 DATASET FORMAT:
2772 
2773 This  function  uses  two  different  dataset formats - one for regression
2774 networks, another one for classification networks.
2775 
2776 For regression networks with NIn inputs and NOut outputs following dataset
2777 format is used:
2778 * dataset is given by NPoints*(NIn+NOut) matrix
2779 * each row corresponds to one example
2780 * first NIn columns are inputs, next NOut columns are outputs
2781 
For classification networks with NIn inputs and NClasses classes following
2783 dataset format is used:
2784 * dataset is given by NPoints*(NIn+1) matrix
2785 * each row corresponds to one example
2786 * first NIn columns are inputs, last column stores class number (from 0 to
2787   NClasses-1).
2788 
2789   -- ALGLIB --
2790      Copyright 11.03.2008 by Bochkanov Sergey
2791 *************************************************************************/
2792 double mlpavgerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);


/*************************************************************************
Average absolute error on the test set given by sparse matrix.

  ! COMMERCIAL EDITION OF ALGLIB:
  !
  ! The Commercial Edition of ALGLIB includes the  following  important
  ! improvements of this function:
  ! * high-performance native backend with the same C# interface (C# version)
  ! * multithreading support (C++ and C# versions)
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    Network     -   neural network;
    XY          -   training  set,  see  below  for  information  on   the
                    training set format. This function checks  correctness
                    of  the  dataset  (no  NANs/INFs,  class  numbers  are
                    correct) and throws an exception  when  an  incorrect
                    dataset is passed. The sparse matrix must use the  CRS
                    format for storage.
    NPoints     -   points count, >=0.

RESULT:
Its meaning for regression tasks is obvious. For classification  tasks  it
means the average error made when estimating posterior probabilities.

DATASET FORMAT:

This  function  uses  two  different  dataset formats - one for regression
networks, another one for classification networks.

For regression networks with NIn inputs and NOut outputs,  the  following
dataset format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs

For classification networks with NIn inputs and  NClasses  classes,  the
following dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
  NClasses-1).

  -- ALGLIB --
     Copyright 09.08.2012 by Bochkanov Sergey
*************************************************************************/
double mlpavgerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
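
/*
Example (an illustrative sketch, not part of the original header): the same
error estimate on a sparse CRS dataset, here for a classifier. Assumes
mlpcreatec1() and the sparsecreate()/sparseset()/sparseconverttocrs()
functions from the linear algebra package.

    void avgerrorsparse_demo()
    {
        // 2 inputs -> 5 hidden neurons -> 2 classes, classification network
        alglib::multilayerperceptron net;
        alglib::mlpcreatec1(2, 5, 2, net);

        // 3 samples, NIn+1=3 columns; last column stores the class number.
        // Entries which are not set explicitly are implicit zeros
        // (e.g. the class-0 label of row 0).
        alglib::sparsematrix xy;
        alglib::sparsecreate(3, 3, xy);
        alglib::sparseset(xy, 0, 0, 0.1);
        alglib::sparseset(xy, 1, 1, 2.5);  alglib::sparseset(xy, 1, 2, 1.0);
        alglib::sparseset(xy, 2, 0, 1.0);  alglib::sparseset(xy, 2, 2, 1.0);
        alglib::sparseconverttocrs(xy);    // this function requires CRS storage

        double err = alglib::mlpavgerrorsparse(net, xy, 3);
    }
*/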


/*************************************************************************
Average relative error on the test set.

  ! COMMERCIAL EDITION OF ALGLIB:
  !
  ! The Commercial Edition of ALGLIB includes the  following  important
  ! improvements of this function:
  ! * high-performance native backend with the same C# interface (C# version)
  ! * multithreading support (C++ and C# versions)
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    Network     -   neural network;
    XY          -   training  set,  see  below  for  information  on   the
                    training set format;
    NPoints     -   points count.

RESULT:
Its meaning for regression tasks is obvious. For classification  tasks  it
means the average relative error made when estimating  the  posterior
probability of belonging to the correct class.

DATASET FORMAT:

This  function  uses  two  different  dataset formats - one for regression
networks, another one for classification networks.

For regression networks with NIn inputs and NOut outputs,  the  following
dataset format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs

For classification networks with NIn inputs and  NClasses  classes,  the
following dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
  NClasses-1).

  -- ALGLIB --
     Copyright 11.03.2008 by Bochkanov Sergey
*************************************************************************/
double mlpavgrelerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);


/*************************************************************************
Average relative error on the test set given by sparse matrix.

  ! COMMERCIAL EDITION OF ALGLIB:
  !
  ! The Commercial Edition of ALGLIB includes the  following  important
  ! improvements of this function:
  ! * high-performance native backend with the same C# interface (C# version)
  ! * multithreading support (C++ and C# versions)
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    Network     -   neural network;
    XY          -   training  set,  see  below  for  information  on   the
                    training set format. This function checks  correctness
                    of  the  dataset  (no  NANs/INFs,  class  numbers  are
                    correct) and throws an exception  when  an  incorrect
                    dataset is passed. The sparse matrix must use the  CRS
                    format for storage.
    NPoints     -   points count, >=0.

RESULT:
Its meaning for regression tasks is obvious. For classification  tasks  it
means the average relative error made when estimating  the  posterior
probability of belonging to the correct class.

DATASET FORMAT:

This  function  uses  two  different  dataset formats - one for regression
networks, another one for classification networks.

For regression networks with NIn inputs and NOut outputs,  the  following
dataset format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs

For classification networks with NIn inputs and  NClasses  classes,  the
following dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
  NClasses-1).

  -- ALGLIB --
     Copyright 09.08.2012 by Bochkanov Sergey
*************************************************************************/
double mlpavgrelerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);


/*************************************************************************
Gradient calculation

INPUT PARAMETERS:
    Network -   network initialized with one of the network creation funcs
    X       -   input vector, length of array must be at least NIn
    DesiredY-   desired outputs, length of array must be at least NOut
    Grad    -   possibly preallocated array. If size of array is smaller
                than WCount, it will be reallocated. It is recommended to
                reuse previously allocated array to reduce allocation
                overhead.

OUTPUT PARAMETERS:
    E       -   error function, SUM(sqr(y[i]-desiredy[i])/2,i)
    Grad    -   gradient of E with respect to weights of network, array[WCount]

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpgrad(const multilayerperceptron &network, const real_1d_array &x, const real_1d_array &desiredy, double &e, real_1d_array &grad, const xparams _xparams = alglib::xdefault);
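
/*
Example (an illustrative sketch, not part of the original header): computing
the error and gradient for a single sample. Grad may be passed empty; it is
(re)allocated to WCount elements as documented above. Assumes mlpcreate1().

    void grad_demo()
    {
        alglib::multilayerperceptron net;
        alglib::mlpcreate1(2, 5, 1, net);

        alglib::real_1d_array x        = "[0.5, -1.0]";  // NIn=2 inputs
        alglib::real_1d_array desiredy = "[0.25]";       // NOut=1 target
        alglib::real_1d_array grad;                      // reused if preallocated
        double e;

        alglib::mlpgrad(net, x, desiredy, e, grad);
        // e    = sum-of-squares error for this sample
        // grad = dE/dw, one entry per network weight
    }
*/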


/*************************************************************************
Gradient calculation (natural error function is used)

INPUT PARAMETERS:
    Network -   network initialized with one of the network creation funcs
    X       -   input vector, length of array must be at least NIn
    DesiredY-   desired outputs, length of array must be at least NOut
    Grad    -   possibly preallocated array. If size of array is smaller
                than WCount, it will be reallocated. It is recommended to
                reuse previously allocated array to reduce allocation
                overhead.

OUTPUT PARAMETERS:
    E       -   error function, sum-of-squares for regression networks,
                cross-entropy for classification networks.
    Grad    -   gradient of E with respect to weights of network, array[WCount]

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpgradn(const multilayerperceptron &network, const real_1d_array &x, const real_1d_array &desiredy, double &e, real_1d_array &grad, const xparams _xparams = alglib::xdefault);


/*************************************************************************
Batch gradient calculation for a set of inputs/outputs

  ! COMMERCIAL EDITION OF ALGLIB:
  !
  ! The Commercial Edition of ALGLIB includes the  following  important
  ! improvements of this function:
  ! * high-performance native backend with the same C# interface (C# version)
  ! * multithreading support (C++ and C# versions)
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    Network -   network initialized with one of the network creation funcs
    XY      -   original dataset in dense format; one sample = one row:
                * first NIn columns contain inputs,
                * for regression problem, next NOut columns store
                  desired outputs.
                * for classification problem, next column (just one!)
                  stores class number.
    SSize   -   number of elements in XY
    Grad    -   possibly preallocated array. If size of array is smaller
                than WCount, it will be reallocated. It is recommended to
                reuse previously allocated array to reduce allocation
                overhead.

OUTPUT PARAMETERS:
    E       -   error function, SUM(sqr(y[i]-desiredy[i])/2,i)
    Grad    -   gradient of E with respect to weights of network, array[WCount]

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpgradbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, const xparams _xparams = alglib::xdefault);
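
/*
Example (an illustrative sketch, not part of the original header): one batch
gradient evaluation over a small dense dataset - the building block of a
hand-rolled training loop. Assumes mlpcreate1().

    void gradbatch_demo()
    {
        alglib::multilayerperceptron net;
        alglib::mlpcreate1(2, 5, 1, net);   // 2 inputs, 5 hidden, 1 output

        alglib::real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        alglib::real_1d_array grad;         // reused across calls once allocated
        double e;

        alglib::mlpgradbatch(net, xy, 4, e, grad);   // SSize=4 samples
        // e is summed over all 4 samples; grad has one entry per weight
    }
*/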


/*************************************************************************
Batch gradient calculation for a set  of inputs/outputs  given  by  sparse
matrices

  ! COMMERCIAL EDITION OF ALGLIB:
  !
  ! The Commercial Edition of ALGLIB includes the  following  important
  ! improvements of this function:
  ! * high-performance native backend with the same C# interface (C# version)
  ! * multithreading support (C++ and C# versions)
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    Network -   network initialized with one of the network creation funcs
    XY      -   original dataset in sparse format; one sample = one row:
                * MATRIX MUST BE STORED IN CRS FORMAT
                * first NIn columns contain inputs.
                * for regression problem, next NOut columns store
                  desired outputs.
                * for classification problem, next column (just one!)
                  stores class number.
    SSize   -   number of elements in XY
    Grad    -   possibly preallocated array. If size of array is smaller
                than WCount, it will be reallocated. It is recommended to
                reuse previously allocated array to reduce allocation
                overhead.

OUTPUT PARAMETERS:
    E       -   error function, SUM(sqr(y[i]-desiredy[i])/2,i)
    Grad    -   gradient of E with respect to weights of network, array[WCount]

  -- ALGLIB --
     Copyright 26.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpgradbatchsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t ssize, double &e, real_1d_array &grad, const xparams _xparams = alglib::xdefault);


/*************************************************************************
Batch gradient calculation for a subset of dataset

  ! COMMERCIAL EDITION OF ALGLIB:
  !
  ! The Commercial Edition of ALGLIB includes the  following  important
  ! improvements of this function:
  ! * high-performance native backend with the same C# interface (C# version)
  ! * multithreading support (C++ and C# versions)
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    Network -   network initialized with one of the network creation funcs
    XY      -   original dataset in dense format; one sample = one row:
                * first NIn columns contain inputs,
                * for regression problem, next NOut columns store
                  desired outputs.
                * for classification problem, next column (just one!)
                  stores class number.
    SetSize -   real size of XY, SetSize>=0;
    Idx     -   subset of SubsetSize elements, array[SubsetSize]:
                * Idx[I] stores row index in the original dataset which is
                  given by XY. Gradient is calculated with respect to rows
                  whose indexes are stored in Idx[].
                * Idx[]  must store correct indexes; this function  throws
                  an exception in case an incorrect index  (less  than  0
                  or larger than rows(XY)) is given
                * Idx[]  may  store  indexes  in  any  order and even with
                  repetitions.
    SubsetSize- number of elements in Idx[] array:
                * positive value means that subset given by Idx[] is processed
                * zero value results in zero gradient
                * negative value means that full dataset is processed
    Grad      - possibly  preallocated array. If size of array is  smaller
                than WCount, it will be reallocated. It is  recommended to
                reuse  previously  allocated  array  to  reduce allocation
                overhead.

OUTPUT PARAMETERS:
    E         - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
    Grad      - gradient  of  E  with  respect   to  weights  of  network,
                array[WCount]

  -- ALGLIB --
     Copyright 26.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpgradbatchsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad, const xparams _xparams = alglib::xdefault);
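
/*
Example (an illustrative sketch, not part of the original header): gradient
over a subset of rows selected by Idx[]; note that repetitions are allowed,
so row 2 below contributes twice. Assumes mlpcreate1().

    void gradsubset_demo()
    {
        alglib::multilayerperceptron net;
        alglib::mlpcreate1(2, 5, 1, net);

        alglib::real_2d_array    xy  = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        alglib::integer_1d_array idx = "[0, 2, 2]";   // rows 0 and 2 (twice)
        alglib::real_1d_array grad;
        double e;

        alglib::mlpgradbatchsubset(net, xy, 4, idx, 3, e, grad);
        // passing SubsetSize=-1 would process all 4 rows and ignore idx[]
    }
*/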


/*************************************************************************
Batch gradient calculation for a set of inputs/outputs  for  a  subset  of
the dataset given by a set of indexes.

  ! COMMERCIAL EDITION OF ALGLIB:
  !
  ! The Commercial Edition of ALGLIB includes the  following  important
  ! improvements of this function:
  ! * high-performance native backend with the same C# interface (C# version)
  ! * multithreading support (C++ and C# versions)
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    Network -   network initialized with one of the network creation funcs
    XY      -   original dataset in sparse format; one sample = one row:
                * MATRIX MUST BE STORED IN CRS FORMAT
                * first NIn columns contain inputs,
                * for regression problem, next NOut columns store
                  desired outputs.
                * for classification problem, next column (just one!)
                  stores class number.
    SetSize -   real size of XY, SetSize>=0;
    Idx     -   subset of SubsetSize elements, array[SubsetSize]:
                * Idx[I] stores row index in the original dataset which is
                  given by XY. Gradient is calculated with respect to rows
                  whose indexes are stored in Idx[].
                * Idx[]  must store correct indexes; this function  throws
                  an exception in case an incorrect index  (less  than  0
                  or larger than rows(XY)) is given
                * Idx[]  may  store  indexes  in  any  order and even with
                  repetitions.
    SubsetSize- number of elements in Idx[] array:
                * positive value means that subset given by Idx[] is processed
                * zero value results in zero gradient
                * negative value means that full dataset is processed
    Grad      - possibly  preallocated array. If size of array is  smaller
                than WCount, it will be reallocated. It is  recommended to
                reuse  previously  allocated  array  to  reduce allocation
                overhead.

OUTPUT PARAMETERS:
    E       -   error function, SUM(sqr(y[i]-desiredy[i])/2,i)
    Grad    -   gradient  of  E  with  respect   to  weights  of  network,
                array[WCount]

NOTE: when SubsetSize<0 is used, the full dataset is processed by a  call
      to the MLPGradBatchSparse function.

  -- ALGLIB --
     Copyright 26.07.2012 by Bochkanov Sergey
*************************************************************************/
void mlpgradbatchsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad, const xparams _xparams = alglib::xdefault);


/*************************************************************************
Batch gradient calculation for a set of inputs/outputs
(natural error function is used)

INPUT PARAMETERS:
    Network -   network initialized with one of the network creation funcs
    XY      -   set of inputs/outputs; one sample = one row;
                first NIn columns contain inputs,
                next NOut columns - desired outputs.
    SSize   -   number of elements in XY
    Grad    -   possibly preallocated array. If size of array is smaller
                than WCount, it will be reallocated. It is recommended to
                reuse previously allocated array to reduce allocation
                overhead.

OUTPUT PARAMETERS:
    E       -   error function, sum-of-squares for regression networks,
                cross-entropy for classification networks.
    Grad    -   gradient of E with respect to weights of network, array[WCount]

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpgradnbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, const xparams _xparams = alglib::xdefault);


/*************************************************************************
Batch Hessian calculation (natural error function) using R-algorithm.
Internal subroutine.

  -- ALGLIB --
     Copyright 26.01.2008 by Bochkanov Sergey.

     Hessian calculation based on R-algorithm described in
     "Fast Exact Multiplication by the Hessian",
     B. A. Pearlmutter,
     Neural Computation, 1994.
*************************************************************************/
void mlphessiannbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, real_2d_array &h, const xparams _xparams = alglib::xdefault);


/*************************************************************************
Batch Hessian calculation using R-algorithm.
Internal subroutine.

  -- ALGLIB --
     Copyright 26.01.2008 by Bochkanov Sergey.

     Hessian calculation based on R-algorithm described in
     "Fast Exact Multiplication by the Hessian",
     B. A. Pearlmutter,
     Neural Computation, 1994.
*************************************************************************/
void mlphessianbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, real_2d_array &h, const xparams _xparams = alglib::xdefault);


/*************************************************************************
Calculation of all types of errors on a subset of the dataset.

  ! COMMERCIAL EDITION OF ALGLIB:
  !
  ! The Commercial Edition of ALGLIB includes the  following  important
  ! improvements of this function:
  ! * high-performance native backend with the same C# interface (C# version)
  ! * multithreading support (C++ and C# versions)
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    Network -   network initialized with one of the network creation funcs
    XY      -   original dataset; one sample = one row;
                first NIn columns contain inputs,
                next NOut columns - desired outputs.
    SetSize -   real size of XY, SetSize>=0;
    Subset  -   subset of SubsetSize elements, array[SubsetSize];
    SubsetSize- number of elements in Subset[] array:
                * if SubsetSize>0, rows of XY with indices Subset[0]...
                  ...Subset[SubsetSize-1] are processed
                * if SubsetSize=0, zeros are returned
                * if SubsetSize<0, entire dataset is  processed;  Subset[]
                  array is ignored in this case.

OUTPUT PARAMETERS:
    Rep     -   report structure containing all types of errors.

  -- ALGLIB --
     Copyright 04.09.2012 by Bochkanov Sergey
*************************************************************************/
void mlpallerrorssubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep, const xparams _xparams = alglib::xdefault);
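
/*
Example (an illustrative sketch, not part of the original header): computing
the whole error report at once. With SubsetSize=-1 the Subset[] argument is
ignored and the entire dataset is processed. Assumes mlpcreatec1() and that
the modelerrors report exposes the same fields as the computational-core
struct declared near the top of this header.

    void allerrors_demo()
    {
        alglib::multilayerperceptron net;
        alglib::mlpcreatec1(2, 5, 2, net);   // classifier, 2 classes

        alglib::real_2d_array    xy     = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        alglib::integer_1d_array subset = "[0]";   // ignored for SubsetSize<0
        alglib::modelerrors rep;

        alglib::mlpallerrorssubset(net, xy, 4, subset, -1, rep);
        // rep.rmserror, rep.avgerror, rep.avgrelerror, rep.avgce, rep.relclserror
    }
*/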


/*************************************************************************
Calculation of all types of errors on a subset of the dataset.

  ! COMMERCIAL EDITION OF ALGLIB:
  !
  ! The Commercial Edition of ALGLIB includes the  following  important
  ! improvements of this function:
  ! * high-performance native backend with the same C# interface (C# version)
  ! * multithreading support (C++ and C# versions)
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    Network -   network initialized with one of the network creation funcs
    XY      -   original dataset given by sparse matrix;
                one sample = one row;
                first NIn columns contain inputs,
                next NOut columns - desired outputs.
    SetSize -   real size of XY, SetSize>=0;
    Subset  -   subset of SubsetSize elements, array[SubsetSize];
    SubsetSize- number of elements in Subset[] array:
                * if SubsetSize>0, rows of XY with indices Subset[0]...
                  ...Subset[SubsetSize-1] are processed
                * if SubsetSize=0, zeros are returned
                * if SubsetSize<0, entire dataset is  processed;  Subset[]
                  array is ignored in this case.

OUTPUT PARAMETERS:
    Rep     -   report structure containing all types of errors.

  -- ALGLIB --
     Copyright 04.09.2012 by Bochkanov Sergey
*************************************************************************/
void mlpallerrorssparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep, const xparams _xparams = alglib::xdefault);


/*************************************************************************
Error of the neural network on a subset of the dataset.

  ! COMMERCIAL EDITION OF ALGLIB:
  !
  ! The Commercial Edition of ALGLIB includes the  following  important
  ! improvements of this function:
  ! * high-performance native backend with the same C# interface (C# version)
  ! * multithreading support (C++ and C# versions)
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    Network   -     neural network;
    XY        -     training  set,  see  below  for  information  on   the
                    training set format;
    SetSize   -     real size of XY, SetSize>=0;
    Subset    -     subset of SubsetSize elements, array[SubsetSize];
    SubsetSize-     number of elements in Subset[] array:
                    * if SubsetSize>0, rows of XY with indices Subset[0]...
                      ...Subset[SubsetSize-1] are processed
                    * if SubsetSize=0, zeros are returned
                    * if SubsetSize<0, entire dataset is  processed;  Subset[]
                      array is ignored in this case.

RESULT:
    sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)

DATASET FORMAT:

This  function  uses  two  different  dataset formats - one for regression
networks, another one for classification networks.

For regression networks with NIn inputs and NOut outputs,  the  following
dataset format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs

For classification networks with NIn inputs and  NClasses  classes,  the
following dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
  NClasses-1).

  -- ALGLIB --
     Copyright 04.09.2012 by Bochkanov Sergey
*************************************************************************/
double mlperrorsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, const xparams _xparams = alglib::xdefault);
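
/*
Example (an illustrative sketch, not part of the original header): the
sum-of-squares error restricted to two selected rows. Assumes mlpcreate1().

    void errorsubset_demo()
    {
        alglib::multilayerperceptron net;
        alglib::mlpcreate1(2, 5, 1, net);

        alglib::real_2d_array    xy     = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        alglib::integer_1d_array subset = "[1, 3]";

        double e = alglib::mlperrorsubset(net, xy, 4, subset, 2);
    }
*/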


/*************************************************************************
Error of the neural network on a subset of a sparse dataset.

  ! COMMERCIAL EDITION OF ALGLIB:
  !
  ! The Commercial Edition of ALGLIB includes the  following  important
  ! improvements of this function:
  ! * high-performance native backend with the same C# interface (C# version)
  ! * multithreading support (C++ and C# versions)
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    Network   -     neural network;
    XY        -     training  set,  see  below  for  information  on   the
                    training set format. This function checks  correctness
                    of  the  dataset  (no  NANs/INFs,  class  numbers  are
                    correct) and throws an exception  when  an  incorrect
                    dataset is passed. The sparse matrix must use the  CRS
                    format for storage.
    SetSize   -     real size of XY, SetSize>=0;
                    it is used when SubsetSize<0;
    Subset    -     subset of SubsetSize elements, array[SubsetSize];
    SubsetSize-     number of elements in Subset[] array:
                    * if SubsetSize>0, rows of XY with indices Subset[0]...
                      ...Subset[SubsetSize-1] are processed
                    * if SubsetSize=0, zeros are returned
                    * if SubsetSize<0, entire dataset is  processed;  Subset[]
                      array is ignored in this case.

RESULT:
    sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)

DATASET FORMAT:

This  function  uses  two  different  dataset formats - one for regression
networks, another one for classification networks.

For regression networks with NIn inputs and NOut outputs,  the  following
dataset format is used:
* dataset is given by NPoints*(NIn+NOut) matrix
* each row corresponds to one example
* first NIn columns are inputs, next NOut columns are outputs

For classification networks with NIn inputs and  NClasses  classes,  the
following dataset format is used:
* dataset is given by NPoints*(NIn+1) matrix
* each row corresponds to one example
* first NIn columns are inputs, last column stores class number (from 0 to
  NClasses-1).

  -- ALGLIB --
     Copyright 04.09.2012 by Bochkanov Sergey
*************************************************************************/
double mlperrorsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, const xparams _xparams = alglib::xdefault);
#endif

#if defined(AE_COMPILE_LDA) || !defined(AE_PARTIAL_BUILD)
/*************************************************************************
Multiclass Fisher LDA

The subroutine finds coefficients of a  linear  combination  which
optimally separates the training set into classes.

COMMERCIAL EDITION OF ALGLIB:

  ! The commercial version of ALGLIB includes two important improvements
  ! of this function, which can be used from C++ and C#:
  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
  ! * multithreading support
  !
  ! Intel MKL gives an approximately constant (with respect to the number
  ! of worker threads) acceleration factor which depends on the CPU being
  ! used, the problem size and the "baseline" ALGLIB edition  used  for
  ! comparison. Best results are achieved for high-dimensional  problems
  ! (NVars is at least 256).
  !
  ! Multithreading is used to accelerate the initial phase of LDA,  which
  ! includes calculation of products of large matrices. Again,  for  best
  ! efficiency the problem must be high-dimensional.
  !
  ! Generally, commercial ALGLIB is several times faster than the  open-
  ! source generic C edition, and many times faster than the open-source
  ! C# edition.
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    XY          -   training set, array[0..NPoints-1,0..NVars].
                    First NVars columns store values of independent
                    variables, the next column stores the class number
                    (from 0 to NClasses-1) which the dataset element
                    belongs to. Fractional values are rounded to the
                    nearest integer.
    NPoints     -   training set size, NPoints>=0
    NVars       -   number of independent variables, NVars>=1
    NClasses    -   number of classes, NClasses>=2


OUTPUT PARAMETERS:
    Info        -   return code:
                    * -4, if the internal EVD subroutine hasn't converged
                    * -2, if there is a point with class number
                          outside of [0..NClasses-1].
                    * -1, if incorrect parameters were passed (NPoints<0,
                          NVars<1, NClasses<2)
                    *  1, if the task has been solved
                    *  2, if there was multicollinearity in the  training
                          set, but the task has been solved.
    W           -   linear combination coefficients, array[0..NVars-1]

  -- ALGLIB --
     Copyright 31.05.2008 by Bochkanov Sergey
*************************************************************************/
void fisherlda(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, real_1d_array &w, const xparams _xparams = alglib::xdefault);
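
/*
Example (an illustrative sketch, not part of the original header): two
well-separated 2-dimensional classes; W receives the direction that best
separates them. The class label occupies the last of the NVars+1 columns.

    void fisherlda_demo()
    {
        // 4 points, NVars=2 variables + class label in the last column
        alglib::real_2d_array xy =
            "[[1.0,2.0,0],[1.2,1.8,0],[5.0,6.0,1],[5.2,5.8,1]]";

        alglib::ae_int_t info;
        alglib::real_1d_array w;
        alglib::fisherlda(xy, 4, 2, 2, info, w);
        // info=1 on success (2 on multicollinearity); w is array[NVars]
    }
*/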


/*************************************************************************
N-dimensional multiclass Fisher LDA

The subroutine finds coefficients of linear  combinations  which
optimally separate the training set into classes. It  returns  an
N-dimensional basis whose vectors are sorted by quality of training set
separation (in descending order).

  ! COMMERCIAL EDITION OF ALGLIB:
  !
  ! The Commercial Edition of ALGLIB includes the  following  important
  ! improvements of this function:
  ! * high-performance native backend with the same C# interface (C# version)
  ! * multithreading support (C++ and C# versions)
  ! * hardware vendor (Intel) implementations of linear algebra primitives
  !   (C++ and C# versions, x86/x64 platform)
  !
  ! We recommend reading the 'Working with commercial version' section of
  ! the ALGLIB Reference Manual to find out how to use the  performance-
  ! related features provided by the commercial edition of ALGLIB.

INPUT PARAMETERS:
    XY          -   training set, array[0..NPoints-1,0..NVars].
                    First NVars columns store values of independent
                    variables, the next column stores the class number
                    (from 0 to NClasses-1) which the dataset element
                    belongs to. Fractional values are rounded to the
                    nearest integer.
    NPoints     -   training set size, NPoints>=0
    NVars       -   number of independent variables, NVars>=1
    NClasses    -   number of classes, NClasses>=2


OUTPUT PARAMETERS:
    Info        -   return code:
                    * -4, if the internal EVD subroutine hasn't converged
                    * -2, if there is a point with class number
                          outside of [0..NClasses-1].
                    * -1, if incorrect parameters were passed (NPoints<0,
                          NVars<1, NClasses<2)
                    *  1, if the task has been solved
                    *  2, if there was multicollinearity in the  training
                          set, but the task has been solved.
    W           -   basis, array[0..NVars-1,0..NVars-1]
                    columns of the matrix store basis vectors, sorted  by
                    quality of training set separation (in descending order)

  -- ALGLIB --
     Copyright 31.05.2008 by Bochkanov Sergey
*************************************************************************/
void fisherldan(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, real_2d_array &w, const xparams _xparams = alglib::xdefault);
#endif

#if defined(AE_COMPILE_SSA) || !defined(AE_PARTIAL_BUILD)
/*************************************************************************
This function creates an SSA model object. Right after creation the model
is in "dummy" mode - you can add data, but analysis/prediction will return
just zeros (it assumes that the basis is empty).

HOW TO USE SSA MODEL:

1. create model with ssacreate()
2. add data with one/many ssaaddsequence() calls
3. choose SSA algorithm with one of ssasetalgo...() functions:
   * ssasetalgotopkdirect() for direct one-run analysis
   * ssasetalgotopkrealtime() for algorithm optimized for many  subsequent
     runs with warm-start capabilities
   * ssasetalgoprecomputed() for user-supplied basis
4. set window width with ssasetwindow()
5. perform one of the analysis-related activities:
   a) call ssagetbasis() to get basis
   b) call ssaanalyzelast(), ssaanalyzesequence() or ssaanalyzelastwindow()
      to perform analysis (trend/noise separation)
   c) call  one  of   the   forecasting   functions  (ssaforecastlast() or
      ssaforecastsequence()) to perform prediction; alternatively, you can
      extract linear recurrence coefficients with ssagetlrr().
   The SSA analysis will be performed during the first call to an analysis-
   related function. The SSA model is smart enough to track all changes in
   the dataset and model settings, to cache the previously computed  basis
   and to re-evaluate the basis only when necessary.

Additionally, if your setting involves a constant stream of incoming data,
you can quickly update an already calculated model with  one  of  the
incremental append-and-update functions:  ssaappendpointandupdate()  or
ssaappendsequenceandupdate().

NOTE: steps (2), (3), (4) can be performed in arbitrary  order;  a  usage
      sketch follows the declaration below.

INPUT PARAMETERS:
    none

OUTPUT PARAMETERS:
    S               -   structure which stores model state

  -- ALGLIB --
     Copyright 30.10.2017 by Bochkanov Sergey
*************************************************************************/
void ssacreate(ssamodel &s, const xparams _xparams = alglib::xdefault);
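
/*
Example (an illustrative sketch, not part of the original header): the
create/add/configure/analyze workflow described above, using the direct
top-K algorithm; the analysis is triggered by ssagetbasis(), whose
signature is assumed from the OUTPUT PARAMETERS documented further below.

    void ssa_demo()
    {
        alglib::ssamodel s;
        alglib::ssacreate(s);                       // step 1: create

        // step 2: add one 16-tick sequence (longer than the window below)
        alglib::real_1d_array x =
            "[0,1,2,3,2,1,0,1,2,3,2,1,0,1,2,3]";
        alglib::ssaaddsequence(s, x);

        alglib::ssasetalgotopkdirect(s, 2);         // step 3: choose algorithm
        alglib::ssasetwindow(s, 8);                 // step 4: set window width

        // step 5: the first analysis-related call computes the basis
        alglib::real_2d_array a;
        alglib::real_1d_array sv;
        alglib::ae_int_t w, nbasis;
        alglib::ssagetbasis(s, a, sv, w, nbasis);   // a is array[8,2]
    }
*/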


/*************************************************************************
This function sets the window width for the SSA model. You should call it
before the analysis phase. The default window width is 1 (not intended for
real use).

Special notes:
* this function call can be performed at any moment before the first  call
  to analysis-related functions
* changing the window width invalidates the internally stored  basis;  if
  you change the window width AFTER you call an analysis-related function,
  the next analysis phase will require re-calculation of  the  basis
  according to the current algorithm.
* calling this function with exactly the same window width as the  current
  one has no effect
* if you specify a window width larger than any data sequence  stored  in
  the model, analysis will return a zero basis.

INPUT PARAMETERS:
    S               -   SSA model created with ssacreate()
    WindowWidth     -   >=1, new window width

OUTPUT PARAMETERS:
    S               -   SSA model, updated

  -- ALGLIB --
     Copyright 30.10.2017 by Bochkanov Sergey
*************************************************************************/
void ssasetwindow(const ssamodel &s, const ae_int_t windowwidth, const xparams _xparams = alglib::xdefault);


/*************************************************************************
This function sets the seed which is used to initialize the internal  RNG
when we make pseudorandom decisions on model updates.

By default, a deterministic seed is used - which results  in  the  same
sequence of pseudorandom decisions every time you run the SSA  model.  If
you specify a non-deterministic seed value, then the SSA model may return
slightly different results after each run.

This function can be useful when you have several SSA models updated with
ssaappendpointandupdate() called with 0<UpdateIts<1 (a fractional  value)
and, due to performance limitations, want them to  perform  updates  at
different moments.

INPUT PARAMETERS:
    S       -   SSA model
    Seed    -   seed:
                * positive values = use deterministic seed for each run of
                  algorithms which depend on random initialization
                * zero or negative values = use non-deterministic seed

  -- ALGLIB --
     Copyright 03.11.2017 by Bochkanov Sergey
*************************************************************************/
void ssasetseed(const ssamodel &s, const ae_int_t seed, const xparams _xparams = alglib::xdefault);


/*************************************************************************
This function sets the length of the power-up cycle  for  the  real-time
algorithm.

By default, this algorithm performs a costly O(N*WindowWidth^2) init phase
followed by a full run of truncated EVD. However,  if  you  are  ready  to
live with a slightly lower-quality basis during the first few  iterations,
you can split this O(N*WindowWidth^2) initialization  between  several
subsequent append-and-update rounds. This results in better latency of the
algorithm.

This function invalidates the basis/solver; the next analysis  call  will
result in a full recalculation of everything.

INPUT PARAMETERS:
    S       -   SSA model
    PWLen   -   length of the power-up stage:
                * 0 means that no power-up is requested
                * 1 is the same as 0
                * >1 means that delayed power-up is performed

  -- ALGLIB --
     Copyright 03.11.2017 by Bochkanov Sergey
*************************************************************************/
void ssasetpoweruplength(const ssamodel &s, const ae_int_t pwlen, const xparams _xparams = alglib::xdefault);


/*************************************************************************
This function sets the memory limit of the SSA analysis.

Straightforward SSA with sequence length T and window width W needs O(T*W)
memory. It is possible to reduce memory consumption by splitting the  task
into smaller chunks.

This function allows you to specify an approximate memory limit (measured
in double precision numbers used for buffers). Actual memory  consumption
will be comparable to the number you specify.

The default memory limit is 50,000,000 (400 MB) in the current version.

INPUT PARAMETERS:
    S       -   SSA model
    MemLimit-   memory limit, >=0. Zero value means no limit.

  -- ALGLIB --
     Copyright 20.12.2017 by Bochkanov Sergey
*************************************************************************/
void ssasetmemorylimit(const ssamodel &s, const ae_int_t memlimit, const xparams _xparams = alglib::xdefault);


/*************************************************************************
This function adds a data sequence to the SSA model. Only single-dimensional
sequences are supported.

What is a sequence? The following definitions/requirements apply:
* a sequence  is  an  array of  values  measured  in  subsequent,  equally
  separated time moments (ticks).
* you may have many sequences  in your  dataset;  say,  one  sequence  may
  correspond to one trading session.
* sequence length should be larger than the current window length (shorter
  sequences will be ignored during analysis).
* analysis is performed within a  sequence; different  sequences  are  NOT
  stacked together to produce one large contiguous stream of data.
* analysis is performed for all  sequences at once, i.e. same set of basis
  vectors is computed for all sequences

INCREMENTAL ANALYSIS

This function is NOT intended for incremental updates of previously  found
SSA basis. Calling it invalidates all previous analysis results (the basis
is reset and will be recalculated from zero during the next analysis).

If you want to perform incremental/real-time SSA,  consider  using  the
following functions:
* ssaappendpointandupdate() for appending one point
* ssaappendsequenceandupdate() for appending new sequence

INPUT PARAMETERS:
    S               -   SSA model created with ssacreate()
    X               -   array[N], data, can be larger (additional values
                        are ignored)
    N               -   data length, can be automatically determined from
                        the array length. N>=0.

OUTPUT PARAMETERS:
    S               -   SSA model, updated

NOTE: you can clear dataset with ssacleardata()

  -- ALGLIB --
     Copyright 30.10.2017 by Bochkanov Sergey
*************************************************************************/
void ssaaddsequence(const ssamodel &s, const real_1d_array &x, const ae_int_t n, const xparams _xparams = alglib::xdefault);
void ssaaddsequence(const ssamodel &s, const real_1d_array &x, const xparams _xparams = alglib::xdefault);


/*************************************************************************
This function appends a single point to the last data sequence stored  in
the SSA model and tries to update the model  in  an  incremental  manner
(if possible with the current algorithm).

If you want to add more than one point at once:
* if you want to add M points to the same sequence, perform M-1 calls with
  UpdateIts parameter set to 0.0, and last call with non-zero UpdateIts.
* if you want to add new sequence, use ssaappendsequenceandupdate()

Running time of this function does NOT depend on  dataset  size,  only  on
window width and number of singular vectors. Depending on algorithm  being
used, incremental update has complexity:
* for top-K real time   - O(UpdateIts*K*Width^2), with fractional UpdateIts
* for top-K direct      - O(Width^3) for any non-zero UpdateIts
* for precomputed basis - O(1), no update is performed

INPUT PARAMETERS:
    S               -   SSA model created with ssacreate()
    X               -   new point
    UpdateIts       -   >=0,  floating  point (!)  value,  desired  update
                        frequency:
                        * zero value means that point is  stored,  but  no
                          update is performed
                        * integer part of the value means  that  specified
                          number of iterations is always performed
                        * fractional part of  the  value  means  that  one
                          iteration is performed with this probability.

                        Recommended value: 0<UpdateIts<=1.  Values  larger
                        than 1 are VERY seldom  needed.  If  your  dataset
                        changes slowly, you can set it  to  0.1  and  skip
                        90% of updates.

                        In any case, no information is lost even  with  a
                        zero value of UpdateIts! It will be  incorporated
                        into the model, sooner or later.

OUTPUT PARAMETERS:
    S               -   SSA model, updated

NOTE: this function uses the internal RNG to handle fractional  values  of
      UpdateIts. By default it is initialized with a  fixed  seed  during
      the initial calculation of the basis. Thus subsequent calls to this
      function will result in the same sequence of pseudorandom decisions.

      However, if  you  have  several  SSA  models  which  are  calculated
      simultaneously, and if you want to reduce computational  bottlenecks
      by performing random updates at random moments, then a fixed seed is
      not an option - all updates will fire at the same moments.

      You may change it with the ssasetseed() function.

NOTE: this function throws an exception if called for an  empty  dataset
      (there is no "last" sequence to modify).

  -- ALGLIB --
     Copyright 30.10.2017 by Bochkanov Sergey
*************************************************************************/
void ssaappendpointandupdate(const ssamodel &s, const double x, const double updateits, const xparams _xparams = alglib::xdefault);
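
/*
Example (an illustrative sketch, not part of the original header): streaming
ticks into the model. The first appends use UpdateIts=0.0 (store only), the
last one triggers an incremental update, as recommended above for adding
several points at once. Assumes the model was configured as in the earlier
sketches.

    void ssa_stream_demo(alglib::ssamodel &s, const double *ticks, int m)
    {
        for (int i = 0; i + 1 < m; i++)
            alglib::ssaappendpointandupdate(s, ticks[i], 0.0);      // store only
        if (m > 0)
            alglib::ssaappendpointandupdate(s, ticks[m - 1], 1.0);  // update now
    }
*/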


/*************************************************************************
This function appends a new sequence to the dataset stored  in  the  SSA
model and tries to update the model in an incremental manner (if possible
with the current algorithm).

Notes:
* if you want to add M sequences at once, perform M-1 calls with UpdateIts
  parameter set to 0.0, and last call with non-zero UpdateIts.
* if you want to add just one point, use ssaappendpointandupdate()

Running time of this function does NOT depend on  dataset  size,  only  on
sequence length, window width and number of singular vectors. Depending on
algorithm being used, incremental update has complexity:
* for top-K real time   - O(UpdateIts*K*Width^2+(NTicks-Width)*Width^2)
* for top-K direct      - O(Width^3+(NTicks-Width)*Width^2)
* for precomputed basis - O(1), no update is performed

INPUT PARAMETERS:
    S               -   SSA model created with ssacreate()
    X               -   new sequence, array[NTicks] or larger
    NTicks          -   >=1, number of ticks in the sequence
    UpdateIts       -   >=0,  floating  point (!)  value,  desired  update
                        frequency:
                        * zero value means that point is  stored,  but  no
                          update is performed
                        * integer part of the value means  that  specified
                          number of iterations is always performed
                        * fractional part of  the  value  means  that  one
                          iteration is performed with this probability.

                        Recommended value: 0<UpdateIts<=1.  Values  larger
                        than 1 are VERY seldom  needed.  If  your  dataset
                        changes slowly, you can set it  to  0.1  and  skip
                        90% of updates.

                        In any case, no information is lost even  with  a
                        zero value of UpdateIts! It will be  incorporated
                        into the model, sooner or later.

OUTPUT PARAMETERS:
    S               -   SSA model, updated

NOTE: this function uses the internal RNG to handle fractional  values  of
      UpdateIts. By default it is initialized with a  fixed  seed  during
      the initial calculation of the basis. Thus subsequent calls to this
      function will result in the same sequence of pseudorandom decisions.

      However, if  you  have  several  SSA  models  which  are  calculated
      simultaneously, and if you want to reduce computational  bottlenecks
      by performing random updates at random moments, then a fixed seed is
      not an option - all updates will fire at the same moments.

      You may change it with the ssasetseed() function.

  -- ALGLIB --
     Copyright 30.10.2017 by Bochkanov Sergey
*************************************************************************/
void ssaappendsequenceandupdate(const ssamodel &s, const real_1d_array &x, const ae_int_t nticks, const double updateits, const xparams _xparams = alglib::xdefault);
void ssaappendsequenceandupdate(const ssamodel &s, const real_1d_array &x, const double updateits, const xparams _xparams = alglib::xdefault);


/*************************************************************************
This function sets the SSA algorithm to the "precomputed vectors" algorithm.

This algorithm uses a precomputed set  of  orthonormal  (orthogonal  AND
normalized) basis vectors supplied by the user. Thus, the basis calculation
phase is not performed - we already  have  our  basis  -  and  only  the
analysis/forecasting phase requires actual calculations.

This algorithm may handle "append" requests which add just  one/few  ticks
to the end of the last sequence in O(1) time.

NOTE: this algorithm accepts both basis and window  width,  because  these
      two parameters are naturally aligned.  Calling  this  function  sets
      the window width; if you call ssasetwindow() with  another  window
      width, then during the analysis stage the algorithm will detect the
      conflict and reset to a zero basis.

INPUT PARAMETERS:
    S               -   SSA model
    A               -   array[WindowWidth,NBasis], orthonormalized  basis;
                        this function does NOT control  orthogonality  and
                        does NOT perform any kind of  renormalization.  It
                        is your responsibility to provide it with  correct
                        basis.
    WindowWidth     -   window width, >=1
    NBasis          -   number of basis vectors, 1<=NBasis<=WindowWidth

OUTPUT PARAMETERS:
    S               -   updated model

NOTE: calling this function invalidates the basis in all cases.

  -- ALGLIB --
     Copyright 30.10.2017 by Bochkanov Sergey
*************************************************************************/
void ssasetalgoprecomputed(const ssamodel &s, const real_2d_array &a, const ae_int_t windowwidth, const ae_int_t nbasis, const xparams _xparams = alglib::xdefault);
void ssasetalgoprecomputed(const ssamodel &s, const real_2d_array &a, const xparams _xparams = alglib::xdefault);
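
/*
Example (an illustrative sketch, not part of the original header): supplying
a user-defined orthonormal basis. Here a single normalized vector of width 2,
(1/sqrt(2), 1/sqrt(2)), is used; since the function does not verify
orthonormality, correctness of the basis is the caller's responsibility.

    void ssa_precomputed_demo(alglib::ssamodel &s)
    {
        // array[WindowWidth=2, NBasis=1]; the single column has unit length
        alglib::real_2d_array a = "[[0.7071067811865476],[0.7071067811865476]]";
        alglib::ssasetalgoprecomputed(s, a, 2, 1);
        // the two-argument overload above presumably infers
        // WindowWidth/NBasis from the shape of A (an assumption)
    }
*/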


/*************************************************************************
This function sets the SSA algorithm to the "direct top-K" algorithm.

The "direct top-K" algorithm performs a full SVD of the  N*WINDOW
trajectory matrix (hence its name - a direct solver is used), then extracts
the top K components. Overall running time is O(N*WINDOW^2), where N is the
number of ticks in the dataset and WINDOW is the window width.

This algorithm may handle "append" requests which add just  one/few  ticks
to the end of the last sequence in O(WINDOW^3) time,  which  is  ~N/WINDOW
times faster than re-computing everything from scratch.

INPUT PARAMETERS:
    S               -   SSA model
    TopK            -   number of components to analyze; TopK>=1.

OUTPUT PARAMETERS:
    S               -   updated model


NOTE: TopK>WindowWidth is silently decreased to WindowWidth  during  the
      analysis phase

NOTE: calling this function invalidates the basis, except for the situation
      when this algorithm was already set with the same parameters.

  -- ALGLIB --
     Copyright 30.10.2017 by Bochkanov Sergey
*************************************************************************/
void ssasetalgotopkdirect(const ssamodel &s, const ae_int_t topk, const xparams _xparams = alglib::xdefault);
3926 
3927 
3928 /*************************************************************************
3929 This function sets SSA algorithm to "top-K real time algorithm". This algo
3930 extracts K components with largest singular values.
3931 
3932 It  is  real-time  version  of  top-K  algorithm  which  is  optimized for
3933 incremental processing and  fast  start-up. Internally  it  uses  subspace
3934 eigensolver for truncated SVD. It results  in  ability  to  perform  quick
3935 updates of the basis when only a few points/sequences is added to dataset.
3936 
3937 Performance profile of the algorithm is given below:
3938 * O(K*WindowWidth^2) running time for incremental update  of  the  dataset
3939   with one of the "append-and-update" functions (ssaappendpointandupdate()
3940   or ssaappendsequenceandupdate()).
3941 * O(N*WindowWidth^2) running time for initial basis evaluation (N=size  of
3942   dataset)
3943 * ability  to  split  costly  initialization  across  several  incremental
3944   updates of the basis (so called "Power-Up" functionality,  activated  by
3945   ssasetpoweruplength() function)
3946 
3947 INPUT PARAMETERS:
3948     S               -   SSA model
3949     TopK            -   number of components to analyze; TopK>=1.
3950 
3951 OUTPUT PARAMETERS:
3952     S               -   updated model
3953 
3954 NOTE: this  algorithm  is  optimized  for  large-scale  tasks  with  large
3955       datasets. On toy problems with just  5-10 points it can return basis
3956       which is slightly different from that returned by  direct  algorithm
3957       (ssasetalgotopkdirect() function). However, the  difference  becomes
3958       negligible as dataset grows.
3959 
NOTE: if TopK>WindowWidth, TopK is silently decreased to WindowWidth during
      the analysis phase
3962 
3963 NOTE: calling this function invalidates basis, except  for  the  situation
3964       when this algorithm was already set with same parameters.
3965 
3966   -- ALGLIB --
3967      Copyright 30.10.2017 by Bochkanov Sergey
3968 *************************************************************************/
3969 void ssasetalgotopkrealtime(const ssamodel &s, const ae_int_t topk, const xparams _xparams = alglib::xdefault);
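
/*************************************************************************
Usage sketch for ssasetalgotopkrealtime(). This is a hypothetical example,
not an official ALGLIB sample: start-up cost is spread over several updates
with ssasetpoweruplength(), and new ticks arrive via the append function
mentioned above. The sine wave is a placeholder signal, and the  UpdateIts
value of 1.0 is just one plausible setting (see the documentation  of  the
append functions for its exact meaning).

    #include <cmath>
    #include "dataanalysis.h"
    using namespace alglib;

    void topk_realtime_sketch()
    {
        ssamodel s;
        ssacreate(s);
        ssasetwindow(s, 20);
        ssasetalgotopkrealtime(s, 2);
        ssasetpoweruplength(s, 5);   // split start-up over 5 updates

        real_1d_array hist;
        hist.setlength(100);
        for(int i=0; i<100; i++)
            hist[i] = std::sin(0.1*i);
        ssaaddsequence(s, hist);

        // O(K*WindowWidth^2) incremental updates as new ticks arrive
        for(int i=100; i<110; i++)
            ssaappendpointandupdate(s, std::sin(0.1*i), 1.0);
    }
*************************************************************************/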
3970 
3971 
3972 /*************************************************************************
3973 This function clears all data stored in the  model  and  invalidates  all
3974 basis components found so far.
3975 
3976 INPUT PARAMETERS:
3977     S               -   SSA model created with ssacreate()
3978 
3979 OUTPUT PARAMETERS:
3980     S               -   SSA model, updated
3981 
3982   -- ALGLIB --
3983      Copyright 30.10.2017 by Bochkanov Sergey
3984 *************************************************************************/
3985 void ssacleardata(const ssamodel &s, const xparams _xparams = alglib::xdefault);
3986 
3987 
3988 /*************************************************************************
3989 This function executes SSA on internally stored dataset and returns  basis
3990 found by current method.
3991 
3992 INPUT PARAMETERS:
3993     S               -   SSA model
3994 
3995 OUTPUT PARAMETERS:
    A               -   array[WindowWidth,NBasis],   basis;  vectors  are
                        stored in matrix columns, in order  of  decreasing
                        variance
3998     SV              -   array[NBasis]:
3999                         * zeros - for model initialized with SSASetAlgoPrecomputed()
4000                         * singular values - for other algorithms
4001     WindowWidth     -   current window
4002     NBasis          -   basis size
4003 
4004 
4005 CACHING/REUSE OF THE BASIS
4006 
4007 Caching/reuse of previous results is performed:
4008 * first call performs full run of SSA; basis is stored in the cache
4009 * subsequent calls reuse previously cached basis
4010 * if you call any function which changes model properties (window  length,
4011   algorithm, dataset), internal basis will be invalidated.
4012 * the only calls which do NOT invalidate basis are listed below:
4013   a) ssasetwindow() with same window length
4014   b) ssaappendpointandupdate()
4015   c) ssaappendsequenceandupdate()
4016   d) ssasetalgotopk...() with exactly same K
4017   Calling these functions will result in reuse of previously found basis.
4018 
4019 
4020 HANDLING OF DEGENERATE CASES
4021 
4022 Calling  this  function  in  degenerate  cases  (no  data  or all data are
4023 shorter than window size; no algorithm is specified)  returns  basis  with
4024 just one zero vector.
4025 
4026   -- ALGLIB --
4027      Copyright 30.10.2017 by Bochkanov Sergey
4028 *************************************************************************/
4029 void ssagetbasis(const ssamodel &s, real_2d_array &a, real_1d_array &sv, ae_int_t &windowwidth, ae_int_t &nbasis, const xparams _xparams = alglib::xdefault);
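
/*************************************************************************
Usage sketch for ssagetbasis(). This is a hypothetical example,  not  an
official ALGLIB sample: it retrieves the basis and singular values  after
configuring a direct top-K model; the data values are placeholders.

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    void getbasis_sketch()
    {
        ssamodel s;
        ssacreate(s);
        real_1d_array x = "[1.2,0.9,1.1,1.0,1.3,0.8,1.2,1.0]";
        ssaaddsequence(s, x);
        ssasetwindow(s, 3);
        ssasetalgotopkdirect(s, 2);

        real_2d_array a;      // WindowWidth x NBasis, columns = vectors
        real_1d_array sv;     // singular values, in decreasing order
        ae_int_t w, nbasis;
        ssagetbasis(s, a, sv, w, nbasis);
        printf("%s\n", sv.tostring(3).c_str());
    }
*************************************************************************/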
4030 
4031 
4032 /*************************************************************************
4033 This function returns linear recurrence relation (LRR) coefficients  found
4034 by current SSA algorithm.
4035 
4036 INPUT PARAMETERS:
4037     S               -   SSA model
4038 
4039 OUTPUT PARAMETERS:
4040     A               -   array[WindowWidth-1]. Coefficients  of  the
4041                         linear recurrence of the form:
4042                         X[W-1] = X[W-2]*A[W-2] + X[W-3]*A[W-3] + ... + X[0]*A[0].
4043                         Empty array for WindowWidth=1.
4044     WindowWidth     -   current window width
4045 
4046 
4047 CACHING/REUSE OF THE BASIS
4048 
4049 Caching/reuse of previous results is performed:
4050 * first call performs full run of SSA; basis is stored in the cache
4051 * subsequent calls reuse previously cached basis
4052 * if you call any function which changes model properties (window  length,
4053   algorithm, dataset), internal basis will be invalidated.
4054 * the only calls which do NOT invalidate basis are listed below:
4055   a) ssasetwindow() with same window length
4056   b) ssaappendpointandupdate()
4057   c) ssaappendsequenceandupdate()
4058   d) ssasetalgotopk...() with exactly same K
4059   Calling these functions will result in reuse of previously found basis.
4060 
4061 
4062 HANDLING OF DEGENERATE CASES
4063 
4064 Calling  this  function  in  degenerate  cases  (no  data  or all data are
4065 shorter than window size; no algorithm is specified) returns zeros.
4066 
4067   -- ALGLIB --
4068      Copyright 30.10.2017 by Bochkanov Sergey
4069 *************************************************************************/
4070 void ssagetlrr(const ssamodel &s, real_1d_array &a, ae_int_t &windowwidth, const xparams _xparams = alglib::xdefault);
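
/*************************************************************************
Usage sketch: applying the LRR manually. This hypothetical helper (not an
official ALGLIB sample) applies coefficients returned by  ssagetlrr()  to
the last WindowWidth-1 points of an already smoothed series,  exactly  as
in the recurrence above. It is only a sketch; the ssaforecast...() family
below should normally be used, since it also performs trend extraction.

    #include "dataanalysis.h"
    using namespace alglib;

    // a = LRR coefficients from ssagetlrr(), w = window width,
    // x = smoothed series of length n (n>=w-1); returns the next tick
    double lrr_predict_next(const real_1d_array &a, ae_int_t w,
                            const real_1d_array &x, ae_int_t n)
    {
        double v = 0.0;
        for(ae_int_t j=0; j<w-1; j++)
            v += a[j]*x[n-(w-1)+j];  // X[next] = sum of A[j]*X[...]
        return v;
    }
*************************************************************************/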
4071 
4072 
4073 /*************************************************************************
4074 This  function  executes  SSA  on  internally  stored  dataset and returns
4075 analysis  for  the  last  window  of  the  last sequence. Such analysis is
a lightweight alternative to full-scale reconstruction (see below).
4077 
4078 Typical use case for this function is  real-time  setting,  when  you  are
4079 interested in quick-and-dirty (very quick and very  dirty)  processing  of
4080 just a few last ticks of the trend.
4081 
4082 IMPORTANT: full  scale  SSA  involves  analysis  of  the  ENTIRE  dataset,
4083            with reconstruction being done for  all  positions  of  sliding
4084            window with subsequent hankelization  (diagonal  averaging)  of
4085            the resulting matrix.
4086 
4087            Such analysis requires O((DataLen-Window)*Window*NBasis)  FLOPs
4088            and can be quite costly. However, it has  nice  noise-canceling
4089            effects due to averaging.
4090 
4091            This function performs REDUCED analysis of the last window.  It
4092            is much faster - just O(Window*NBasis),  but  its  results  are
           DIFFERENT from those of ssaanalyzelast(). In particular, the
           first few points of the trend are much more prone to noise.
4095 
4096 INPUT PARAMETERS:
4097     S               -   SSA model
4098 
4099 OUTPUT PARAMETERS:
4100     Trend           -   array[WindowSize], reconstructed trend line
4101     Noise           -   array[WindowSize], the rest of the signal;
4102                         it holds that ActualData = Trend+Noise.
4103     NTicks          -   current WindowSize
4104 
4105 
4106 CACHING/REUSE OF THE BASIS
4107 
4108 Caching/reuse of previous results is performed:
4109 * first call performs full run of SSA; basis is stored in the cache
4110 * subsequent calls reuse previously cached basis
4111 * if you call any function which changes model properties (window  length,
4112   algorithm, dataset), internal basis will be invalidated.
4113 * the only calls which do NOT invalidate basis are listed below:
4114   a) ssasetwindow() with same window length
4115   b) ssaappendpointandupdate()
4116   c) ssaappendsequenceandupdate()
4117   d) ssasetalgotopk...() with exactly same K
4118   Calling these functions will result in reuse of previously found basis.
4119 
4120 In  any  case,  only  basis  is  reused. Reconstruction is performed  from
4121 scratch every time you call this function.
4122 
4123 
4124 HANDLING OF DEGENERATE CASES
4125 
The following degenerate cases may happen:
* dataset is empty (no analysis can be done)
* all sequences are shorter than the window length (no analysis can be done)
* no algorithm is specified (no analysis can be done)
* last sequence is shorter than the window length (analysis  can  be  done,
  but we cannot perform reconstruction on the last sequence)
4132 
Calling this function in degenerate cases returns the following result:
* in any case, WindowWidth ticks are returned
* trend is assumed to be zero
* noise is initialized by the last sequence; if the last sequence is shorter
  than the window size, it is moved to  the  end  of  the  array,  and  the
  beginning of the noise array is filled by zeros
4139 
4140 No analysis is performed in degenerate cases (we immediately return  dummy
4141 values, no basis is constructed).
4142 
4143   -- ALGLIB --
4144      Copyright 30.10.2017 by Bochkanov Sergey
4145 *************************************************************************/
4146 void ssaanalyzelastwindow(const ssamodel &s, real_1d_array &trend, real_1d_array &noise, ae_int_t &nticks, const xparams _xparams = alglib::xdefault);
4147 
4148 
4149 /*************************************************************************
4150 This function:
4151 * builds SSA basis using internally stored (entire) dataset
4152 * returns reconstruction for the last NTicks of the last sequence
4153 
4154 If you want to analyze some other sequence, use ssaanalyzesequence().
4155 
4156 Reconstruction phase involves  generation  of  NTicks-WindowWidth  sliding
4157 windows, their decomposition using empirical orthogonal functions found by
4158 SSA, followed by averaging of each data point across  several  overlapping
4159 windows. Thus, every point in the output trend is reconstructed  using  up
4160 to WindowWidth overlapping  windows  (WindowWidth windows exactly  in  the
4161 inner points, just one window at the extremal points).
4162 
4163 IMPORTANT: due to averaging this function returns  different  results  for
4164            different values of NTicks. It is expected and not a bug.
4165 
4166            For example:
4167            * Trend[NTicks-1] is always same because it is not averaged  in
4168              any case (same applies to Trend[0]).
           * Trend[NTicks-2] has different values  for  NTicks=WindowWidth
             and NTicks=WindowWidth+1, because in the former case no
             averaging is performed, while in the latter case averaging
             over two sliding windows is performed. Larger values of NTicks
             produce the same results as NTicks=WindowWidth+1.
4174            * ...and so on...
4175 
4176 PERFORMANCE: this  function has O((NTicks-WindowWidth)*WindowWidth*NBasis)
4177              running time. If you work  in  time-constrained  setting  and
4178              have to analyze just a few last ticks, choosing NTicks  equal
4179              to WindowWidth+SmoothingLen, with SmoothingLen=1...WindowWidth
4180              will result in good compromise between noise cancellation and
4181              analysis speed.
4182 
4183 INPUT PARAMETERS:
4184     S               -   SSA model
    NTicks          -   number of ticks to analyze, NTicks>=1.
4186                         * special case of NTicks<=WindowWidth  is  handled
4187                           by analyzing last window and  returning   NTicks
4188                           last ticks.
4189                         * special case NTicks>LastSequenceLen  is  handled
4190                           by prepending result with NTicks-LastSequenceLen
4191                           zeros.
4192 
4193 OUTPUT PARAMETERS:
4194     Trend           -   array[NTicks], reconstructed trend line
4195     Noise           -   array[NTicks], the rest of the signal;
4196                         it holds that ActualData = Trend+Noise.
4197 
4198 
4199 CACHING/REUSE OF THE BASIS
4200 
4201 Caching/reuse of previous results is performed:
4202 * first call performs full run of SSA; basis is stored in the cache
4203 * subsequent calls reuse previously cached basis
4204 * if you call any function which changes model properties (window  length,
4205   algorithm, dataset), internal basis will be invalidated.
4206 * the only calls which do NOT invalidate basis are listed below:
4207   a) ssasetwindow() with same window length
4208   b) ssaappendpointandupdate()
4209   c) ssaappendsequenceandupdate()
4210   d) ssasetalgotopk...() with exactly same K
4211   Calling these functions will result in reuse of previously found basis.
4212 
4213 In  any  case,  only  basis  is  reused. Reconstruction is performed  from
4214 scratch every time you call this function.
4215 
4216 
4217 HANDLING OF DEGENERATE CASES
4218 
The following degenerate cases may happen:
* dataset is empty (no analysis can be done)
* all sequences are shorter than the window length (no analysis can be done)
* no algorithm is specified (no analysis can be done)
* last sequence is shorter than the window length (analysis  can  be  done,
  but we cannot perform reconstruction on the last sequence)
4225 
Calling this function in degenerate cases returns the following result:
* in any case, NTicks ticks are returned
* trend is assumed to be zero
* noise is initialized by the last sequence; if the last sequence is shorter
  than the window size, it is moved to  the  end  of  the  array,  and  the
  beginning of the noise array is filled by zeros
4232 
4233 No analysis is performed in degenerate cases (we immediately return  dummy
4234 values, no basis is constructed).
4235 
4236   -- ALGLIB --
4237      Copyright 30.10.2017 by Bochkanov Sergey
4238 *************************************************************************/
4239 void ssaanalyzelast(const ssamodel &s, const ae_int_t nticks, real_1d_array &trend, real_1d_array &noise, const xparams _xparams = alglib::xdefault);
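
/*************************************************************************
Usage sketch for ssaanalyzelast(). This is a hypothetical example, not an
official ALGLIB sample: it splits the last 16 ticks of a synthetic series
(linear drift plus oscillation, placeholders only) into trend and noise.

    #include <cmath>
    #include "dataanalysis.h"
    using namespace alglib;

    void analyzelast_sketch()
    {
        ssamodel s;
        ssacreate(s);
        real_1d_array x;
        x.setlength(64);
        for(int i=0; i<64; i++)
            x[i] = 0.1*i + std::sin(0.5*i);
        ssaaddsequence(s, x);
        ssasetwindow(s, 8);
        ssasetalgotopkdirect(s, 1);

        real_1d_array trend, noise;
        ssaanalyzelast(s, 16, trend, noise); // trend[i]+noise[i]=data[i]
    }
*************************************************************************/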
4240 
4241 
4242 /*************************************************************************
4243 This function:
4244 * builds SSA basis using internally stored (entire) dataset
4245 * returns reconstruction for the sequence being passed to this function
4246 
4247 If  you  want  to  analyze  last  sequence  stored  in   the   model,  use
4248 ssaanalyzelast().
4249 
4250 Reconstruction phase involves  generation  of  NTicks-WindowWidth  sliding
4251 windows, their decomposition using empirical orthogonal functions found by
4252 SSA, followed by averaging of each data point across  several  overlapping
4253 windows. Thus, every point in the output trend is reconstructed  using  up
4254 to WindowWidth overlapping  windows  (WindowWidth windows exactly  in  the
4255 inner points, just one window at the extremal points).
4256 
4257 PERFORMANCE: this  function has O((NTicks-WindowWidth)*WindowWidth*NBasis)
4258              running time. If you work  in  time-constrained  setting  and
4259              have to analyze just a few last ticks, choosing NTicks  equal
4260              to WindowWidth+SmoothingLen, with SmoothingLen=1...WindowWidth
4261              will result in good compromise between noise cancellation and
4262              analysis speed.
4263 
4264 INPUT PARAMETERS:
4265     S               -   SSA model
4266     Data            -   array[NTicks], can be larger (only NTicks  leading
4267                         elements will be used)
    NTicks          -   number of ticks to analyze, NTicks>=1.
4269                         * special case of NTicks<WindowWidth  is   handled
4270                           by returning zeros as trend, and signal as noise
4271 
4272 OUTPUT PARAMETERS:
4273     Trend           -   array[NTicks], reconstructed trend line
4274     Noise           -   array[NTicks], the rest of the signal;
4275                         it holds that ActualData = Trend+Noise.
4276 
4277 
4278 CACHING/REUSE OF THE BASIS
4279 
4280 Caching/reuse of previous results is performed:
4281 * first call performs full run of SSA; basis is stored in the cache
4282 * subsequent calls reuse previously cached basis
4283 * if you call any function which changes model properties (window  length,
4284   algorithm, dataset), internal basis will be invalidated.
4285 * the only calls which do NOT invalidate basis are listed below:
4286   a) ssasetwindow() with same window length
4287   b) ssaappendpointandupdate()
4288   c) ssaappendsequenceandupdate()
4289   d) ssasetalgotopk...() with exactly same K
4290   Calling these functions will result in reuse of previously found basis.
4291 
4292 In  any  case,  only  basis  is  reused. Reconstruction is performed  from
4293 scratch every time you call this function.
4294 
4295 
4296 HANDLING OF DEGENERATE CASES
4297 
The following degenerate cases may happen:
* dataset is empty (no analysis can be done)
* all sequences are shorter than the window length (no analysis can be done)
* no algorithm is specified (no analysis can be done)
* sequence being passed is shorter than the window length
4303 
Calling this function in degenerate cases returns the following result:
* in any case, NTicks ticks are returned
* trend is assumed to be zero
* noise is initialized by the sequence.
4308 
4309 No analysis is performed in degenerate cases (we immediately return  dummy
4310 values, no basis is constructed).
4311 
4312   -- ALGLIB --
4313      Copyright 30.10.2017 by Bochkanov Sergey
4314 *************************************************************************/
4315 void ssaanalyzesequence(const ssamodel &s, const real_1d_array &data, const ae_int_t nticks, real_1d_array &trend, real_1d_array &noise, const xparams _xparams = alglib::xdefault);
4316 void ssaanalyzesequence(const ssamodel &s, const real_1d_array &data, real_1d_array &trend, real_1d_array &noise, const xparams _xparams = alglib::xdefault);
4317 
4318 
4319 /*************************************************************************
4320 This function builds SSA basis and performs forecasting  for  a  specified
4321 number of ticks, returning value of trend.
4322 
4323 Forecast is performed as follows:
4324 * SSA  trend  extraction  is  applied  to last WindowWidth elements of the
4325   internally stored dataset; this step is basically a noise reduction.
4326 * linear recurrence relation is applied to extracted trend
4327 
4328 This function has following running time:
4329 * O(NBasis*WindowWidth) for trend extraction phase (always performed)
4330 * O(WindowWidth*NTicks) for forecast phase
4331 
NOTE: noise reduction is ALWAYS applied by this algorithm; if you want  to
      apply the recurrence relation to raw unprocessed data,  use  another
      function - ssaforecastsequence() - which allows you to turn the noise
      reduction phase on and off.
4336 
4337 NOTE: this algorithm performs prediction using only one - last  -  sliding
4338       window.  Predictions  produced   by   such   approach   are   smooth
4339       continuations of the reconstructed  trend  line,  but  they  can  be
4340       easily corrupted by noise. If you need  noise-resistant  prediction,
4341       use ssaforecastavglast() function, which averages predictions  built
4342       using several sliding windows.
4343 
4344 INPUT PARAMETERS:
4345     S               -   SSA model
4346     NTicks          -   number of ticks to forecast, NTicks>=1
4347 
4348 OUTPUT PARAMETERS:
4349     Trend           -   array[NTicks], predicted trend line
4350 
4351 
4352 CACHING/REUSE OF THE BASIS
4353 
4354 Caching/reuse of previous results is performed:
4355 * first call performs full run of SSA; basis is stored in the cache
4356 * subsequent calls reuse previously cached basis
4357 * if you call any function which changes model properties (window  length,
4358   algorithm, dataset), internal basis will be invalidated.
4359 * the only calls which do NOT invalidate basis are listed below:
4360   a) ssasetwindow() with same window length
4361   b) ssaappendpointandupdate()
4362   c) ssaappendsequenceandupdate()
4363   d) ssasetalgotopk...() with exactly same K
4364   Calling these functions will result in reuse of previously found basis.
4365 
4366 
4367 HANDLING OF DEGENERATE CASES
4368 
The following degenerate cases may happen:
* dataset is empty (no analysis can be done)
* all sequences are shorter than the window length (no analysis can be done)
* no algorithm is specified (no analysis can be done)
* last sequence is shorter than WindowWidth (analysis can be done,  but  we
  cannot perform forecasting on the last sequence)
* window length is 1 (impossible to use for forecasting)
* SSA analysis algorithm is configured to extract a basis  whose  size  is
  equal to the window length (impossible to use for  forecasting;  only  a
  basis whose size is less than the window length can be used).
4379 
Calling this function in degenerate cases returns the following result:
* NTicks copies of the last value are returned for a non-empty  task  with
  a large enough dataset but an overcomplete basis (window  width=1,  or
  basis size equal to window width)
* zero trend with length=NTicks is returned for an empty task
4385 
4386 No analysis is performed in degenerate cases (we immediately return  dummy
4387 values, no basis is ever constructed).
4388 
4389   -- ALGLIB --
4390      Copyright 30.10.2017 by Bochkanov Sergey
4391 *************************************************************************/
4392 void ssaforecastlast(const ssamodel &s, const ae_int_t nticks, real_1d_array &trend, const xparams _xparams = alglib::xdefault);
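
/*************************************************************************
Usage sketch for ssaforecastlast(). This is a hypothetical  example,  not
an official ALGLIB sample: it forecasts 5 ticks of a synthetic sine series.
Note that NBasis (here 2) must stay below WindowWidth, per the  degenerate
cases above.

    #include <cmath>
    #include "dataanalysis.h"
    using namespace alglib;

    void forecastlast_sketch()
    {
        ssamodel s;
        ssacreate(s);
        real_1d_array x;
        x.setlength(100);
        for(int i=0; i<100; i++)
            x[i] = std::sin(0.2*i);
        ssaaddsequence(s, x);
        ssasetwindow(s, 10);
        ssasetalgotopkdirect(s, 2);    // NBasis=2 < WindowWidth=10

        real_1d_array trend;
        ssaforecastlast(s, 5, trend);  // trend[0..4] = predicted ticks
    }
*************************************************************************/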
4393 
4394 
4395 /*************************************************************************
4396 This function builds SSA  basis  and  performs  forecasting  for  a  user-
4397 specified sequence, returning value of trend.
4398 
Forecasting is done in two stages:
* first, we extract the trend from the last  WindowWidth  elements  of  the
  sequence. This stage is optional; you can turn it off  if  you  pass  data
  which have already been processed with SSA. Of course, you can turn it off
  even for raw data, but it is not recommended - noise suppression  is  very
  important for correct prediction.
* then, we apply the LRR to the last WindowWidth-1 elements of the extracted
  trend.
4407 
4408 This function has following running time:
4409 * O(NBasis*WindowWidth) for trend extraction phase
4410 * O(WindowWidth*NTicks) for forecast phase
4411 
4412 NOTE: this algorithm performs prediction using only one - last  -  sliding
4413       window.  Predictions  produced   by   such   approach   are   smooth
4414       continuations of the reconstructed  trend  line,  but  they  can  be
4415       easily corrupted by noise. If you need  noise-resistant  prediction,
4416       use ssaforecastavgsequence() function,  which  averages  predictions
4417       built using several sliding windows.
4418 
4419 INPUT PARAMETERS:
4420     S               -   SSA model
    Data            -   array[DataLen], data to forecast
4422     DataLen         -   number of ticks in the data, DataLen>=1
4423     ForecastLen     -   number of ticks to predict, ForecastLen>=1
4424     ApplySmoothing  -   whether to apply smoothing trend extraction or not;
4425                         if you do not know what to specify, pass True.
4426 
4427 OUTPUT PARAMETERS:
4428     Trend           -   array[ForecastLen], forecasted trend
4429 
4430 
4431 CACHING/REUSE OF THE BASIS
4432 
4433 Caching/reuse of previous results is performed:
4434 * first call performs full run of SSA; basis is stored in the cache
4435 * subsequent calls reuse previously cached basis
4436 * if you call any function which changes model properties (window  length,
4437   algorithm, dataset), internal basis will be invalidated.
4438 * the only calls which do NOT invalidate basis are listed below:
4439   a) ssasetwindow() with same window length
4440   b) ssaappendpointandupdate()
4441   c) ssaappendsequenceandupdate()
4442   d) ssasetalgotopk...() with exactly same K
4443   Calling these functions will result in reuse of previously found basis.
4444 
4445 
4446 HANDLING OF DEGENERATE CASES
4447 
The following degenerate cases may happen:
* dataset is empty (no analysis can be done)
* all sequences are shorter than the window length (no analysis can be done)
* no algorithm is specified (no analysis can be done)
* data sequence is shorter than WindowWidth (analysis can be done,  but  we
  cannot perform forecasting on the sequence)
* window length is 1 (impossible to use for forecasting)
* SSA analysis algorithm is configured to extract a basis  whose  size  is
  equal to the window length (impossible to use for  forecasting;  only  a
  basis whose size is less than the window length can be used).
4458 
Calling this function in degenerate cases returns the following result:
* ForecastLen copies of the last value are returned for a non-empty  task
  with a large enough dataset but an overcomplete basis (window  width=1,
  or basis size equal to window width)
* zero trend with length=ForecastLen is returned for an empty task
4464 
4465 No analysis is performed in degenerate cases (we immediately return  dummy
4466 values, no basis is ever constructed).
4467 
4468   -- ALGLIB --
4469      Copyright 30.10.2017 by Bochkanov Sergey
4470 *************************************************************************/
4471 void ssaforecastsequence(const ssamodel &s, const real_1d_array &data, const ae_int_t datalen, const ae_int_t forecastlen, const bool applysmoothing, real_1d_array &trend, const xparams _xparams = alglib::xdefault);
4472 void ssaforecastsequence(const ssamodel &s, const real_1d_array &data, const ae_int_t forecastlen, real_1d_array &trend, const xparams _xparams = alglib::xdefault);
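
/*************************************************************************
Usage sketch for ssaforecastsequence(). This is a  hypothetical  example,
not an official ALGLIB sample: it forecasts a user-supplied sequence with
a model that is assumed to be already configured (e.g. as in the sketches
for ssasetalgotopkdirect() above); the data values are placeholders.

    #include "dataanalysis.h"
    using namespace alglib;

    void forecastsequence_sketch(const ssamodel &s /* configured model */)
    {
        real_1d_array data = "[1.0,1.2,0.9,1.1,1.0,1.2,0.9,1.1,1.0,1.2]";
        real_1d_array trend;
        // DataLen=10, ForecastLen=3, ApplySmoothing=true (recommended)
        ssaforecastsequence(s, data, 10, 3, true, trend);
    }
*************************************************************************/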
4473 
4474 
4475 /*************************************************************************
4476 This function builds SSA basis and performs forecasting  for  a  specified
4477 number of ticks, returning value of trend.
4478 
4479 Forecast is performed as follows:
* SSA trend extraction is applied to the last M  sliding  windows  of  the
  internally stored dataset
* a prediction is built for each of the M sliding windows
* the average of the M predictions is returned
4484 
4485 This function has following running time:
4486 * O(NBasis*WindowWidth*M) for trend extraction phase (always performed)
4487 * O(WindowWidth*NTicks*M) for forecast phase
4488 
NOTE: noise reduction is ALWAYS applied by this algorithm; if you want  to
      apply the recurrence relation to raw unprocessed data,  use  another
      function - ssaforecastsequence() - which allows you to turn the noise
      reduction phase on and off.
4493 
NOTE: combining several predictions results in lower sensitivity to  noise,
      but it may produce undesirable discontinuities between the last point
      of the trend and the first point of the prediction. The reason is that
      the last point of the trend is usually corrupted by noise,  while  the
      average of several predictions is less sensitive to noise;  hence  the
      discontinuity. It is not a bug.
4500 
4501 INPUT PARAMETERS:
4502     S               -   SSA model
4503     M               -   number  of  sliding  windows  to combine, M>=1. If
                        your dataset has fewer than M sliding windows, this
4505                         parameter will be silently reduced.
4506     NTicks          -   number of ticks to forecast, NTicks>=1
4507 
4508 OUTPUT PARAMETERS:
4509     Trend           -   array[NTicks], predicted trend line
4510 
4511 
4512 CACHING/REUSE OF THE BASIS
4513 
4514 Caching/reuse of previous results is performed:
4515 * first call performs full run of SSA; basis is stored in the cache
4516 * subsequent calls reuse previously cached basis
4517 * if you call any function which changes model properties (window  length,
4518   algorithm, dataset), internal basis will be invalidated.
4519 * the only calls which do NOT invalidate basis are listed below:
4520   a) ssasetwindow() with same window length
4521   b) ssaappendpointandupdate()
4522   c) ssaappendsequenceandupdate()
4523   d) ssasetalgotopk...() with exactly same K
4524   Calling these functions will result in reuse of previously found basis.
4525 
4526 
4527 HANDLING OF DEGENERATE CASES
4528 
The following degenerate cases may happen:
* dataset is empty (no analysis can be done)
* all sequences are shorter than the window length (no analysis can be done)
* no algorithm is specified (no analysis can be done)
* last sequence is shorter than WindowWidth (analysis can be done,  but  we
  cannot perform forecasting on the last sequence)
* window length is 1 (impossible to use for forecasting)
* SSA analysis algorithm is configured to extract a basis  whose  size  is
  equal to the window length (impossible to use for  forecasting;  only  a
  basis whose size is less than the window length can be used).
4539 
Calling this function in degenerate cases returns the following result:
* NTicks copies of the last value are returned for a non-empty  task  with
  a large enough dataset but an overcomplete basis (window  width=1,  or
  basis size equal to window width)
* zero trend with length=NTicks is returned for an empty task
4545 
4546 No analysis is performed in degenerate cases (we immediately return  dummy
4547 values, no basis is ever constructed).
4548 
4549   -- ALGLIB --
4550      Copyright 30.10.2017 by Bochkanov Sergey
4551 *************************************************************************/
4552 void ssaforecastavglast(const ssamodel &s, const ae_int_t m, const ae_int_t nticks, real_1d_array &trend, const xparams _xparams = alglib::xdefault);
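
/*************************************************************************
Usage sketch for ssaforecastavglast(). This is a  hypothetical  contrast
with ssaforecastlast(), not an official ALGLIB sample: instead  of  using
only the last sliding window, predictions from the last M=20 windows  are
averaged; the model is assumed to be configured as in earlier sketches.

    #include "dataanalysis.h"
    using namespace alglib;

    void forecastavglast_sketch(const ssamodel &s /* configured model */)
    {
        real_1d_array trend;
        ssaforecastavglast(s, 20, 5, trend); // M=20 windows, 5 ticks ahead
    }
*************************************************************************/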
4553 
4554 
4555 /*************************************************************************
4556 This function builds SSA  basis  and  performs  forecasting  for  a  user-
4557 specified sequence, returning value of trend.
4558 
Forecasting is done in two stages:
* first, we extract the trend from the last M sliding windows of the sequence.
  This stage is optional; you can turn it off if you  pass  data  which  have
  already been processed with SSA. Of course, you can turn it  off  even  for
  raw data, but it is not recommended - noise suppression is  very  important
  for correct prediction.
* then, we apply the LRR independently to M sliding windows
* the average of M predictions is returned
4567 
4568 This function has following running time:
4569 * O(NBasis*WindowWidth*M) for trend extraction phase
4570 * O(WindowWidth*NTicks*M) for forecast phase
4571 
NOTE: combining several predictions results in lower sensitivity to  noise,
      but it may produce undesirable discontinuities between the last point
      of the trend and the first point of the prediction. The reason is that
      the last point of the trend is usually corrupted by noise,  while  the
      average of several predictions is less sensitive to noise;  hence  the
      discontinuity. It is not a bug.
4578 
4579 INPUT PARAMETERS:
4580     S               -   SSA model
    Data            -   array[DataLen], data to forecast
4582     DataLen         -   number of ticks in the data, DataLen>=1
4583     M               -   number  of  sliding  windows  to combine, M>=1. If
                        your dataset has fewer than M sliding windows, this
4585                         parameter will be silently reduced.
4586     ForecastLen     -   number of ticks to predict, ForecastLen>=1
    ApplySmoothing  -   whether to apply smoothing trend extraction or not;
                        if you do not know what to specify, pass True.
4589 
4590 OUTPUT PARAMETERS:
4591     Trend           -   array[ForecastLen], forecasted trend
4592 
4593 
4594 CACHING/REUSE OF THE BASIS
4595 
4596 Caching/reuse of previous results is performed:
4597 * first call performs full run of SSA; basis is stored in the cache
4598 * subsequent calls reuse previously cached basis
4599 * if you call any function which changes model properties (window  length,
4600   algorithm, dataset), internal basis will be invalidated.
4601 * the only calls which do NOT invalidate basis are listed below:
4602   a) ssasetwindow() with same window length
4603   b) ssaappendpointandupdate()
4604   c) ssaappendsequenceandupdate()
4605   d) ssasetalgotopk...() with exactly same K
4606   Calling these functions will result in reuse of previously found basis.
4607 
4608 
4609 HANDLING OF DEGENERATE CASES
4610 
The following degenerate cases may happen:
* dataset is empty (no analysis can be done)
* all sequences are shorter than the window length (no analysis can be done)
* no algorithm is specified (no analysis can be done)
* data sequence is shorter than WindowWidth (analysis can be done,  but  we
  cannot perform forecasting on the sequence)
* window length is 1 (impossible to use for forecasting)
* SSA analysis algorithm is configured to extract a basis  whose  size  is
  equal to the window length (impossible to use for  forecasting;  only  a
  basis whose size is less than the window length can be used).
4621 
Calling this function in degenerate cases returns the following result:
* ForecastLen copies of the last value are returned for a non-empty  task
  with a large enough dataset but an overcomplete basis (window  width=1,
  or basis size equal to window width)
* zero trend with length=ForecastLen is returned for an empty task
4627 
4628 No analysis is performed in degenerate cases (we immediately return  dummy
4629 values, no basis is ever constructed).
4630 
4631   -- ALGLIB --
4632      Copyright 30.10.2017 by Bochkanov Sergey
4633 *************************************************************************/
4634 void ssaforecastavgsequence(const ssamodel &s, const real_1d_array &data, const ae_int_t datalen, const ae_int_t m, const ae_int_t forecastlen, const bool applysmoothing, real_1d_array &trend, const xparams _xparams = alglib::xdefault);
4635 void ssaforecastavgsequence(const ssamodel &s, const real_1d_array &data, const ae_int_t m, const ae_int_t forecastlen, real_1d_array &trend, const xparams _xparams = alglib::xdefault);
4636 #endif
4637 
4638 #if defined(AE_COMPILE_LINREG) || !defined(AE_PARTIAL_BUILD)
4639 /*************************************************************************
4640 Linear regression
4641 
4642 Subroutine builds model:
4643 
4644     Y = A(0)*X[0] + ... + A(N-1)*X[N-1] + A(N)
4645 
and returns the model in ALGLIB format,  together  with  the  covariance
matrix, training set errors (RMS, average,  average  relative)  and  a
leave-one-out cross-validation estimate of the generalization error. The
CV estimate is calculated using a fast algorithm with  O(NPoints*NVars)
complexity.
4650 
When the covariance matrix is calculated, standard deviations of  function
values are assumed to be equal to the RMS error on the training set.
4653 
4654 INPUT PARAMETERS:
4655     XY          -   training set, array [0..NPoints-1,0..NVars]:
4656                     * NVars columns - independent variables
4657                     * last column - dependent variable
4658     NPoints     -   training set size, NPoints>NVars+1
4659     NVars       -   number of independent variables
4660 
4661 OUTPUT PARAMETERS:
4662     Info        -   return code:
4663                     * -255, in case of unknown internal error
                    * -4, if the internal SVD subroutine hasn't converged
                    * -1, if incorrect parameters were passed (NPoints<NVars+2, NVars<1).
4666                     *  1, if subroutine successfully finished
4667     LM          -   linear model in the ALGLIB format. Use subroutines of
4668                     this unit to work with the model.
4669     AR          -   additional results
4670 
4671 
4672   -- ALGLIB --
4673      Copyright 02.08.2008 by Bochkanov Sergey
4674 *************************************************************************/
4675 void lrbuild(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, linearmodel &lm, lrreport &ar, const xparams _xparams = alglib::xdefault);
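
/*************************************************************************
Usage sketch for lrbuild() and lrprocess(). This is a hypothetical example,
not an official ALGLIB sample: it fits Y ~ 2*X on five placeholder  points
(the last column of XY is the dependent variable) and evaluates the  model
at X=6.

    #include "dataanalysis.h"
    using namespace alglib;

    void lrbuild_sketch()
    {
        real_2d_array xy = "[[1,2.1],[2,3.9],[3,6.1],[4,8.0],[5,9.9]]";
        ae_int_t info;
        linearmodel lm;
        lrreport ar;
        lrbuild(xy, 5, 1, info, lm, ar);  // info=1 on success

        real_1d_array x = "[6]";
        double y = lrprocess(lm, x);      // roughly 12 for this data
        (void)y;
    }
*************************************************************************/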
4676 
4677 
4678 /*************************************************************************
4679 Linear regression
4680 
Variant of LRBuild which uses a vector of standard deviations  (errors  in
4682 function values).
4683 
4684 INPUT PARAMETERS:
4685     XY          -   training set, array [0..NPoints-1,0..NVars]:
4686                     * NVars columns - independent variables
4687                     * last column - dependent variable
4688     S           -   standard deviations (errors in function values)
4689                     array[0..NPoints-1], S[i]>0.
4690     NPoints     -   training set size, NPoints>NVars+1
4691     NVars       -   number of independent variables
4692 
4693 OUTPUT PARAMETERS:
4694     Info        -   return code:
4695                     * -255, in case of unknown internal error
                    * -4, if the internal SVD subroutine hasn't converged
                    * -1, if incorrect parameters were passed (NPoints<NVars+2, NVars<1).
4698                     * -2, if S[I]<=0
4699                     *  1, if subroutine successfully finished
4700     LM          -   linear model in the ALGLIB format. Use subroutines of
4701                     this unit to work with the model.
4702     AR          -   additional results
4703 
4704 
4705   -- ALGLIB --
4706      Copyright 02.08.2008 by Bochkanov Sergey
4707 *************************************************************************/
4708 void lrbuilds(const real_2d_array &xy, const real_1d_array &s, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, linearmodel &lm, lrreport &ar, const xparams _xparams = alglib::xdefault);
4709 
4710 
4711 /*************************************************************************
4712 Like LRBuildS, but builds model
4713 
4714     Y = A(0)*X[0] + ... + A(N-1)*X[N-1]
4715 
4716 i.e. with zero constant term.
4717 
4718   -- ALGLIB --
4719      Copyright 30.10.2008 by Bochkanov Sergey
4720 *************************************************************************/
4721 void lrbuildzs(const real_2d_array &xy, const real_1d_array &s, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, linearmodel &lm, lrreport &ar, const xparams _xparams = alglib::xdefault);
4722 
4723 
4724 /*************************************************************************
4725 Like LRBuild but builds model
4726 
4727     Y = A(0)*X[0] + ... + A(N-1)*X[N-1]
4728 
4729 i.e. with zero constant term.
4730 
4731   -- ALGLIB --
4732      Copyright 30.10.2008 by Bochkanov Sergey
4733 *************************************************************************/
4734 void lrbuildz(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, linearmodel &lm, lrreport &ar, const xparams _xparams = alglib::xdefault);
4735 
4736 
4737 /*************************************************************************
4738 Unpacks coefficients of linear model.
4739 
4740 INPUT PARAMETERS:
4741     LM          -   linear model in ALGLIB format
4742 
4743 OUTPUT PARAMETERS:
4744     V           -   coefficients, array[0..NVars]
4745                     constant term (intercept) is stored in the V[NVars].
4746     NVars       -   number of independent variables (one less than number
4747                     of coefficients)
4748 
4749   -- ALGLIB --
4750      Copyright 30.08.2008 by Bochkanov Sergey
4751 *************************************************************************/
4752 void lrunpack(const linearmodel &lm, real_1d_array &v, ae_int_t &nvars, const xparams _xparams = alglib::xdefault);
4753 
4754 
4755 /*************************************************************************
4756 "Packs" coefficients and creates linear model in ALGLIB format (LRUnpack
4757 reversed).
4758 
4759 INPUT PARAMETERS:
4760     V           -   coefficients, array[0..NVars]
4761     NVars       -   number of independent variables
4762 
OUTPUT PARAMETERS:
4764     LM          -   linear model.
4765 
4766   -- ALGLIB --
4767      Copyright 30.08.2008 by Bochkanov Sergey
4768 *************************************************************************/
4769 void lrpack(const real_1d_array &v, const ae_int_t nvars, linearmodel &lm, const xparams _xparams = alglib::xdefault);
4770 
4771 
4772 /*************************************************************************
Processing
4774 
4775 INPUT PARAMETERS:
4776     LM      -   linear model
4777     X       -   input vector,  array[0..NVars-1].
4778 
4779 Result:
4780     value of linear model regression estimate
4781 
4782   -- ALGLIB --
4783      Copyright 03.09.2008 by Bochkanov Sergey
4784 *************************************************************************/
4785 double lrprocess(const linearmodel &lm, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
4786 
4787 
4788 /*************************************************************************
4789 RMS error on the test set
4790 
4791 INPUT PARAMETERS:
4792     LM      -   linear model
4793     XY      -   test set
4794     NPoints -   test set size
4795 
4796 RESULT:
4797     root mean square error.
4798 
4799   -- ALGLIB --
4800      Copyright 30.08.2008 by Bochkanov Sergey
4801 *************************************************************************/
4802 double lrrmserror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
4803 
4804 
4805 /*************************************************************************
4806 Average error on the test set
4807 
4808 INPUT PARAMETERS:
4809     LM      -   linear model
4810     XY      -   test set
4811     NPoints -   test set size
4812 
4813 RESULT:
4814     average error.
4815 
4816   -- ALGLIB --
4817      Copyright 30.08.2008 by Bochkanov Sergey
4818 *************************************************************************/
4819 double lravgerror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
4820 
4821 
4822 /*************************************************************************
Average relative error on the test set
4824 
4825 INPUT PARAMETERS:
4826     LM      -   linear model
4827     XY      -   test set
4828     NPoints -   test set size
4829 
4830 RESULT:
4831     average relative error.
4832 
4833   -- ALGLIB --
4834      Copyright 30.08.2008 by Bochkanov Sergey
4835 *************************************************************************/
4836 double lravgrelerror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
4837 #endif
4838 
4839 #if defined(AE_COMPILE_FILTERS) || !defined(AE_PARTIAL_BUILD)
4840 /*************************************************************************
4841 Filters: simple moving averages (unsymmetric).
4842 
This filter replaces the array by results of the SMA(K) filter. SMA(K)  is
defined as a filter which averages at most K previous  points  (previous,
not points AROUND the central point), or fewer for the first K-1 points.
4846 
4847 INPUT PARAMETERS:
4848     X           -   array[N], array to process. It can be larger than N,
4849                     in this case only first N points are processed.
4850     N           -   points count, N>=0
4851     K           -   K>=1 (K can be larger than N ,  such  cases  will  be
4852                     correctly handled). Window width. K=1 corresponds  to
4853                     identity transformation (nothing changes).
4854 
4855 OUTPUT PARAMETERS:
4856     X           -   array, whose first N elements were processed with SMA(K)
4857 
4858 NOTE 1: this function uses efficient in-place  algorithm  which  does not
4859         allocate temporary arrays.
4860 
4861 NOTE 2: this algorithm makes only one pass through array and uses running
4862         sum  to speed-up calculation of the averages. Additional measures
4863         are taken to ensure that running sum on a long sequence  of  zero
4864         elements will be correctly reset to zero even in the presence  of
4865         round-off error.
4866 
NOTE 3: this is an unsymmetric version of the algorithm,  which  does  NOT
        average points after the current one. Only X[i], X[i-1], ...  are
4869         used when calculating new value of X[i]. We should also note that
4870         this algorithm uses BOTH previous points and  current  one,  i.e.
4871         new value of X[i] depends on BOTH previous point and X[i] itself.
4872 
4873   -- ALGLIB --
4874      Copyright 25.10.2011 by Bochkanov Sergey
4875 *************************************************************************/
4876 void filtersma(real_1d_array &x, const ae_int_t n, const ae_int_t k, const xparams _xparams = alglib::xdefault);
4877 void filtersma(real_1d_array &x, const ae_int_t k, const xparams _xparams = alglib::xdefault);
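
/*************************************************************************
A tiny worked example for filtersma() (hypothetical, not an official ALGLIB
sample): with K=2 every element becomes the average of itself and the
previous element, while the first element has no predecessor and stays
as-is.

    #include "dataanalysis.h"
    using namespace alglib;

    void sma_sketch()
    {
        real_1d_array x = "[1,2,3,4,5]";
        filtersma(x, 5, 2);
        // x is now [1.0, 1.5, 2.5, 3.5, 4.5]
    }
*************************************************************************/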
4878 
4879 
4880 /*************************************************************************
4881 Filters: exponential moving averages.
4882 
4883 This filter replaces array by results of EMA(alpha) filter. EMA(alpha) is
4884 defined as filter which replaces X[] by S[]:
4885     S[0] = X[0]
4886     S[t] = alpha*X[t] + (1-alpha)*S[t-1]
4887 
4888 INPUT PARAMETERS:
4889     X           -   array[N], array to process. It can be larger than N,
4890                     in this case only first N points are processed.
4891     N           -   points count, N>=0
4892     alpha       -   0<alpha<=1, smoothing parameter.
4893 
4894 OUTPUT PARAMETERS:
4895     X           -   array, whose first N elements were processed
4896                     with EMA(alpha)
4897 
4898 NOTE 1: this function uses efficient in-place  algorithm  which  does not
4899         allocate temporary arrays.
4900 
4901 NOTE 2: this algorithm uses BOTH previous points and  current  one,  i.e.
4902         new value of X[i] depends on BOTH previous point and X[i] itself.
4903 
NOTE 3: technical analysis users quite often work with the EMA coefficient
4905         expressed in DAYS instead of fractions. If you want to  calculate
4906         EMA(N), where N is a number of days, you can use alpha=2/(N+1).
4907 
4908   -- ALGLIB --
4909      Copyright 25.10.2011 by Bochkanov Sergey
4910 *************************************************************************/
4911 void filterema(real_1d_array &x, const ae_int_t n, const double alpha, const xparams _xparams = alglib::xdefault);
4912 void filterema(real_1d_array &x, const double alpha, const xparams _xparams = alglib::xdefault);
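
/*************************************************************************
Usage sketch for filterema() (hypothetical, not an official ALGLIB sample),
using the days-to-fraction conversion from NOTE 3: a "10-day" EMA
corresponds to alpha=2/(10+1).

    #include "dataanalysis.h"
    using namespace alglib;

    void ema_sketch()
    {
        real_1d_array x = "[1,2,3,4,5]";
        double alpha = 2.0/(10+1);   // EMA(10) expressed as a fraction
        filterema(x, 5, alpha);
        // S[0]=X[0]; S[t]=alpha*X[t]+(1-alpha)*S[t-1]
    }
*************************************************************************/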
4913 
4914 
4915 /*************************************************************************
4916 Filters: linear regression moving averages.
4917 
This filter replaces the array by results of the LRMA(K) filter.

LRMA(K) is defined as a filter which, for each data point, builds a linear
regression model using K previous points (the point itself is included  in
these K points) and calculates the value of this linear  model  at  the
point in question.
4924 
4925 INPUT PARAMETERS:
4926     X           -   array[N], array to process. It can be larger than N,
4927                     in this case only first N points are processed.
4928     N           -   points count, N>=0
4929     K           -   K>=1 (K can be larger than N ,  such  cases  will  be
4930                     correctly handled). Window width. K=1 corresponds  to
4931                     identity transformation (nothing changes).
4932 
4933 OUTPUT PARAMETERS:
    X           -   array, whose first N elements were processed with LRMA(K)
4935 
4936 NOTE 1: this function uses efficient in-place  algorithm  which  does not
4937         allocate temporary arrays.
4938 
4939 NOTE 2: this algorithm makes only one pass through array and uses running
4940         sum  to speed-up calculation of the averages. Additional measures
4941         are taken to ensure that running sum on a long sequence  of  zero
4942         elements will be correctly reset to zero even in the presence  of
4943         round-off error.
4944 
NOTE 3: this is an unsymmetric version of the algorithm,  which  does  NOT
        average points after the current one. Only X[i], X[i-1], ...  are
4947         used when calculating new value of X[i]. We should also note that
4948         this algorithm uses BOTH previous points and  current  one,  i.e.
4949         new value of X[i] depends on BOTH previous point and X[i] itself.
4950 
4951   -- ALGLIB --
4952      Copyright 25.10.2011 by Bochkanov Sergey
4953 *************************************************************************/
4954 void filterlrma(real_1d_array &x, const ae_int_t n, const ae_int_t k, const xparams _xparams = alglib::xdefault);
4955 void filterlrma(real_1d_array &x, const ae_int_t k, const xparams _xparams = alglib::xdefault);
4956 #endif
4957 
4958 #if defined(AE_COMPILE_LOGIT) || !defined(AE_PARTIAL_BUILD)
4959 /*************************************************************************
4960 This subroutine trains logit model.
4961 
4962 INPUT PARAMETERS:
4963     XY          -   training set, array[0..NPoints-1,0..NVars]
                    First NVars columns store values  of  independent
                    variables, the next column stores the class number
                    (from 0 to NClasses-1) to which the dataset element
                    belongs. Fractional values are rounded to the nearest
                    integer.
4968     NPoints     -   training set size, NPoints>=1
4969     NVars       -   number of independent variables, NVars>=1
4970     NClasses    -   number of classes, NClasses>=2
4971 
4972 OUTPUT PARAMETERS:
4973     Info        -   return code:
4974                     * -2, if there is a point with class number
4975                           outside of [0..NClasses-1].
                    * -1, if incorrect parameters were passed
4977                           (NPoints<NVars+2, NVars<1, NClasses<2).
4978                     *  1, if task has been solved
4979     LM          -   model built
4980     Rep         -   training report
4981 
4982   -- ALGLIB --
4983      Copyright 10.09.2008 by Bochkanov Sergey
4984 *************************************************************************/
4985 void mnltrainh(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, logitmodel &lm, mnlreport &rep, const xparams _xparams = alglib::xdefault);
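
/*************************************************************************
Usage sketch for mnltrainh() and mnlprocess(). This is  a  hypothetical
example, not an official ALGLIB sample: it trains a two-class logit model
on six placeholder points (two independent variables, last column is  the
class number) and computes posterior probabilities for a new point.

    #include "dataanalysis.h"
    using namespace alglib;

    void logit_sketch()
    {
        real_2d_array xy =
            "[[0.1,0.2,0],[0.3,0.1,0],[0.2,0.3,0],"
            "[0.9,0.8,1],[0.8,1.0,1],[1.0,0.9,1]]";
        ae_int_t info;
        logitmodel lm;
        mnlreport rep;
        mnltrainh(xy, 6, 2, 2, info, lm, rep);  // info=1 on success

        real_1d_array x = "[0.15,0.25]", y;
        mnlprocess(lm, x, y);  // y[0], y[1] = posterior probabilities
    }
*************************************************************************/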
4986 
4987 
4988 /*************************************************************************
Processing
4990 
4991 INPUT PARAMETERS:
4992     LM      -   logit model, passed by non-constant reference
4993                 (some fields of structure are used as temporaries
4994                 when calculating model output).
4995     X       -   input vector,  array[0..NVars-1].
    Y       -   (possibly) preallocated buffer; if the size of Y  is  less
                than NClasses, it will be reallocated. If it is large enough,
                it is NOT reallocated, so we save some time on reallocation.
4999 
5000 OUTPUT PARAMETERS:
5001     Y       -   result, array[0..NClasses-1]
5002                 Vector of posterior probabilities for classification task.
5003 
5004   -- ALGLIB --
5005      Copyright 10.09.2008 by Bochkanov Sergey
5006 *************************************************************************/
5007 void mnlprocess(const logitmodel &lm, const real_1d_array &x, real_1d_array &y, const xparams _xparams = alglib::xdefault);
5008 
5009 
5010 /*************************************************************************
5011 'interactive'  variant  of  MNLProcess  for  languages  like  Python which
5012 support constructs like "Y = MNLProcess(LM,X)" and interactive mode of the
5013 interpreter
5014 
5015 This function allocates new array on each call,  so  it  is  significantly
5016 slower than its 'non-interactive' counterpart, but it is  more  convenient
5017 when you call it from command line.
5018 
5019   -- ALGLIB --
5020      Copyright 10.09.2008 by Bochkanov Sergey
5021 *************************************************************************/
5022 void mnlprocessi(const logitmodel &lm, const real_1d_array &x, real_1d_array &y, const xparams _xparams = alglib::xdefault);
5023 
5024 
5025 /*************************************************************************
Unpacks coefficients of the logit model. The logit model has the form:
5027 
5028     P(class=i) = S(i) / (S(0) + S(1) + ... +S(M-1))
5029           S(i) = Exp(A[i,0]*X[0] + ... + A[i,N-1]*X[N-1] + A[i,N]), when i<M-1
5030         S(M-1) = 1
5031 
5032 INPUT PARAMETERS:
5033     LM          -   logit model in ALGLIB format
5034 
5035 OUTPUT PARAMETERS:
    A           -   coefficients, array[0..NClasses-2,0..NVars]
5037     NVars       -   number of independent variables
5038     NClasses    -   number of classes
5039 
5040   -- ALGLIB --
5041      Copyright 10.09.2008 by Bochkanov Sergey
5042 *************************************************************************/
5043 void mnlunpack(const logitmodel &lm, real_2d_array &a, ae_int_t &nvars, ae_int_t &nclasses, const xparams _xparams = alglib::xdefault);
5044 
5045 
5046 /*************************************************************************
5047 "Packs" coefficients and creates logit model in ALGLIB format (MNLUnpack
5048 reversed).
5049 
5050 INPUT PARAMETERS:
5051     A           -   model (see MNLUnpack)
5052     NVars       -   number of independent variables
5053     NClasses    -   number of classes
5054 
5055 OUTPUT PARAMETERS:
5056     LM          -   logit model.
5057 
5058   -- ALGLIB --
5059      Copyright 10.09.2008 by Bochkanov Sergey
5060 *************************************************************************/
5061 void mnlpack(const real_2d_array &a, const ae_int_t nvars, const ae_int_t nclasses, logitmodel &lm, const xparams _xparams = alglib::xdefault);
5062 
5063 
5064 /*************************************************************************
5065 Average cross-entropy (in bits per element) on the test set
5066 
5067 INPUT PARAMETERS:
5068     LM      -   logit model
5069     XY      -   test set
5070     NPoints -   test set size
5071 
5072 RESULT:
5073     CrossEntropy/(NPoints*ln(2)).
5074 
5075   -- ALGLIB --
5076      Copyright 10.09.2008 by Bochkanov Sergey
5077 *************************************************************************/
5078 double mnlavgce(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
5079 
5080 
5081 /*************************************************************************
5082 Relative classification error on the test set
5083 
5084 INPUT PARAMETERS:
5085     LM      -   logit model
5086     XY      -   test set
5087     NPoints -   test set size
5088 
5089 RESULT:
5090     percent of incorrectly classified cases.
5091 
5092   -- ALGLIB --
5093      Copyright 10.09.2008 by Bochkanov Sergey
5094 *************************************************************************/
5095 double mnlrelclserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
5096 
5097 
5098 /*************************************************************************
5099 RMS error on the test set
5100 
5101 INPUT PARAMETERS:
5102     LM      -   logit model
5103     XY      -   test set
5104     NPoints -   test set size
5105 
5106 RESULT:
5107     root mean square error (error when estimating posterior probabilities).
5108 
5109   -- ALGLIB --
5110      Copyright 30.08.2008 by Bochkanov Sergey
5111 *************************************************************************/
5112 double mnlrmserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
5113 
5114 
5115 /*************************************************************************
5116 Average error on the test set
5117 
5118 INPUT PARAMETERS:
5119     LM      -   logit model
5120     XY      -   test set
5121     NPoints -   test set size
5122 
5123 RESULT:
5124     average error (error when estimating posterior probabilities).
5125 
5126   -- ALGLIB --
5127      Copyright 30.08.2008 by Bochkanov Sergey
5128 *************************************************************************/
5129 double mnlavgerror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
5130 
5131 
5132 /*************************************************************************
5133 Average relative error on the test set
5134 
5135 INPUT PARAMETERS:
5136     LM      -   logit model
5137     XY      -   test set
5138     NPoints -   test set size
5139 
5140 RESULT:
5141     average relative error (error when estimating posterior probabilities).
5142 
5143   -- ALGLIB --
5144      Copyright 30.08.2008 by Bochkanov Sergey
5145 *************************************************************************/
double mnlavgrelerror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
5147 
5148 
5149 /*************************************************************************
5150 Classification error on test set = MNLRelClsError*NPoints
5151 
5152   -- ALGLIB --
5153      Copyright 10.09.2008 by Bochkanov Sergey
5154 *************************************************************************/
5155 ae_int_t mnlclserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
5156 #endif
5157 
5158 #if defined(AE_COMPILE_MCPD) || !defined(AE_PARTIAL_BUILD)
5159 /*************************************************************************
5160 DESCRIPTION:
5161 
5162 This function creates MCPD (Markov Chains for Population Data) solver.
5163 
This solver can be used to find the transition matrix P for an
N-dimensional prediction problem where the transition from X[i] to X[i+1]
is modelled as
    X[i+1] = P*X[i]
where X[i] and X[i+1] are N-dimensional population vectors (components of
each X are non-negative), and P is an N*N transition matrix (elements of P
are non-negative, each column sums to 1.0).
5170 
Such models arise when:
5172 * there is some population of individuals
5173 * individuals can have different states
5174 * individuals can transit from one state to another
* population size is constant, i.e. there are no new individuals and no
  one leaves the population
5177 * you want to model transitions of individuals from one state into another
5178 
5179 USAGE:
5180 
Here we give a very brief outline of MCPD. We strongly recommend that you
read the examples in the ALGLIB Reference Manual and the ALGLIB User Guide
on data analysis, which is available at http://www.alglib.net/dataanalysis/
5184 
5185 1. User initializes algorithm state with MCPDCreate() call
5186 
5187 2. User  adds  one  or  more  tracks -  sequences of states which describe
5188    evolution of a system being modelled from different starting conditions
5189 
3. User may add optional bound, equality and/or linear constraints on the
   elements of P by calling one of the following functions:
5192    * MCPDSetEC() to set equality constraints
5193    * MCPDSetBC() to set bound constraints
5194    * MCPDSetLC() to set linear constraints
5195 
4. Optionally, user may set custom weights for prediction errors (by
   default, the algorithm assigns non-equal, automatically chosen weights
   for errors in the prediction of different components of X). It can be
   done with a call to the MCPDSetPredictionWeights() function.
5200 
5. User calls MCPDSolve() function which solves the problem using the
   tracks and constraints specified at the previous steps
5203 
5204 6. User calls MCPDResults() to get solution
5205 
5206 INPUT PARAMETERS:
5207     N       -   problem dimension, N>=1
5208 
5209 OUTPUT PARAMETERS:
5210     State   -   structure stores algorithm state
5211 
5212   -- ALGLIB --
5213      Copyright 23.05.2010 by Bochkanov Sergey
5214 *************************************************************************/
5215 void mcpdcreate(const ae_int_t n, mcpdstate &s, const xparams _xparams = alglib::xdefault);
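

/*************************************************************************
EXAMPLE (informal sketch): a minimal end-to-end MCPD session following
steps 1-6 above, with a single made-up track for a 3-state problem.

    alglib::mcpdstate s;
    alglib::mcpdreport rep;
    alglib::real_2d_array p;
    alglib::real_2d_array track = "[[1.0,0.0,0.0],[0.8,0.2,0.0],[0.5,0.4,0.1]]";
    alglib::mcpdcreate(3, s);          // step 1: create solver
    alglib::mcpdaddtrack(s, track);    // step 2: add track (3 states, 2 transitions)
    alglib::mcpdsolve(s);              // step 5: solve
    alglib::mcpdresults(s, p, rep);    // step 6: retrieve 3x3 transition matrix
    // rep.terminationtype>0 indicates successful termination
*************************************************************************/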
5216 
5217 
5218 /*************************************************************************
5219 DESCRIPTION:
5220 
This function is a specialized version of the MCPDCreate() function, and
we recommend that you read its comments for general information about the
MCPD solver.
5224 
5225 This  function  creates  MCPD (Markov Chains for Population  Data)  solver
5226 for "Entry-state" model,  i.e. model  where transition from X[i] to X[i+1]
5227 is modelled as
5228     X[i+1] = P*X[i]
5229 where
5230     X[i] and X[i+1] are N-dimensional state vectors
    P is an N*N transition matrix
5232 and  one  selected component of X[] is called "entry" state and is treated
5233 in a special way:
    system state always transits from "entry" state to some other state
    system state cannot transit from any state into "entry" state
Such conditions basically mean that the row of P which corresponds to the
"entry" state is zero.
5238 
5239 Such models arise when:
5240 * there is some population of individuals
5241 * individuals can have different states
5242 * individuals can transit from one state to another
* population size is NOT constant - at every moment of time there is some
  (unpredictable) number of "new" individuals, which can transit into one
  of the states at the next turn, but still no one leaves the population
* you want to model transitions of individuals from one state into another
* but you do NOT want to predict the number of "new" individuals because
  it does not depend on the individuals already present (hence the system
  cannot transit INTO entry state - it can only transit FROM it).
5250 
This model is discussed in more detail in the ALGLIB User Guide (see
http://www.alglib.net/dataanalysis/ for more information).
5253 
5254 INPUT PARAMETERS:
5255     N       -   problem dimension, N>=2
5256     EntryState- index of entry state, in 0..N-1
5257 
5258 OUTPUT PARAMETERS:
5259     State   -   structure stores algorithm state
5260 
5261   -- ALGLIB --
5262      Copyright 23.05.2010 by Bochkanov Sergey
5263 *************************************************************************/
5264 void mcpdcreateentry(const ae_int_t n, const ae_int_t entrystate, mcpdstate &s, const xparams _xparams = alglib::xdefault);
5265 
5266 
5267 /*************************************************************************
5268 DESCRIPTION:
5269 
This function is a specialized version of the MCPDCreate() function, and
we recommend that you read its comments for general information about the
MCPD solver.
5273 
5274 This  function  creates  MCPD (Markov Chains for Population  Data)  solver
5275 for "Exit-state" model,  i.e. model  where  transition from X[i] to X[i+1]
5276 is modelled as
5277     X[i+1] = P*X[i]
5278 where
5279     X[i] and X[i+1] are N-dimensional state vectors
    P is an N*N transition matrix
5281 and  one  selected component of X[] is called "exit"  state and is treated
5282 in a special way:
    system state can transit from any state into "exit" state
    system state cannot transit from "exit" state into any other state
    transition operator discards "exit" state (makes it zero at each turn)
Such conditions basically mean that the column of P which corresponds  to
the "exit" state is zero. Multiplication by such a P may decrease the sum
of vector components.
5289 
5290 Such models arise when:
5291 * there is some population of individuals
5292 * individuals can have different states
5293 * individuals can transit from one state to another
5294 * population size is NOT constant - individuals can move into "exit" state
5295   and leave population at the next turn, but there are no new individuals
* the number of individuals which leave the population can be predicted
5297 * you want to model transitions of individuals from one state into another
5298   (including transitions into the "exit" state)
5299 
This model is discussed in more detail in the ALGLIB User Guide (see
http://www.alglib.net/dataanalysis/ for more information).
5302 
5303 INPUT PARAMETERS:
5304     N       -   problem dimension, N>=2
5305     ExitState-  index of exit state, in 0..N-1
5306 
5307 OUTPUT PARAMETERS:
5308     State   -   structure stores algorithm state
5309 
5310   -- ALGLIB --
5311      Copyright 23.05.2010 by Bochkanov Sergey
5312 *************************************************************************/
5313 void mcpdcreateexit(const ae_int_t n, const ae_int_t exitstate, mcpdstate &s, const xparams _xparams = alglib::xdefault);
5314 
5315 
5316 /*************************************************************************
5317 DESCRIPTION:
5318 
This function is a specialized version of the MCPDCreate() function, and
we recommend that you read its comments for general information about the
MCPD solver.
5322 
5323 This  function  creates  MCPD (Markov Chains for Population  Data)  solver
5324 for "Entry-Exit-states" model, i.e. model where  transition  from  X[i] to
5325 X[i+1] is modelled as
5326     X[i+1] = P*X[i]
5327 where
5328     X[i] and X[i+1] are N-dimensional state vectors
    P is an N*N transition matrix
one selected component of X[] is called "entry" state and is treated in  a
special way:
    system state always transits from "entry" state to some other state
    system state cannot transit from any state into "entry" state
and another component of X[] is called "exit" state and is treated  in  a
special way too:
    system state can transit from any state into "exit" state
    system state cannot transit from "exit" state into any other state
    transition operator discards "exit" state (makes it zero at each turn)
Such conditions basically mean that:
    the row of P which corresponds to "entry" state is zero
    the column of P which corresponds to "exit" state is zero
Multiplication by such a P may decrease the sum of vector components.
5343 
5344 Such models arise when:
5345 * there is some population of individuals
5346 * individuals can have different states
5347 * individuals can transit from one state to another
5348 * population size is NOT constant
* at every moment of time there is some (unpredictable) number  of  "new"
  individuals, which can transit into one of the states at the next turn
5351 * some  individuals  can  move  (predictably)  into "exit" state and leave
5352   population at the next turn
5353 * you want to model transitions of individuals from one state into another,
5354   including transitions from the "entry" state and into the "exit" state.
* but you do NOT want to predict the number of "new" individuals because
  it does not depend on the individuals already present (hence the system
  cannot transit INTO entry state - it can only transit FROM it).
5358 
This model is discussed in more detail in the ALGLIB User Guide (see
http://www.alglib.net/dataanalysis/ for more information).
5361 
5362 INPUT PARAMETERS:
5363     N       -   problem dimension, N>=2
5364     EntryState- index of entry state, in 0..N-1
5365     ExitState-  index of exit state, in 0..N-1
5366 
5367 OUTPUT PARAMETERS:
5368     State   -   structure stores algorithm state
5369 
5370   -- ALGLIB --
5371      Copyright 23.05.2010 by Bochkanov Sergey
5372 *************************************************************************/
5373 void mcpdcreateentryexit(const ae_int_t n, const ae_int_t entrystate, const ae_int_t exitstate, mcpdstate &s, const xparams _xparams = alglib::xdefault);
5374 
5375 
5376 /*************************************************************************
This function is used to add a track - a sequence of system states at
different moments of its evolution.
5379 
You may add one or several tracks to the MCPD solver. In case  you  have
several tracks, they won't overwrite each other. For example, if you pass
two tracks, A1-A2-A3 (system at t=A+1, t=A+2 and t=A+3) and B1-B2-B3, then
the solver will try to model transitions from t=A+1 to t=A+2, t=A+2  to
t=A+3, t=B+1 to t=B+2, t=B+2 to t=B+3. But it WON'T mix these two tracks -
i.e. it won't try to model a transition from t=A+3 to t=B+1.
5386 
5387 INPUT PARAMETERS:
5388     S       -   solver
5389     XY      -   track, array[K,N]:
5390                 * I-th row is a state at t=I
5391                 * elements of XY must be non-negative (exception will be
5392                   thrown on negative elements)
5393     K       -   number of points in a track
5394                 * if given, only leading K rows of XY are used
5395                 * if not given, automatically determined from size of XY
5396 
5397 NOTES:
5398 
5399 1. Track may contain either proportional or population data:
5400    * with proportional data all rows of XY must sum to 1.0, i.e. we have
5401      proportions instead of absolute population values
5402    * with population data rows of XY contain population counts and generally
5403      do not sum to 1.0 (although they still must be non-negative)
5404 
5405   -- ALGLIB --
5406      Copyright 23.05.2010 by Bochkanov Sergey
5407 *************************************************************************/
5408 void mcpdaddtrack(const mcpdstate &s, const real_2d_array &xy, const ae_int_t k, const xparams _xparams = alglib::xdefault);
5409 void mcpdaddtrack(const mcpdstate &s, const real_2d_array &xy, const xparams _xparams = alglib::xdefault);
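

/*************************************************************************
EXAMPLE (informal sketch): adding two independent tracks to a solver s
created with mcpdcreate(2,s). The solver models transitions inside each
track, but never the transition from the end of one track to the
beginning of another.

    alglib::real_2d_array ta = "[[0.9,0.1],[0.7,0.3],[0.5,0.5]]";
    alglib::real_2d_array tb = "[[0.2,0.8],[0.3,0.7]]";
    alglib::mcpdaddtrack(s, ta);   // models A1->A2 and A2->A3
    alglib::mcpdaddtrack(s, tb);   // models B1->B2; A3->B1 is never used
*************************************************************************/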
5410 
5411 
5412 /*************************************************************************
5413 This function is used to add equality constraints on the elements  of  the
5414 transition matrix P.
5415 
5416 MCPD solver has four types of constraints which can be placed on P:
5417 * user-specified equality constraints (optional)
5418 * user-specified bound constraints (optional)
5419 * user-specified general linear constraints (optional)
5420 * basic constraints (always present):
5421   * non-negativity: P[i,j]>=0
5422   * consistency: every column of P sums to 1.0
5423 
Final constraints which are passed to the underlying optimizer are
calculated as the intersection of all present constraints. For example,
you may specify a bound constraint on P[0,0] and an equality one:
5427     0.1<=P[0,0]<=0.9
5428     P[0,0]=0.5
5429 Such  combination  of  constraints  will  be  silently  reduced  to  their
5430 intersection, which is P[0,0]=0.5.
5431 
This function can be used to place equality constraints on an arbitrary
subset of elements of P. The set of constraints is specified by EC, which
may contain either NAN's or finite numbers from [0,1]. NAN denotes absence
of a constraint, a finite number denotes an equality constraint on the
specific element of P.
5437 
You can also use the MCPDAddEC() function, which allows you to ADD an
equality constraint for one element of P without changing constraints for
other elements.
5441 
5442 These functions (MCPDSetEC and MCPDAddEC) interact as follows:
* there is an internal matrix of equality constraints which is stored  in
  the MCPD solver
5445 * MCPDSetEC() replaces this matrix by another one (SET)
5446 * MCPDAddEC() modifies one element of this matrix and  leaves  other  ones
5447   unchanged (ADD)
5448 * thus  MCPDAddEC()  call  preserves  all  modifications  done by previous
5449   calls,  while  MCPDSetEC()  completely discards all changes  done to the
5450   equality constraints.
5451 
5452 INPUT PARAMETERS:
5453     S       -   solver
5454     EC      -   equality constraints, array[N,N]. Elements of  EC  can  be
5455                 either NAN's or finite  numbers from  [0,1].  NAN  denotes
5456                 absence  of  constraints,  while  finite  value    denotes
5457                 equality constraint on the corresponding element of P.
5458 
5459 NOTES:
5460 
1. Infinite values of EC will lead to an exception being thrown.  Values
less than 0.0 or greater than 1.0 will lead to an error code being
returned after the call to MCPDSolve().
5464 
5465   -- ALGLIB --
5466      Copyright 23.05.2010 by Bochkanov Sergey
5467 *************************************************************************/
5468 void mcpdsetec(const mcpdstate &s, const real_2d_array &ec, const xparams _xparams = alglib::xdefault);
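

/*************************************************************************
EXAMPLE (informal sketch): for a 2-state solver s created with
mcpdcreate(2,s), constrain P[1,1]=0.5 and leave all other elements free.

    alglib::real_2d_array ec;
    ec.setlength(2, 2);
    for(int i=0; i<2; i++)
        for(int j=0; j<2; j++)
            ec[i][j] = alglib::fp_nan;   // NAN = no constraint
    ec[1][1] = 0.5;                      // equality constraint on P[1,1]
    alglib::mcpdsetec(s, ec);

    // equivalent single-element form:
    // alglib::mcpdaddec(s, 1, 1, 0.5);
*************************************************************************/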
5469 
5470 
5471 /*************************************************************************
5472 This function is used to add equality constraints on the elements  of  the
5473 transition matrix P.
5474 
5475 MCPD solver has four types of constraints which can be placed on P:
5476 * user-specified equality constraints (optional)
5477 * user-specified bound constraints (optional)
5478 * user-specified general linear constraints (optional)
5479 * basic constraints (always present):
5480   * non-negativity: P[i,j]>=0
5481   * consistency: every column of P sums to 1.0
5482 
Final constraints which are passed to the underlying optimizer are
calculated as the intersection of all present constraints. For example,
you may specify a bound constraint on P[0,0] and an equality one:
5486     0.1<=P[0,0]<=0.9
5487     P[0,0]=0.5
5488 Such  combination  of  constraints  will  be  silently  reduced  to  their
5489 intersection, which is P[0,0]=0.5.
5490 
This function can be used to ADD an equality constraint for one element
of P without changing constraints for other elements.
5493 
You can also use the MCPDSetEC() function, which allows you to specify an
arbitrary set of equality constraints in one call.
5496 
5497 These functions (MCPDSetEC and MCPDAddEC) interact as follows:
* there is an internal matrix of equality constraints which is stored in
  the MCPD solver
5500 * MCPDSetEC() replaces this matrix by another one (SET)
5501 * MCPDAddEC() modifies one element of this matrix and leaves  other  ones
5502   unchanged (ADD)
5503 * thus  MCPDAddEC()  call  preserves  all  modifications done by previous
5504   calls,  while  MCPDSetEC()  completely discards all changes done to the
5505   equality constraints.
5506 
5507 INPUT PARAMETERS:
5508     S       -   solver
5509     I       -   row index of element being constrained
5510     J       -   column index of element being constrained
5511     C       -   value (constraint for P[I,J]).  Can  be  either  NAN  (no
5512                 constraint) or finite value from [0,1].
5513 
5514 NOTES:
5515 
1. Infinite values of C will lead to an exception being  thrown.  Values
less than 0.0 or greater than 1.0 will lead to an error code being
returned after the call to MCPDSolve().
5519 
5520   -- ALGLIB --
5521      Copyright 23.05.2010 by Bochkanov Sergey
5522 *************************************************************************/
5523 void mcpdaddec(const mcpdstate &s, const ae_int_t i, const ae_int_t j, const double c, const xparams _xparams = alglib::xdefault);
5524 
5525 
5526 /*************************************************************************
5527 This function is used to add bound constraints  on  the  elements  of  the
5528 transition matrix P.
5529 
5530 MCPD solver has four types of constraints which can be placed on P:
5531 * user-specified equality constraints (optional)
5532 * user-specified bound constraints (optional)
5533 * user-specified general linear constraints (optional)
5534 * basic constraints (always present):
5535   * non-negativity: P[i,j]>=0
5536   * consistency: every column of P sums to 1.0
5537 
Final constraints which are passed to the underlying optimizer are
calculated as the intersection of all present constraints. For example,
you may specify a bound constraint on P[0,0] and an equality one:
5541     0.1<=P[0,0]<=0.9
5542     P[0,0]=0.5
5543 Such  combination  of  constraints  will  be  silently  reduced  to  their
5544 intersection, which is P[0,0]=0.5.
5545 
This function can be used to place bound constraints on an arbitrary
subset of elements of P. The set of constraints is specified by BndL/BndU
matrices, which may contain an arbitrary combination of finite numbers or
infinities (like -INF<x<=0.5 or 0.1<=x<+INF).
5550 
You can also use the MCPDAddBC() function, which allows you to ADD a bound
constraint for one element of P without changing constraints for other
elements.
5553 
5554 These functions (MCPDSetBC and MCPDAddBC) interact as follows:
* there is an internal matrix of bound constraints which is stored in the
  MCPD solver
5557 * MCPDSetBC() replaces this matrix by another one (SET)
5558 * MCPDAddBC() modifies one element of this matrix and  leaves  other  ones
5559   unchanged (ADD)
* thus  MCPDAddBC()  call  preserves  all  modifications  done by previous
  calls,  while  MCPDSetBC()  completely discards all changes done to  the
  bound constraints.
5563 
5564 INPUT PARAMETERS:
5565     S       -   solver
5566     BndL    -   lower bounds constraints, array[N,N]. Elements of BndL can
5567                 be finite numbers or -INF.
5568     BndU    -   upper bounds constraints, array[N,N]. Elements of BndU can
5569                 be finite numbers or +INF.
5570 
5571   -- ALGLIB --
5572      Copyright 23.05.2010 by Bochkanov Sergey
5573 *************************************************************************/
5574 void mcpdsetbc(const mcpdstate &s, const real_2d_array &bndl, const real_2d_array &bndu, const xparams _xparams = alglib::xdefault);
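

/*************************************************************************
EXAMPLE (informal sketch): for a 2-state solver s created with
mcpdcreate(2,s), require 0.1<=P[0,0]<=0.9 and leave the other elements
bounded only by the basic constraints.

    alglib::real_2d_array bndl, bndu;
    bndl.setlength(2, 2);
    bndu.setlength(2, 2);
    for(int i=0; i<2; i++)
        for(int j=0; j<2; j++)
        {
            bndl[i][j] = alglib::fp_neginf;   // -INF = no lower bound
            bndu[i][j] = alglib::fp_posinf;   // +INF = no upper bound
        }
    bndl[0][0] = 0.1;
    bndu[0][0] = 0.9;
    alglib::mcpdsetbc(s, bndl, bndu);
*************************************************************************/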
5575 
5576 
5577 /*************************************************************************
5578 This function is used to add bound constraints  on  the  elements  of  the
5579 transition matrix P.
5580 
5581 MCPD solver has four types of constraints which can be placed on P:
5582 * user-specified equality constraints (optional)
5583 * user-specified bound constraints (optional)
5584 * user-specified general linear constraints (optional)
5585 * basic constraints (always present):
5586   * non-negativity: P[i,j]>=0
5587   * consistency: every column of P sums to 1.0
5588 
Final constraints which are passed to the underlying optimizer are
calculated as the intersection of all present constraints. For example,
you may specify a bound constraint on P[0,0] and an equality one:
5592     0.1<=P[0,0]<=0.9
5593     P[0,0]=0.5
5594 Such  combination  of  constraints  will  be  silently  reduced  to  their
5595 intersection, which is P[0,0]=0.5.
5596 
This function can be used to ADD a bound constraint for one element of P
without changing constraints for other elements.
5599 
You can also use the MCPDSetBC() function, which allows you to place bound
constraints on an arbitrary subset of elements of P. The set of
constraints is specified by BndL/BndU matrices, which may contain an
arbitrary combination of finite numbers or infinities (like -INF<x<=0.5
or 0.1<=x<+INF).
5604 
5605 These functions (MCPDSetBC and MCPDAddBC) interact as follows:
* there is an internal matrix of bound constraints which is stored in the
  MCPD solver
5608 * MCPDSetBC() replaces this matrix by another one (SET)
5609 * MCPDAddBC() modifies one element of this matrix and  leaves  other  ones
5610   unchanged (ADD)
* thus  MCPDAddBC()  call  preserves  all  modifications  done by previous
  calls,  while  MCPDSetBC()  completely discards all changes done to  the
  bound constraints.
5614 
5615 INPUT PARAMETERS:
5616     S       -   solver
5617     I       -   row index of element being constrained
5618     J       -   column index of element being constrained
5619     BndL    -   lower bound
5620     BndU    -   upper bound
5621 
5622   -- ALGLIB --
5623      Copyright 23.05.2010 by Bochkanov Sergey
5624 *************************************************************************/
5625 void mcpdaddbc(const mcpdstate &s, const ae_int_t i, const ae_int_t j, const double bndl, const double bndu, const xparams _xparams = alglib::xdefault);
5626 
5627 
5628 /*************************************************************************
5629 This function is used to set linear equality/inequality constraints on the
5630 elements of the transition matrix P.
5631 
5632 This function can be used to set one or several general linear constraints
5633 on the elements of P. Two types of constraints are supported:
5634 * equality constraints
5635 * inequality constraints (both less-or-equal and greater-or-equal)
5636 
5637 Coefficients  of  constraints  are  specified  by  matrix  C (one  of  the
5638 parameters).  One  row  of  C  corresponds  to  one  constraint.   Because
5639 transition  matrix P has N*N elements,  we  need  N*N columns to store all
5640 coefficients  (they  are  stored row by row), and one more column to store
5641 right part - hence C has N*N+1 columns.  Constraint  kind is stored in the
5642 CT array.
5643 
5644 Thus, I-th linear constraint is
5645     P[0,0]*C[I,0] + P[0,1]*C[I,1] + .. + P[0,N-1]*C[I,N-1] +
5646         + P[1,0]*C[I,N] + P[1,1]*C[I,N+1] + ... +
5647         + P[N-1,N-1]*C[I,N*N-1]  ?=?  C[I,N*N]
5648 where ?=? can be either "=" (CT[i]=0), "<=" (CT[i]<0) or ">=" (CT[i]>0).
5649 
Your constraint may involve only some subset of P (less than N*N elements).
For example it can be something like
    P[0,0] + P[0,1] = 0.5
In this case you still should pass a matrix with N*N+1 columns, but all
its elements (except for C[0,0], C[0,1] and the right part C[0,N*N]) will
be zero.
5655 
5656 INPUT PARAMETERS:
5657     S       -   solver
5658     C       -   array[K,N*N+1] - coefficients of constraints
5659                 (see above for complete description)
5660     CT      -   array[K] - constraint types
5661                 (see above for complete description)
5662     K       -   number of equality/inequality constraints, K>=0:
5663                 * if given, only leading K elements of C/CT are used
5664                 * if not given, automatically determined from sizes of C/CT
5665 
5666   -- ALGLIB --
5667      Copyright 23.05.2010 by Bochkanov Sergey
5668 *************************************************************************/
5669 void mcpdsetlc(const mcpdstate &s, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams = alglib::xdefault);
5670 void mcpdsetlc(const mcpdstate &s, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams = alglib::xdefault);
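

/*************************************************************************
EXAMPLE (informal sketch): for N=2, encode the single constraint
P[0,0]+P[0,1]=0.5. The coefficient of P[i,j] goes to column i*N+j, the
right part to column N*N. Assumes s was created with mcpdcreate(2,s).

    // columns:             P[0,0] P[0,1] P[1,0] P[1,1] right part
    alglib::real_2d_array c = "[[1,1,0,0,0.5]]";
    alglib::integer_1d_array ct = "[0]";    // CT[0]=0 means "=" constraint
    alglib::mcpdsetlc(s, c, ct);
*************************************************************************/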
5671 
5672 
5673 /*************************************************************************
This function allows you to tune the amount of Tikhonov regularization
applied to your problem.
5676 
By default, the regularizing term is equal to r*||P-prior_P||^2, where r
is a small non-zero value, P is the transition matrix, prior_P is the
identity matrix, and ||X||^2 is the sum of squared elements of X.
5680 
5681 This  function  allows  you to change coefficient r. You can  also  change
5682 prior values with MCPDSetPrior() function.
5683 
5684 INPUT PARAMETERS:
5685     S       -   solver
5686     V       -   regularization  coefficient, finite non-negative value. It
5687                 is  not  recommended  to specify zero value unless you are
5688                 pretty sure that you want it.
5689 
5690   -- ALGLIB --
5691      Copyright 23.05.2010 by Bochkanov Sergey
5692 *************************************************************************/
5693 void mcpdsettikhonovregularizer(const mcpdstate &s, const double v, const xparams _xparams = alglib::xdefault);
5694 
5695 
5696 /*************************************************************************
This function allows you to set the prior values used for regularization
of your problem.
5699 
By default, the regularizing term is equal to r*||P-prior_P||^2, where r
is a small non-zero value, P is the transition matrix, prior_P is the
identity matrix, and ||X||^2 is the sum of squared elements of X.
5703 
5704 This  function  allows  you to change prior values prior_P. You  can  also
5705 change r with MCPDSetTikhonovRegularizer() function.
5706 
5707 INPUT PARAMETERS:
5708     S       -   solver
5709     PP      -   array[N,N], matrix of prior values:
5710                 1. elements must be real numbers from [0,1]
5711                 2. columns must sum to 1.0.
                The first property is checked (an exception is thrown
                otherwise), while the second one is not checked/enforced.
5714 
5715   -- ALGLIB --
5716      Copyright 23.05.2010 by Bochkanov Sergey
5717 *************************************************************************/
5718 void mcpdsetprior(const mcpdstate &s, const real_2d_array &pp, const xparams _xparams = alglib::xdefault);
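

/*************************************************************************
EXAMPLE (informal sketch): for a 2-state solver s created with
mcpdcreate(2,s), regularize the solution towards a prior which favors
staying in the current state, with a hand-picked coefficient r.

    alglib::real_2d_array pp = "[[0.9,0.1],[0.1,0.9]]";  // columns sum to 1.0
    alglib::mcpdsetprior(s, pp);
    alglib::mcpdsettikhonovregularizer(s, 1.0e-4);
*************************************************************************/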
5719 
5720 
5721 /*************************************************************************
This function is used to change prediction weights.
5723 
5724 MCPD solver scales prediction errors as follows
5725     Error(P) = ||W*(y-P*x)||^2
5726 where
5727     x is a system state at time t
5728     y is a system state at time t+1
5729     P is a transition matrix
5730     W is a diagonal scaling matrix
5731 
By default, weights are chosen in order  to  minimize  relative prediction
error instead of the absolute one. For example, if one component of  state
is about 0.5 in magnitude and another one is about 0.05, then the algorithm
will make the corresponding weights equal to 2.0 and 20.0, i.e. weights are
roughly inversely proportional to the magnitudes of the components.
5736 
5737 INPUT PARAMETERS:
5738     S       -   solver
5739     PW      -   array[N], weights:
5740                 * must be non-negative values (exception will be thrown otherwise)
5741                 * zero values will be replaced by automatically chosen values
5742 
5743   -- ALGLIB --
5744      Copyright 23.05.2010 by Bochkanov Sergey
5745 *************************************************************************/
5746 void mcpdsetpredictionweights(const mcpdstate &s, const real_1d_array &pw, const xparams _xparams = alglib::xdefault);
5747 
5748 
5749 /*************************************************************************
5750 This function is used to start solution of the MCPD problem.
5751 
5752 After return from this function, you can use MCPDResults() to get solution
5753 and completion code.
5754 
5755   -- ALGLIB --
5756      Copyright 23.05.2010 by Bochkanov Sergey
5757 *************************************************************************/
5758 void mcpdsolve(const mcpdstate &s, const xparams _xparams = alglib::xdefault);
5759 
5760 
5761 /*************************************************************************
5762 MCPD results
5763 
5764 INPUT PARAMETERS:
5765     State   -   algorithm state
5766 
5767 OUTPUT PARAMETERS:
5768     P       -   array[N,N], transition matrix
5769     Rep     -   optimization report. You should check Rep.TerminationType
5770                 in  order  to  distinguish  successful  termination  from
5771                 unsuccessful one. Speaking short, positive values  denote
5772                 success, negative ones are failures.
5773                 More information about fields of this  structure  can  be
5774                 found in the comments on MCPDReport datatype.
5775 
5776 
5777   -- ALGLIB --
5778      Copyright 23.05.2010 by Bochkanov Sergey
5779 *************************************************************************/
5780 void mcpdresults(const mcpdstate &s, real_2d_array &p, mcpdreport &rep, const xparams _xparams = alglib::xdefault);
5781 #endif
5782 
5783 #if defined(AE_COMPILE_MLPE) || !defined(AE_PARTIAL_BUILD)
5784 /*************************************************************************
5785 This function serializes data structure to string.
5786 
5787 Important properties of s_out:
5788 * it contains alphanumeric characters, dots, underscores, minus signs
5789 * these symbols are grouped into words, which are separated by spaces
5790   and Windows-style (CR+LF) newlines
5791 * although  serializer  uses  spaces and CR+LF as separators, you can
5792   replace any separator character by arbitrary combination of spaces,
5793   tabs, Windows or Unix newlines. It allows flexible reformatting  of
5794   the  string  in  case you want to include it into text or XML file.
  But you should not insert separators into the middle of the "words"
  nor should you change the case of letters.
5797 * s_out can be freely moved between 32-bit and 64-bit systems, little
5798   and big endian machines, and so on. You can serialize structure  on
5799   32-bit machine and unserialize it on 64-bit one (or vice versa), or
5800   serialize  it  on  SPARC  and  unserialize  on  x86.  You  can also
5801   serialize  it  in  C++ version of ALGLIB and unserialize in C# one,
5802   and vice versa.
5803 *************************************************************************/
5804 void mlpeserialize(mlpensemble &obj, std::string &s_out);
5805 
5806 
5807 /*************************************************************************
5808 This function unserializes data structure from string.
5809 *************************************************************************/
5810 void mlpeunserialize(const std::string &s_in, mlpensemble &obj);
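

/*************************************************************************
EXAMPLE (informal sketch): round-tripping an ensemble through its string
representation. Assumes ens is a valid mlpensemble (e.g. created with one
of the mlpecreate* functions below).

    std::string s;
    alglib::mlpeserialize(ens, s);       // object -> portable string
    alglib::mlpensemble ens2;
    alglib::mlpeunserialize(s, ens2);    // string -> equivalent object
*************************************************************************/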
5811 
5812 
5813 
5814 
5815 /*************************************************************************
5816 This function serializes data structure to C++ stream.
5817 
The data stream generated by this function is the same as  the  string
representation generated by the string version of the  serializer  -
alphanumeric characters, dots, underscores, minus signs, which are grouped
into words separated by spaces and CR+LF.
5822 
We recommend that you read the comments on the string  version  of  the
serializer to find out more about serialization of ALGLIB objects.
5825 *************************************************************************/
5826 void mlpeserialize(mlpensemble &obj, std::ostream &s_out);
5827 
5828 
5829 /*************************************************************************
5830 This function unserializes data structure from stream.
5831 *************************************************************************/
5832 void mlpeunserialize(const std::istream &s_in, mlpensemble &obj);
5833 
5834 
5835 /*************************************************************************
5836 Like MLPCreate0, but for ensembles.
5837 
5838   -- ALGLIB --
5839      Copyright 18.02.2009 by Bochkanov Sergey
5840 *************************************************************************/
5841 void mlpecreate0(const ae_int_t nin, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5842 
5843 
5844 /*************************************************************************
5845 Like MLPCreate1, but for ensembles.
5846 
5847   -- ALGLIB --
5848      Copyright 18.02.2009 by Bochkanov Sergey
5849 *************************************************************************/
5850 void mlpecreate1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5851 
5852 
5853 /*************************************************************************
5854 Like MLPCreate2, but for ensembles.
5855 
5856   -- ALGLIB --
5857      Copyright 18.02.2009 by Bochkanov Sergey
5858 *************************************************************************/
5859 void mlpecreate2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5860 
5861 
5862 /*************************************************************************
5863 Like MLPCreateB0, but for ensembles.
5864 
5865   -- ALGLIB --
5866      Copyright 18.02.2009 by Bochkanov Sergey
5867 *************************************************************************/
5868 void mlpecreateb0(const ae_int_t nin, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5869 
5870 
5871 /*************************************************************************
5872 Like MLPCreateB1, but for ensembles.
5873 
5874   -- ALGLIB --
5875      Copyright 18.02.2009 by Bochkanov Sergey
5876 *************************************************************************/
5877 void mlpecreateb1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5878 
5879 
5880 /*************************************************************************
5881 Like MLPCreateB2, but for ensembles.
5882 
5883   -- ALGLIB --
5884      Copyright 18.02.2009 by Bochkanov Sergey
5885 *************************************************************************/
5886 void mlpecreateb2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5887 
5888 
5889 /*************************************************************************
5890 Like MLPCreateR0, but for ensembles.
5891 
5892   -- ALGLIB --
5893      Copyright 18.02.2009 by Bochkanov Sergey
5894 *************************************************************************/
5895 void mlpecreater0(const ae_int_t nin, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5896 
5897 
5898 /*************************************************************************
5899 Like MLPCreateR1, but for ensembles.
5900 
5901   -- ALGLIB --
5902      Copyright 18.02.2009 by Bochkanov Sergey
5903 *************************************************************************/
5904 void mlpecreater1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5905 
5906 
5907 /*************************************************************************
5908 Like MLPCreateR2, but for ensembles.
5909 
5910   -- ALGLIB --
5911      Copyright 18.02.2009 by Bochkanov Sergey
5912 *************************************************************************/
5913 void mlpecreater2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5914 
5915 
5916 /*************************************************************************
5917 Like MLPCreateC0, but for ensembles.
5918 
5919   -- ALGLIB --
5920      Copyright 18.02.2009 by Bochkanov Sergey
5921 *************************************************************************/
5922 void mlpecreatec0(const ae_int_t nin, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5923 
5924 
5925 /*************************************************************************
5926 Like MLPCreateC1, but for ensembles.
5927 
5928   -- ALGLIB --
5929      Copyright 18.02.2009 by Bochkanov Sergey
5930 *************************************************************************/
5931 void mlpecreatec1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5932 
5933 
5934 /*************************************************************************
5935 Like MLPCreateC2, but for ensembles.
5936 
5937   -- ALGLIB --
5938      Copyright 18.02.2009 by Bochkanov Sergey
5939 *************************************************************************/
5940 void mlpecreatec2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5941 
5942 
5943 /*************************************************************************
5944 Creates ensemble from network. Only network geometry is copied.
5945 
5946   -- ALGLIB --
5947      Copyright 17.02.2009 by Bochkanov Sergey
5948 *************************************************************************/
5949 void mlpecreatefromnetwork(const multilayerperceptron &network, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5950 
5951 
5952 /*************************************************************************
5953 Randomization of MLP ensemble
5954 
5955   -- ALGLIB --
5956      Copyright 17.02.2009 by Bochkanov Sergey
5957 *************************************************************************/
5958 void mlperandomize(const mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5959 
5960 
5961 /*************************************************************************
5962 Return ensemble properties (number of inputs and outputs).
5963 
5964   -- ALGLIB --
5965      Copyright 17.02.2009 by Bochkanov Sergey
5966 *************************************************************************/
5967 void mlpeproperties(const mlpensemble &ensemble, ae_int_t &nin, ae_int_t &nout, const xparams _xparams = alglib::xdefault);
5968 
5969 
5970 /*************************************************************************
5971 Return normalization type (whether ensemble is SOFTMAX-normalized or not).
5972 
5973   -- ALGLIB --
5974      Copyright 17.02.2009 by Bochkanov Sergey
5975 *************************************************************************/
5976 bool mlpeissoftmax(const mlpensemble &ensemble, const xparams _xparams = alglib::xdefault);
5977 
5978 
5979 /*************************************************************************
Processing
5981 
5982 INPUT PARAMETERS:
5983     Ensemble-   neural networks ensemble
5984     X       -   input vector,  array[0..NIn-1].
    Y       -   (possibly) preallocated buffer; if the size of Y is  less
                than NOut, it will be reallocated. If it is large enough,
                it is NOT reallocated, so we can save some time on
                reallocation.
5988 
5989 
5990 OUTPUT PARAMETERS:
5991     Y       -   result. Regression estimate when solving regression  task,
5992                 vector of posterior probabilities for classification task.
5993 
5994   -- ALGLIB --
5995      Copyright 17.02.2009 by Bochkanov Sergey
5996 *************************************************************************/
5997 void mlpeprocess(const mlpensemble &ensemble, const real_1d_array &x, real_1d_array &y, const xparams _xparams = alglib::xdefault);
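

/*************************************************************************
EXAMPLE (informal sketch): processing one input vector with a preallocated
output buffer. Assumes ens is a trained ensemble with NIn=2, NOut=1.

    alglib::real_1d_array x = "[0.1,0.2]";
    alglib::real_1d_array y = "[0.0]";   // size NOut; reused across calls
    alglib::mlpeprocess(ens, x, y);
    // y[0] now holds the ensemble output for x
*************************************************************************/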
5998 
5999 
6000 /*************************************************************************
'interactive'  variant  of  MLPEProcess  for  languages  like Python which
support constructs like "Y = MLPEProcess(Ensemble,X)" and interactive mode
of the interpreter
6004 
This function allocates a new array on each call, so it is  significantly
slower than its 'non-interactive' counterpart, but it is  more  convenient
when you call it from the command line.
6008 
6009   -- ALGLIB --
6010      Copyright 17.02.2009 by Bochkanov Sergey
6011 *************************************************************************/
6012 void mlpeprocessi(const mlpensemble &ensemble, const real_1d_array &x, real_1d_array &y, const xparams _xparams = alglib::xdefault);
6013 
6014 
6015 /*************************************************************************
6016 Relative classification error on the test set
6017 
6018 INPUT PARAMETERS:
6019     Ensemble-   ensemble
6020     XY      -   test set
6021     NPoints -   test set size
6022 
6023 RESULT:
6024     percent of incorrectly classified cases.
    Works both for classifier networks and for regression networks  which
are used as classifiers.
6027 
6028   -- ALGLIB --
6029      Copyright 17.02.2009 by Bochkanov Sergey
6030 *************************************************************************/
6031 double mlperelclserror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
6032 
6033 
6034 /*************************************************************************
6035 Average cross-entropy (in bits per element) on the test set
6036 
6037 INPUT PARAMETERS:
6038     Ensemble-   ensemble
6039     XY      -   test set
6040     NPoints -   test set size
6041 
6042 RESULT:
6043     CrossEntropy/(NPoints*LN(2)).
6044     Zero if ensemble solves regression task.
6045 
6046   -- ALGLIB --
6047      Copyright 17.02.2009 by Bochkanov Sergey
6048 *************************************************************************/
6049 double mlpeavgce(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
6050 
6051 
6052 /*************************************************************************
6053 RMS error on the test set
6054 
6055 INPUT PARAMETERS:
6056     Ensemble-   ensemble
6057     XY      -   test set
6058     NPoints -   test set size
6059 
6060 RESULT:
6061     root mean square error.
    Its meaning for regression tasks is obvious. As for classification
tasks, RMS error means error when estimating posterior probabilities.
6064 
6065   -- ALGLIB --
6066      Copyright 17.02.2009 by Bochkanov Sergey
6067 *************************************************************************/
6068 double mlpermserror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
6069 
6070 
6071 /*************************************************************************
6072 Average error on the test set
6073 
6074 INPUT PARAMETERS:
6075     Ensemble-   ensemble
6076     XY      -   test set
6077     NPoints -   test set size
6078 
6079 RESULT:
    Its meaning for regression tasks is obvious. As for classification
tasks, it means average error when estimating posterior probabilities.
6082 
6083   -- ALGLIB --
6084      Copyright 17.02.2009 by Bochkanov Sergey
6085 *************************************************************************/
6086 double mlpeavgerror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
6087 
6088 
6089 /*************************************************************************
6090 Average relative error on the test set
6091 
6092 INPUT PARAMETERS:
6093     Ensemble-   ensemble
6094     XY      -   test set
6095     NPoints -   test set size
6096 
6097 RESULT:
    Its meaning for regression tasks is obvious. As for classification
tasks, it means average relative error when estimating posterior
probabilities.
6100 
6101   -- ALGLIB --
6102      Copyright 17.02.2009 by Bochkanov Sergey
6103 *************************************************************************/
6104 double mlpeavgrelerror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
6105 #endif
6106 
6107 #if defined(AE_COMPILE_MLPTRAIN) || !defined(AE_PARTIAL_BUILD)
6108 /*************************************************************************
6109 Neural network training  using  modified  Levenberg-Marquardt  with  exact
6110 Hessian calculation and regularization. Subroutine trains  neural  network
6111 with restarts from random positions. Algorithm is well  suited  for  small
6112 and medium scale problems (hundreds of weights).
6113 
6114 INPUT PARAMETERS:
6115     Network     -   neural network with initialized geometry
6116     XY          -   training set
6117     NPoints     -   training set size
6118     Decay       -   weight decay constant, >=0.001
6119                     Decay term 'Decay*||Weights||^2' is added to error
6120                     function.
6121                     If you don't know what Decay to choose, use 0.001.
6122     Restarts    -   number of restarts from random position, >0.
6123                     If you don't know what Restarts to choose, use 2.
6124 
6125 OUTPUT PARAMETERS:
6126     Network     -   trained neural network.
6127     Info        -   return code:
6128                     * -9, if internal matrix inverse subroutine failed
6129                     * -2, if there is a point with class number
6130                           outside of [0..NOut-1].
6131                     * -1, if wrong parameters specified
6132                           (NPoints<0, Restarts<1).
6133                     *  2, if task has been solved.
6134     Rep         -   training report
6135 
6136   -- ALGLIB --
6137      Copyright 10.03.2009 by Bochkanov Sergey
6138 *************************************************************************/
6139 void mlptrainlm(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep, const xparams _xparams = alglib::xdefault);
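

/*************************************************************************
EXAMPLE (informal sketch): training a small 1-3-1 regression network on a
made-up 3-point dataset (last column is the target), with the recommended
Decay=0.001 and Restarts=2.

    alglib::multilayerperceptron net;
    alglib::mlpreport rep;
    alglib::ae_int_t info;
    alglib::real_2d_array xy = "[[0.0,0.0],[0.5,0.25],[1.0,1.0]]";
    alglib::mlpcreate1(1, 3, 1, net);    // 1 input, 3 hidden, 1 output
    alglib::mlptrainlm(net, xy, 3, 0.001, 2, info, rep);
    // info==2 means the task has been solved
*************************************************************************/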
6140 
6141 
6142 /*************************************************************************
6143 Neural  network  training  using  L-BFGS  algorithm  with  regularization.
6144 Subroutine  trains  neural  network  with  restarts from random positions.
Algorithm  is  well  suited  for  problems  of  any dimensionality (memory
requirements and step complexity are linear in the number of weights).
6147 
6148 INPUT PARAMETERS:
6149     Network     -   neural network with initialized geometry
6150     XY          -   training set
6151     NPoints     -   training set size
6152     Decay       -   weight decay constant, >=0.001
6153                     Decay term 'Decay*||Weights||^2' is added to error
6154                     function.
6155                     If you don't know what Decay to choose, use 0.001.
6156     Restarts    -   number of restarts from random position, >0.
6157                     If you don't know what Restarts to choose, use 2.
6158     WStep       -   stopping criterion. Algorithm stops if  step  size  is
6159                     less than WStep. Recommended value - 0.01.  Zero  step
6160                     size means stopping after MaxIts iterations.
6161     MaxIts      -   stopping   criterion.  Algorithm  stops  after  MaxIts
6162                     iterations (NOT gradient  calculations).  Zero  MaxIts
6163                     means stopping when step is sufficiently small.
6164 
6165 OUTPUT PARAMETERS:
6166     Network     -   trained neural network.
6167     Info        -   return code:
6168                     * -8, if both WStep=0 and MaxIts=0
6169                     * -2, if there is a point with class number
6170                           outside of [0..NOut-1].
6171                     * -1, if wrong parameters specified
6172                           (NPoints<0, Restarts<1).
6173                     *  2, if task has been solved.
6174     Rep         -   training report
6175 
6176   -- ALGLIB --
6177      Copyright 09.12.2007 by Bochkanov Sergey
6178 *************************************************************************/
6179 void mlptrainlbfgs(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const double wstep, const ae_int_t maxits, ae_int_t &info, mlpreport &rep, const xparams _xparams = alglib::xdefault);
6180 
6181 
6182 /*************************************************************************
6183 Neural network training using early stopping (base algorithm - L-BFGS with
6184 regularization).
6185 
6186 INPUT PARAMETERS:
6187     Network     -   neural network with initialized geometry
6188     TrnXY       -   training set
6189     TrnSize     -   training set size, TrnSize>0
6190     ValXY       -   validation set
6191     ValSize     -   validation set size, ValSize>0
6192     Decay       -   weight decay constant, >=0.001
6193                     Decay term 'Decay*||Weights||^2' is added to error
6194                     function.
6195                     If you don't know what Decay to choose, use 0.001.
    Restarts    -   number of restarts, either:
                    * strictly positive number - algorithm makes specified
                      number of restarts from random position.
                    * -1, in which case algorithm makes exactly one run
                      from the initial state of the network (no randomization).
                    If you don't know what Restarts to choose, choose one
                    of the following:
                    * -1 (deterministic start)
                    * +1 (one random restart)
                    * +5 (moderate amount of random restarts)
6206 
6207 OUTPUT PARAMETERS:
6208     Network     -   trained neural network.
6209     Info        -   return code:
6210                     * -2, if there is a point with class number
6211                           outside of [0..NOut-1].
6212                     * -1, if wrong parameters specified
6213                           (NPoints<0, Restarts<1, ...).
6214                     *  2, task has been solved, stopping  criterion  met -
6215                           sufficiently small step size.  Not expected  (we
6216                           use  EARLY  stopping)  but  possible  and not an
6217                           error.
6218                     *  6, task has been solved, stopping  criterion  met -
6219                           increasing of validation set error.
6220     Rep         -   training report
6221 
6222 NOTE:
6223 
Algorithm stops if validation set error increases for long  enough  or  if
the step size is small enough (there are tasks where validation set error
may decrease for eternity). In any case, the solution returned corresponds
to the minimum of validation set error.
6228 
6229   -- ALGLIB --
6230      Copyright 10.03.2009 by Bochkanov Sergey
6231 *************************************************************************/
6232 void mlptraines(const multilayerperceptron &network, const real_2d_array &trnxy, const ae_int_t trnsize, const real_2d_array &valxy, const ae_int_t valsize, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep, const xparams _xparams = alglib::xdefault);
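

/*************************************************************************
EXAMPLE (informal sketch): early-stopping training with a held-out
validation set (made-up data; last column is the target).

    alglib::multilayerperceptron net;
    alglib::mlpreport rep;
    alglib::ae_int_t info;
    alglib::real_2d_array trn = "[[0.0,0.0],[0.4,0.16],[0.8,0.64]]";
    alglib::real_2d_array val = "[[0.2,0.04],[0.6,0.36]]";
    alglib::mlpcreate1(1, 3, 1, net);
    alglib::mlptraines(net, trn, 3, val, 2, 0.001, 2, info, rep);
    // info==2 (small step) or info==6 (validation error grew) means success
*************************************************************************/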
6233 
6234 
6235 /*************************************************************************
6236 Cross-validation estimate of generalization error.
6237 
6238 Base algorithm - L-BFGS.
6239 
6240 INPUT PARAMETERS:
6241     Network     -   neural network with initialized geometry.   Network is
6242                     not changed during cross-validation -  it is used only
6243                     as a representative of its architecture.
6244     XY          -   training set.
    NPoints     -   training set size
6246     Decay       -   weight  decay, same as in MLPTrainLBFGS
6247     Restarts    -   number of restarts, >0.
6248                     restarts are counted for each partition separately, so
6249                     total number of restarts will be Restarts*FoldsCount.
6250     WStep       -   stopping criterion, same as in MLPTrainLBFGS
6251     MaxIts      -   stopping criterion, same as in MLPTrainLBFGS
6252     FoldsCount  -   number of folds in k-fold cross-validation,
                    2<=FoldsCount<=NPoints.
6254                     recommended value: 10.
6255 
6256 OUTPUT PARAMETERS:
6257     Info        -   return code, same as in MLPTrainLBFGS
6258     Rep         -   report, same as in MLPTrainLM/MLPTrainLBFGS
6259     CVRep       -   generalization error estimates
6260 
6261   -- ALGLIB --
6262      Copyright 09.12.2007 by Bochkanov Sergey
6263 *************************************************************************/
6264 void mlpkfoldcvlbfgs(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const double wstep, const ae_int_t maxits, const ae_int_t foldscount, ae_int_t &info, mlpreport &rep, mlpcvreport &cvrep, const xparams _xparams = alglib::xdefault);
6265 
6266 
6267 /*************************************************************************
6268 Cross-validation estimate of generalization error.
6269 
6270 Base algorithm - Levenberg-Marquardt.
6271 
6272 INPUT PARAMETERS:
6273     Network     -   neural network with initialized geometry.   Network is
6274                     not changed during cross-validation -  it is used only
6275                     as a representative of its architecture.
6276     XY          -   training set.
6277     SSize       -   training set size
6278     Decay       -   weight  decay, same as in MLPTrainLBFGS
6279     Restarts    -   number of restarts, >0.
6280                     restarts are counted for each partition separately, so
6281                     total number of restarts will be Restarts*FoldsCount.
6282     FoldsCount  -   number of folds in k-fold cross-validation,
6283                     2<=FoldsCount<=SSize.
6284                     recommended value: 10.
6285 
6286 OUTPUT PARAMETERS:
6287     Info        -   return code, same as in MLPTrainLBFGS
6288     Rep         -   report, same as in MLPTrainLM/MLPTrainLBFGS
6289     CVRep       -   generalization error estimates
6290 
6291   -- ALGLIB --
6292      Copyright 09.12.2007 by Bochkanov Sergey
6293 *************************************************************************/
6294 void mlpkfoldcvlm(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const ae_int_t foldscount, ae_int_t &info, mlpreport &rep, mlpcvreport &cvrep, const xparams _xparams = alglib::xdefault);
6295 
6296 
6297 /*************************************************************************
6298 This function estimates generalization error using cross-validation on the
6299 current dataset with current training settings.
6300 
6301   ! COMMERCIAL EDITION OF ALGLIB:
6302   !
6303   ! Commercial Edition of ALGLIB includes following important improvements
6304   ! of this function:
6305   ! * high-performance native backend with same C# interface (C# version)
6306   ! * multithreading support (C++ and C# versions)
6307   !
6308   ! We recommend you to read 'Working with commercial version' section  of
6309   ! ALGLIB Reference Manual in order to find out how to  use  performance-
6310   ! related features provided by commercial edition of ALGLIB.
6311 
6312 INPUT PARAMETERS:
6313     S           -   trainer object
6314     Network     -   neural network. It must have same number of inputs and
6315                     output/classes as was specified during creation of the
6316                     trainer object. Network is not changed  during  cross-
6317                     validation and is not trained - it  is  used  only  as
6318                     representative of its architecture. I.e., we  estimate
6319                     generalization properties of  ARCHITECTURE,  not  some
6320                     specific network.
6321     NRestarts   -   number of restarts, >=0:
6322                     * NRestarts>0  means  that  for  each cross-validation
6323                       round   specified  number   of  random  restarts  is
6324                       performed,  with  best  network  being  chosen after
6325                       training.
6326                     * NRestarts=0 is same as NRestarts=1
6327     FoldsCount  -   number of folds in k-fold cross-validation:
6328                     * 2<=FoldsCount<=size of dataset
6329                     * recommended value: 10.
6330                     * values larger than dataset size will be silently
6331                       truncated down to dataset size
6332 
6333 OUTPUT PARAMETERS:
6334     Rep         -   structure which contains cross-validation estimates:
6335                     * Rep.RelCLSError - fraction of misclassified cases.
6336                     * Rep.AvgCE - average cross-entropy
6337                     * Rep.RMSError - root-mean-square error
6338                     * Rep.AvgError - average error
6339                     * Rep.AvgRelError - average relative error
6340 
6341 NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
6342       or subset with only one point  was  given,  zeros  are  returned  as
6343       estimates.
6344 
6345 NOTE: this method performs FoldsCount cross-validation  rounds,  each  one
6346       with NRestarts random starts.  Thus,  FoldsCount*NRestarts  networks
6347       are trained in total.
6348 
6349 NOTE: Rep.RelCLSError/Rep.AvgCE are zero on regression problems.
6350 
6351 NOTE: on classification problems Rep.RMSError/Rep.AvgError/Rep.AvgRelError
6352       contain errors in prediction of posterior probabilities.
6353 
6354   -- ALGLIB --
6355      Copyright 23.07.2012 by Bochkanov Sergey
6356 *************************************************************************/
6357 void mlpkfoldcv(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, const ae_int_t foldscount, mlpreport &rep, const xparams _xparams = alglib::xdefault);
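
/*************************************************************************
A minimal C++ sketch of MLPKFoldCV(). Illustrative only:  MLPCreateC1()  is
declared in the MLPBase unit, and the XOR-style dataset is made up.

    alglib::mlptrainer trn;
    alglib::multilayerperceptron net;
    alglib::mlpreport rep;

    alglib::mlpcreatetrainercls(2, 2, trn);            // 2 inputs, 2 classes
    alglib::real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    alglib::mlpsetdataset(trn, xy, 4);

    alglib::mlpcreatec1(2, 3, 2, net);                 // architecture to assess
    alglib::mlpkfoldcv(trn, net, 5, 4, rep);           // 5 restarts, 4 folds
    // rep.rmserror etc. now hold cross-validation estimates
*************************************************************************/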
6358 
6359 
6360 /*************************************************************************
6361 Creation of the network trainer object for regression networks
6362 
6363 INPUT PARAMETERS:
6364     NIn         -   number of inputs, NIn>=1
6365     NOut        -   number of outputs, NOut>=1
6366 
6367 OUTPUT PARAMETERS:
6368     S           -   neural network trainer object.
6369                     This structure can be used to train any regression
6370                     network with NIn inputs and NOut outputs.
6371 
6372   -- ALGLIB --
6373      Copyright 23.07.2012 by Bochkanov Sergey
6374 *************************************************************************/
6375 void mlpcreatetrainer(const ae_int_t nin, const ae_int_t nout, mlptrainer &s, const xparams _xparams = alglib::xdefault);
6376 
6377 
6378 /*************************************************************************
6379 Creation of the network trainer object for classification networks
6380 
6381 INPUT PARAMETERS:
6382     NIn         -   number of inputs, NIn>=1
6383     NClasses    -   number of classes, NClasses>=2
6384 
6385 OUTPUT PARAMETERS:
6386     S           -   neural network trainer object.
6387                     This structure can be used to train any classification
6388                     network with NIn inputs and NClasses classes.
6389 
6390   -- ALGLIB --
6391      Copyright 23.07.2012 by Bochkanov Sergey
6392 *************************************************************************/
6393 void mlpcreatetrainercls(const ae_int_t nin, const ae_int_t nclasses, mlptrainer &s, const xparams _xparams = alglib::xdefault);
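
/*************************************************************************
A short sketch contrasting the two trainer constructors (illustrative only):

    alglib::mlptrainer trnreg, trncls;
    alglib::mlpcreatetrainer(3, 2, trnreg);     // regression: 3 inputs, 2 outputs
    alglib::mlpcreatetrainercls(3, 4, trncls);  // classification: 3 inputs, 4 classes
*************************************************************************/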
6394 
6395 
6396 /*************************************************************************
6397 This function sets "current dataset" of the trainer object to  one  passed
6398 by user.
6399 
6400 INPUT PARAMETERS:
6401     S           -   trainer object
6402     XY          -   training  set,  see  below  for  information  on   the
6403                     training set format. This function checks  correctness
6404                     of  the  dataset  (no  NANs/INFs,  class  numbers  are
6405                     correct) and throws exception when  incorrect  dataset
6406                     is passed.
6407     NPoints     -   points count, >=0.
6408 
6409 DATASET FORMAT:
6410 
6411 This  function  uses  two  different  dataset formats - one for regression
6412 networks, another one for classification networks.
6413 
6414 For regression networks with NIn inputs and NOut outputs following dataset
6415 format is used:
6416 * dataset is given by NPoints*(NIn+NOut) matrix
6417 * each row corresponds to one example
6418 * first NIn columns are inputs, next NOut columns are outputs
6419 
6420 For classification networks with NIn inputs and  NClasses  classes  following
6421 dataset format is used:
6422 * dataset is given by NPoints*(NIn+1) matrix
6423 * each row corresponds to one example
6424 * first NIn columns are inputs, last column stores class number (from 0 to
6425   NClasses-1).
6426 
6427   -- ALGLIB --
6428      Copyright 23.07.2012 by Bochkanov Sergey
6429 *************************************************************************/
6430 void mlpsetdataset(const mlptrainer &s, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
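
/*************************************************************************
A concrete illustration of the two dataset formats  (values  are  made  up;
Trn is assumed to be a trainer created for a 2-input/1-output problem):

    // regression, NIn=2, NOut=1: each row is [x0, x1, y]
    alglib::real_2d_array xyreg = "[[0.1,0.2,1.5],[0.3,0.4,2.0]]";

    // classification, NIn=2, NClasses=3: each row is [x0, x1, class]
    alglib::real_2d_array xycls = "[[0.1,0.2,0],[0.3,0.4,2]]";

    alglib::mlpsetdataset(trn, xyreg, 2);   // 2 points, regression format
*************************************************************************/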
6431 
6432 
6433 /*************************************************************************
6434 This function sets "current dataset" of the trainer object to  one  passed
6435 by user (sparse matrix is used to store dataset).
6436 
6437 INPUT PARAMETERS:
6438     S           -   trainer object
6439     XY          -   training  set,  see  below  for  information  on   the
6440                     training set format. This function checks  correctness
6441                     of  the  dataset  (no  NANs/INFs,  class  numbers  are
6442                     correct) and throws exception when  incorrect  dataset
6443                     is passed. Any  sparse  storage  format  can be  used:
6444                     Hash-table, CRS...
6445     NPoints     -   points count, >=0
6446 
6447 DATASET FORMAT:
6448 
6449 This  function  uses  two  different  dataset formats - one for regression
6450 networks, another one for classification networks.
6451 
6452 For regression networks with NIn inputs and NOut outputs following dataset
6453 format is used:
6454 * dataset is given by NPoints*(NIn+NOut) matrix
6455 * each row corresponds to one example
6456 * first NIn columns are inputs, next NOut columns are outputs
6457 
6458 For classification networks with NIn inputs and  NClasses  classes  following
6459 dataset format is used:
6460 * dataset is given by NPoints*(NIn+1) matrix
6461 * each row corresponds to one example
6462 * first NIn columns are inputs, last column stores class number (from 0 to
6463   NClasses-1).
6464 
6465   -- ALGLIB --
6466      Copyright 23.07.2012 by Bochkanov Sergey
6467 *************************************************************************/
6468 void mlpsetsparsedataset(const mlptrainer &s, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
6469 
6470 
6471 /*************************************************************************
6472 This function sets weight decay coefficient which is used for training.
6473 
6474 INPUT PARAMETERS:
6475     S           -   trainer object
6476     Decay       -   weight  decay  coefficient,  >=0.  Weight  decay  term
6477                     'Decay*||Weights||^2' is added to error  function.  If
6478                     you don't know what Decay to choose, use 1.0E-3.
6479                     Weight decay can be set to zero,  in this case network
6480                     is trained without weight decay.
6481 
6482 NOTE: by default network uses some small nonzero value for weight decay.
6483 
6484   -- ALGLIB --
6485      Copyright 23.07.2012 by Bochkanov Sergey
6486 *************************************************************************/
6487 void mlpsetdecay(const mlptrainer &s, const double decay, const xparams _xparams = alglib::xdefault);
6488 
6489 
6490 /*************************************************************************
6491 This function sets stopping criteria for the optimizer.
6492 
6493 INPUT PARAMETERS:
6494     S           -   trainer object
6495     WStep       -   stopping criterion. Algorithm stops if  step  size  is
6496                     less than WStep. Recommended value - 0.01.  Zero  step
6497                     size means stopping after MaxIts iterations.
6498                     WStep>=0.
6499     MaxIts      -   stopping   criterion.  Algorithm  stops  after  MaxIts
6500                     epochs (full passes over entire dataset).  Zero MaxIts
6501                     means stopping when step is sufficiently small.
6502                     MaxIts>=0.
6503 
6504 NOTE: by default, WStep=0.005 and MaxIts=0 are used. These values are also
6505       used when MLPSetCond() is called with WStep=0 and MaxIts=0.
6506 
6507 NOTE: these stopping criteria are used for all kinds of neural training  -
6508       from "conventional" networks to early stopping ensembles. When  used
6509       for "conventional" networks, they are  used  as  the  only  stopping
6510       criteria. When combined with early stopping, they are used as ADDITIONAL
6511       stopping criteria which can terminate early stopping algorithm.
6512 
6513   -- ALGLIB --
6514      Copyright 23.07.2012 by Bochkanov Sergey
6515 *************************************************************************/
6516 void mlpsetcond(const mlptrainer &s, const double wstep, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
6517 
6518 
6519 /*************************************************************************
6520 This function sets training algorithm: batch training using L-BFGS will be
6521 used.
6522 
6523 This algorithm:
6524 * is the most robust one for small-scale problems, but may be too  slow  for
6525   large-scale ones
6526 * performs a full pass through the dataset before performing a step
6527 * uses conditions specified by MLPSetCond() for stopping
6528 * is the default one used by the trainer object
6529 
6530 INPUT PARAMETERS:
6531     S           -   trainer object
6532 
6533   -- ALGLIB --
6534      Copyright 23.07.2012 by Bochkanov Sergey
6535 *************************************************************************/
6536 void mlpsetalgobatch(const mlptrainer &s, const xparams _xparams = alglib::xdefault);
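
/*************************************************************************
A short configuration sketch combining  MLPSetDecay(),  MLPSetCond()  and
MLPSetAlgoBatch() (illustrative only; Trn is an already created trainer):

    alglib::mlpsetdecay(trn, 0.001);     // mild weight decay
    alglib::mlpsetcond(trn, 0.01, 100);  // stop on small step OR after 100 epochs
    alglib::mlpsetalgobatch(trn);        // batch L-BFGS (the default anyway)
*************************************************************************/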
6537 
6538 
6539 /*************************************************************************
6540 This function trains neural network passed to this function, using current
6541 dataset (one which was passed to MLPSetDataset() or MLPSetSparseDataset())
6542 and current training settings. Training  from  NRestarts  random  starting
6543 positions is performed, best network is chosen.
6544 
6545 Training is performed using current training algorithm.
6546 
6547   ! COMMERCIAL EDITION OF ALGLIB:
6548   !
6549   ! Commercial Edition of ALGLIB includes following important improvements
6550   ! of this function:
6551   ! * high-performance native backend with same C# interface (C# version)
6552   ! * multithreading support (C++ and C# versions)
6553   !
6554   ! We recommend you to read 'Working with commercial version' section  of
6555   ! ALGLIB Reference Manual in order to find out how to  use  performance-
6556   ! related features provided by commercial edition of ALGLIB.
6557 
6558 INPUT PARAMETERS:
6559     S           -   trainer object
6560     Network     -   neural network. It must have same number of inputs and
6561                     output/classes as was specified during creation of the
6562                     trainer object.
6563     NRestarts   -   number of restarts, >=0:
6564                     * NRestarts>0 means that specified  number  of  random
6565                       restarts are performed, best network is chosen after
6566                       training
6567                     * NRestarts=0 means that current state of the  network
6568                       is used for training.
6569 
6570 OUTPUT PARAMETERS:
6571     Network     -   trained network
6572 
6573 NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
6574       network  is  filled  with  zero  values.  The  same behavior applies
6575       to MLPStartTraining and MLPContinueTraining.
6576 
6577 NOTE: this method uses sum-of-squares error function for training.
6578 
6579   -- ALGLIB --
6580      Copyright 23.07.2012 by Bochkanov Sergey
6581 *************************************************************************/
6582 void mlptrainnetwork(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, mlpreport &rep, const xparams _xparams = alglib::xdefault);
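
/*************************************************************************
End-to-end training sketch (illustrative only; MLPCreateC1() and
MLPProcess() are declared in the MLPBase unit, and the dataset is made up):

    alglib::mlptrainer trn;
    alglib::multilayerperceptron net;
    alglib::mlpreport rep;

    alglib::mlpcreatetrainercls(2, 2, trn);
    alglib::real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    alglib::mlpsetdataset(trn, xy, 4);

    alglib::mlpcreatec1(2, 4, 2, net);           // 2-4-2 network, SOFTMAX outputs
    alglib::mlptrainnetwork(trn, net, 5, rep);   // 5 random restarts

    alglib::real_1d_array x = "[1,0]", y;
    alglib::mlpprocess(net, x, y);               // y = posterior probabilities
*************************************************************************/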
6583 
6584 
6585 /*************************************************************************
6586 IMPORTANT: this is an "expert" version of the MLPTrain() function.  We  do
6587            not recommend using it unless you are sure that  you  need  the
6588            ability to monitor training progress.
6589 
6590 This function performs step-by-step training of the neural  network.  Here
6591 "step-by-step" means that training  starts  with  MLPStartTraining() call,
6592 and then user subsequently calls MLPContinueTraining() to perform one more
6593 iteration of the training.
6594 
6595 After call to this function trainer object remembers network and  is ready
6596 to  train  it.  However,  no  training  is  performed  until first call to
6597 MLPContinueTraining() function. Subsequent calls  to MLPContinueTraining()
6598 will advance training progress one iteration further.
6599 
6600 EXAMPLE:
6601     >
6602     > ...initialize network and trainer object....
6603     >
6604     > MLPStartTraining(Trainer, Network, True)
6605     > while MLPContinueTraining(Trainer, Network) do
6606     >     ...visualize training progress...
6607     >
6608 
6609 INPUT PARAMETERS:
6610     S           -   trainer object
6611     Network     -   neural network. It must have same number of inputs and
6612                     output/classes as was specified during creation of the
6613                     trainer object.
6614     RandomStart -   randomize network before training or not:
6615                     * True  means  that  network  is  randomized  and  its
6616                       initial state (one which was passed to  the  trainer
6617                       object) is lost.
6618                     * False  means  that  training  is  started  from  the
6619                       current state of the network
6620 
6621 OUTPUT PARAMETERS:
6622     Network     -   neural network which is ready to training (weights are
6623                     initialized, preprocessor is initialized using current
6624                     training set)
6625 
6626 NOTE: this method uses sum-of-squares error function for training.
6627 
6628 NOTE: it is expected that trainer object settings are NOT  changed  during
6629       step-by-step training, i.e. no  one  changes  stopping  criteria  or
6630       training set during training. It is possible and there is no defense
6631       against  such  actions,  but  algorithm  behavior  in  such cases is
6632       undefined and can be unpredictable.
6633 
6634   -- ALGLIB --
6635      Copyright 23.07.2012 by Bochkanov Sergey
6636 *************************************************************************/
6637 void mlpstarttraining(const mlptrainer &s, const multilayerperceptron &network, const bool randomstart, const xparams _xparams = alglib::xdefault);
6638 
6639 
6640 /*************************************************************************
6641 IMPORTANT: this is an "expert" version of the MLPTrain() function.  We  do
6642            not recommend using it unless you are sure that  you  need  the
6643            ability to monitor training progress.
6644 
6645   ! COMMERCIAL EDITION OF ALGLIB:
6646   !
6647   ! Commercial Edition of ALGLIB includes following important improvements
6648   ! of this function:
6649   ! * high-performance native backend with same C# interface (C# version)
6650   ! * multithreading support (C++ and C# versions)
6651   !
6652   ! We recommend you to read 'Working with commercial version' section  of
6653   ! ALGLIB Reference Manual in order to find out how to  use  performance-
6654   ! related features provided by commercial edition of ALGLIB.
6655 
6656 This function performs step-by-step training of the neural  network.  Here
6657 "step-by-step" means that training starts  with  MLPStartTraining()  call,
6658 and then user subsequently calls MLPContinueTraining() to perform one more
6659 iteration of the training.
6660 
6661 This  function  performs  one  more  iteration of the training and returns
6662 either True (training continues) or False (training stopped). In case True
6663 was returned, Network weights are updated according to the  current  state
6664 of the optimization progress. In case False was  returned,  no  additional
6665 updates are performed (the previous update of the network weights moved  us
6666 to the final point, and no additional updates are needed).
6667 
6668 EXAMPLE:
6669     >
6670     > [initialize network and trainer object]
6671     >
6672     > MLPStartTraining(Trainer, Network, True)
6673     > while MLPContinueTraining(Trainer, Network) do
6674     >     [visualize training progress]
6675     >
6676 
6677 INPUT PARAMETERS:
6678     S           -   trainer object
6679     Network     -   neural  network  structure,  which  is  used to  store
6680                     current state of the training process.
6681 
6682 OUTPUT PARAMETERS:
6683     Network     -   weights of the neural network  are  rewritten  by  the
6684                     current approximation.
6685 
6686 NOTE: this method uses sum-of-squares error function for training.
6687 
6688 NOTE: it is expected that trainer object settings are NOT  changed  during
6689       step-by-step training, i.e. no  one  changes  stopping  criteria  or
6690       training set during training. It is possible and there is no defense
6691       against  such  actions,  but  algorithm  behavior  in  such cases is
6692       undefined and can be unpredictable.
6693 
6694 NOTE: It  is  expected that Network is the same one which  was  passed  to
6695       MLPStartTraining() function.  However,  THIS  function  checks  only
6696       following:
6697       * that number of network inputs is consistent with trainer object
6698         settings
6699       * that number of network outputs/classes is consistent with  trainer
6700         object settings
6701       * that number of network weights is the same as number of weights in
6702         the network passed to MLPStartTraining() function
6703       Exception is thrown when these conditions are violated.
6704 
6705       It is also expected that you do not change state of the  network  on
6706       your own - the only party who has right to change network during its
6707       training is a trainer object. Any attempt to interfere with  trainer
6708       may lead to unpredictable results.
6709 
6710 
6711   -- ALGLIB --
6712      Copyright 23.07.2012 by Bochkanov Sergey
6713 *************************************************************************/
6714 bool mlpcontinuetraining(const mlptrainer &s, const multilayerperceptron &network, const xparams _xparams = alglib::xdefault);
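
/*************************************************************************
The EXAMPLE pseudocode above rendered as C++ (illustrative only;  Trn  and
Net are assumed to be an initialized trainer and network):

    alglib::mlpstarttraining(trn, net, true);       // randomized start
    while( alglib::mlpcontinuetraining(trn, net) )
    {
        // one optimizer iteration done; inspect/visualize current
        // state of Net here
    }
*************************************************************************/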
6715 
6716 
6717 /*************************************************************************
6718 Training neural networks ensemble using  bootstrap  aggregating (bagging).
6719 Modified Levenberg-Marquardt algorithm is used as base training method.
6720 
6721 INPUT PARAMETERS:
6722     Ensemble    -   model with initialized geometry
6723     XY          -   training set
6724     NPoints     -   training set size
6725     Decay       -   weight decay coefficient, >=0.001
6726     Restarts    -   restarts, >0.
6727 
6728 OUTPUT PARAMETERS:
6729     Ensemble    -   trained model
6730     Info        -   return code:
6731                     * -2, if there is a point with class number
6732                           outside of [0..NClasses-1].
6733                     * -1, if incorrect parameters were passed
6734                           (NPoints<0, Restarts<1).
6735                     *  2, if task has been solved.
6736     Rep         -   training report.
6737     OOBErrors   -   out-of-bag generalization error estimate
6738 
6739   -- ALGLIB --
6740      Copyright 17.02.2009 by Bochkanov Sergey
6741 *************************************************************************/
6742 void mlpebagginglm(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep, mlpcvreport &ooberrors, const xparams _xparams = alglib::xdefault);
6743 
6744 
6745 /*************************************************************************
6746 Training neural networks ensemble using  bootstrap  aggregating (bagging).
6747 L-BFGS algorithm is used as base training method.
6748 
6749 INPUT PARAMETERS:
6750     Ensemble    -   model with initialized geometry
6751     XY          -   training set
6752     NPoints     -   training set size
6753     Decay       -   weight decay coefficient, >=0.001
6754     Restarts    -   restarts, >0.
6755     WStep       -   stopping criterion, same as in MLPTrainLBFGS
6756     MaxIts      -   stopping criterion, same as in MLPTrainLBFGS
6757 
6758 OUTPUT PARAMETERS:
6759     Ensemble    -   trained model
6760     Info        -   return code:
6761                     * -8, if both WStep=0 and MaxIts=0
6762                     * -2, if there is a point with class number
6763                           outside of [0..NClasses-1].
6764                     * -1, if incorrect parameters were passed
6765                           (NPoints<0, Restarts<1).
6766                     *  2, if task has been solved.
6767     Rep         -   training report.
6768     OOBErrors   -   out-of-bag generalization error estimate
6769 
6770   -- ALGLIB --
6771      Copyright 17.02.2009 by Bochkanov Sergey
6772 *************************************************************************/
6773 void mlpebagginglbfgs(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const double wstep, const ae_int_t maxits, ae_int_t &info, mlpreport &rep, mlpcvreport &ooberrors, const xparams _xparams = alglib::xdefault);
6774 
6775 
6776 /*************************************************************************
6777 Training neural networks ensemble using early stopping.
6778 
6779 INPUT PARAMETERS:
6780     Ensemble    -   model with initialized geometry
6781     XY          -   training set
6782     NPoints     -   training set size
6783     Decay       -   weight decay coefficient, >=0.001
6784     Restarts    -   restarts, >0.
6785 
6786 OUTPUT PARAMETERS:
6787     Ensemble    -   trained model
6788     Info        -   return code:
6789                     * -2, if there is a point with class number
6790                           outside of [0..NClasses-1].
6791                     * -1, if incorrect parameters were passed
6792                           (NPoints<0, Restarts<1).
6793                     *  6, if task has been solved.
6794     Rep         -   training report.
6795     OOBErrors   -   out-of-bag generalization error estimate
6796 
6797   -- ALGLIB --
6798      Copyright 10.03.2009 by Bochkanov Sergey
6799 *************************************************************************/
6800 void mlpetraines(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep, const xparams _xparams = alglib::xdefault);
6801 
6802 
6803 /*************************************************************************
6804 This function trains neural network ensemble passed to this function using
6805 current dataset and early stopping training algorithm. Each early stopping
6806 round performs NRestarts  random  restarts  (thus,  EnsembleSize*NRestarts
6807 training rounds are performed in total).
6808 
6809   ! COMMERCIAL EDITION OF ALGLIB:
6810   !
6811   ! Commercial Edition of ALGLIB includes following important improvements
6812   ! of this function:
6813   ! * high-performance native backend with same C# interface (C# version)
6814   ! * multithreading support (C++ and C# versions)
6815   !
6816   ! We recommend you to read 'Working with commercial version' section  of
6817   ! ALGLIB Reference Manual in order to find out how to  use  performance-
6818   ! related features provided by commercial edition of ALGLIB.
6819 
6820 INPUT PARAMETERS:
6821     S           -   trainer object;
6822     Ensemble    -   neural network ensemble. It must have same  number  of
6823                     inputs and outputs/classes  as  was  specified  during
6824                     creation of the trainer object.
6825     NRestarts   -   number of restarts, >=0:
6826                     * NRestarts>0 means that specified  number  of  random
6827                       restarts are performed during each ES round;
6828                     * NRestarts=0 is silently replaced by 1.
6829 
6830 OUTPUT PARAMETERS:
6831     Ensemble    -   trained ensemble;
6832     Rep         -   it contains all types of errors.
6833 
6834 NOTE: this training method uses BOTH early stopping and weight decay!  So,
6835       you should select weight decay before starting training just as  you
6836       select it before training "conventional" networks.
6837 
6838 NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
6839       or  single-point  dataset  was  passed,  ensemble is filled with zero
6840       values.
6841 
6842 NOTE: this method uses sum-of-squares error function for training.
6843 
6844   -- ALGLIB --
6845      Copyright 22.08.2012 by Bochkanov Sergey
6846 *************************************************************************/
6847 void mlptrainensemblees(const mlptrainer &s, const mlpensemble &ensemble, const ae_int_t nrestarts, mlpreport &rep, const xparams _xparams = alglib::xdefault);
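
/*************************************************************************
A minimal ensemble-training sketch (illustrative only;  MLPECreateC1()  is
declared in the MLPE unit, and the dataset is made up):

    alglib::mlptrainer trn;
    alglib::mlpensemble ens;
    alglib::mlpreport rep;

    alglib::mlpcreatetrainercls(2, 2, trn);
    alglib::real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    alglib::mlpsetdataset(trn, xy, 4);

    alglib::mlpecreatec1(2, 4, 2, 10, ens);        // ensemble of 10 networks
    alglib::mlptrainensemblees(trn, ens, 3, rep);  // 3 restarts per ES round
*************************************************************************/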
6848 #endif
6849 
6850 #if defined(AE_COMPILE_CLUSTERING) || !defined(AE_PARTIAL_BUILD)
6851 /*************************************************************************
6852 This function initializes clusterizer object. Newly initialized object  is
6853 empty, i.e. it does not contain dataset. You should use it as follows:
6854 1. creation
6855 2. dataset is added with ClusterizerSetPoints()
6856 3. additional parameters are set
6857 4. clusterization is performed with one of the clustering functions
6858 
6859   -- ALGLIB --
6860      Copyright 10.07.2012 by Bochkanov Sergey
6861 *************************************************************************/
6862 void clusterizercreate(clusterizerstate &s, const xparams _xparams = alglib::xdefault);
6863 
6864 
6865 /*************************************************************************
6866 This function adds dataset to the clusterizer structure.
6867 
6868 This function overrides all previous calls  of  ClusterizerSetPoints()  or
6869 ClusterizerSetDistances().
6870 
6871 INPUT PARAMETERS:
6872     S       -   clusterizer state, initialized by ClusterizerCreate()
6873     XY      -   array[NPoints,NFeatures], dataset
6874     NPoints -   number of points, >=0
6875     NFeatures-  number of features, >=1
6876     DistType-   distance function:
6877                 *  0    Chebyshev distance  (L-inf norm)
6878                 *  1    city block distance (L1 norm)
6879                 *  2    Euclidean distance  (L2 norm), non-squared
6880                 * 10    Pearson correlation:
6881                         dist(a,b) = 1-corr(a,b)
6882                 * 11    Absolute Pearson correlation:
6883                         dist(a,b) = 1-|corr(a,b)|
6884                 * 12    Uncentered Pearson correlation (cosine of the angle):
6885                         dist(a,b) = a'*b/(|a|*|b|)
6886                 * 13    Absolute uncentered Pearson correlation
6887                         dist(a,b) = |a'*b|/(|a|*|b|)
6888                 * 20    Spearman rank correlation:
6889                         dist(a,b) = 1-rankcorr(a,b)
6890                 * 21    Absolute Spearman rank correlation
6891                         dist(a,b) = 1-|rankcorr(a,b)|
6892 
6893 NOTE 1: different distance functions have different performance penalty:
6894         * Euclidean or Pearson correlation distances are the fastest ones
6895         * Spearman correlation distance function is a bit slower
6896         * city block and Chebyshev distances are an order of magnitude slower
6897 
6898         The reason behind the difference in performance is that correlation-based
6899         distance functions are computed using optimized linear algebra kernels,
6900         while Chebyshev and city block distance functions are computed using
6901         simple nested loops with two branches at each iteration.
6902 
6903 NOTE 2: different clustering algorithms have different limitations:
6904         * agglomerative hierarchical clustering algorithms may be used with
6905           any kind of distance metric
6906         * k-means++ clustering algorithm may be used only  with  Euclidean
6907           distance function
6908         Thus, list of specific clustering algorithms you may  use  depends
6909         on distance function you specify when you set your dataset.
6910 
6911   -- ALGLIB --
6912      Copyright 10.07.2012 by Bochkanov Sergey
6913 *************************************************************************/
6914 void clusterizersetpoints(const clusterizerstate &s, const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, const xparams _xparams = alglib::xdefault);
6915 void clusterizersetpoints(const clusterizerstate &s, const real_2d_array &xy, const ae_int_t disttype, const xparams _xparams = alglib::xdefault);
6916 
6917 
6918 /*************************************************************************
6919 This function adds dataset given by distance  matrix  to  the  clusterizer
6920 structure. It is important that dataset is not  given  explicitly  -  only
6921 distance matrix is given.
6922 
6923 This function overrides all previous calls  of  ClusterizerSetPoints()  or
6924 ClusterizerSetDistances().
6925 
6926 INPUT PARAMETERS:
6927     S       -   clusterizer state, initialized by ClusterizerCreate()
6928     D       -   array[NPoints,NPoints], distance matrix given by its upper
6929                 or lower triangle (main diagonal is  ignored  because  its
6930                 entries are expected to be zero).
6931     NPoints -   number of points
6932     IsUpper -   whether upper or lower triangle of D is given.
6933 
6934 NOTE 1: different clustering algorithms have different limitations:
6935         * agglomerative hierarchical clustering algorithms may be used with
6936           any kind of distance metric, including one  which  is  given  by
6937           distance matrix
6938         * k-means++ clustering algorithm may be used only  with  Euclidean
6939           distance function and explicitly given points - it  can  not  be
6940           used with dataset given by distance matrix
6941         Thus, if you call this function, you will be unable to use k-means
6942         clustering algorithm to process your problem.
6943 
6944   -- ALGLIB --
6945      Copyright 10.07.2012 by Bochkanov Sergey
6946 *************************************************************************/
6947 void clusterizersetdistances(const clusterizerstate &s, const real_2d_array &d, const ae_int_t npoints, const bool isupper, const xparams _xparams = alglib::xdefault);
6948 void clusterizersetdistances(const clusterizerstate &s, const real_2d_array &d, const bool isupper, const xparams _xparams = alglib::xdefault);
6949 
6950 
6951 /*************************************************************************
6952 This function sets agglomerative hierarchical clustering algorithm
6953 
6954 INPUT PARAMETERS:
6955     S       -   clusterizer state, initialized by ClusterizerCreate()
6956     Algo    -   algorithm type:
6957                 * 0     complete linkage (default algorithm)
6958                 * 1     single linkage
6959                 * 2     unweighted average linkage
6960                 * 3     weighted average linkage
6961                 * 4     Ward's method
6962 
6963 NOTE: Ward's method works correctly only with Euclidean  distance,  that's
6964       why algorithm will return negative termination  code  (failure)  for
6965       any other distance type.
6966 
6967       It is possible, however,  to  use  this  method  with  user-supplied
6968       distance matrix. It  is  your  responsibility  to pass one which was
6969       calculated with Euclidean distance function.
6970 
6971   -- ALGLIB --
6972      Copyright 10.07.2012 by Bochkanov Sergey
6973 *************************************************************************/
6974 void clusterizersetahcalgo(const clusterizerstate &s, const ae_int_t algo, const xparams _xparams = alglib::xdefault);
6975 
6976 
6977 /*************************************************************************
6978 This  function  sets k-means properties:  number  of  restarts and maximum
6979 number of iterations per one run.
6980 
6981 INPUT PARAMETERS:
6982     S       -   clusterizer state, initialized by ClusterizerCreate()
6983     Restarts-   restarts count, >=1.
6984                 k-means++ algorithm performs several restarts and  chooses
6985                 best set of centers (one with minimum sum of squared distances).
6986     MaxIts  -   maximum number of k-means iterations performed during  one
6987                 run. >=0, zero value means that algorithm performs unlimited
6988                 number of iterations.
6989 
6990   -- ALGLIB --
6991      Copyright 10.07.2012 by Bochkanov Sergey
6992 *************************************************************************/
6993 void clusterizersetkmeanslimits(const clusterizerstate &s, const ae_int_t restarts, const ae_int_t maxits, const xparams _xparams = alglib::xdefault);
6994 
6995 
6996 /*************************************************************************
6997 This function sets k-means  initialization  algorithm.  Several  different
6998 algorithms can be chosen, including k-means++.
6999 
7000 INPUT PARAMETERS:
7001     S       -   clusterizer state, initialized by ClusterizerCreate()
7002     InitAlgo-   initialization algorithm:
7003                 * 0  automatic selection ( different  versions  of  ALGLIB
7004                      may select different algorithms)
7005                 * 1  random initialization
7006                 * 2  k-means++ initialization  (best  quality  of  initial
7007                      centers, but long  non-parallelizable  initialization
7008                      phase with bad cache locality)
7009                 * 3  "fast-greedy"  algorithm  with  efficient,  easy   to
7010                      parallelize initialization. Quality of initial centers
7011                      is  somewhat  worse  than  that  of  k-means++.  This
7012                      algorithm is a default one in the current version  of
7013                      ALGLIB.
7014                 *-1  "debug" algorithm which always selects first  K  rows
7015                      of dataset; this algorithm is used for debug purposes
7016                      only. Do not use it in the industrial code!
7017 
7018   -- ALGLIB --
7019      Copyright 21.01.2015 by Bochkanov Sergey
7020 *************************************************************************/
7021 void clusterizersetkmeansinit(const clusterizerstate &s, const ae_int_t initalgo, const xparams _xparams = alglib::xdefault);
7022 
7023 
7024 /*************************************************************************
7025 This  function  sets  seed  which  is  used to initialize internal RNG. By
7026 default, deterministic seed is used - same for each run of clusterizer. If
7027 you specify non-deterministic  seed  value,  then  some  algorithms  which
7028 depend on random initialization (in current version: k-means)  may  return
7029 slightly different results after each run.
7030 
7031 INPUT PARAMETERS:
7032     S       -   clusterizer state, initialized by ClusterizerCreate()
7033     Seed    -   seed:
7034                 * positive values = use deterministic seed for each run of
7035                   algorithms which depend on random initialization
7036                 * zero or negative values = use non-deterministic seed
7037 
7038   -- ALGLIB --
7039      Copyright 08.06.2017 by Bochkanov Sergey
7040 *************************************************************************/
7041 void clusterizersetseed(const clusterizerstate &s, const ae_int_t seed, const xparams _xparams = alglib::xdefault);
7042 
7043 
7044 /*************************************************************************
7045 This function performs agglomerative hierarchical clustering
7046 
7047   ! COMMERCIAL EDITION OF ALGLIB:
7048   !
7049   ! Commercial Edition of ALGLIB includes following important improvements
7050   ! of this function:
7051   ! * high-performance native backend with same C# interface (C# version)
7052   ! * multithreading support (C++ and C# versions)
7053   ! * hardware vendor (Intel) implementations of linear algebra primitives
7054   !   (C++ and C# versions, x86/x64 platform)
7055   !
7056   ! We recommend you to read 'Working with commercial version' section  of
7057   ! ALGLIB Reference Manual in order to find out how to  use  performance-
7058   ! related features provided by commercial edition of ALGLIB.
7059 
7060 NOTE: Agglomerative  hierarchical  clustering  algorithm  has two  phases:
7061       distance matrix calculation and clustering  itself. Only first phase
7062       (distance matrix  calculation)  is  accelerated  by  Intel  MKL  and
7063       multithreading. Thus, acceleration is significant only for medium or
7064       high-dimensional problems.
7065 
7066       Although activating multithreading gives some speedup  over  single-
7067       threaded execution, you  should  not  expect  nearly-linear  scaling
7068       with respect to cores count.
7069 
7070 INPUT PARAMETERS:
7071     S       -   clusterizer state, initialized by ClusterizerCreate()
7072 
7073 OUTPUT PARAMETERS:
7074     Rep     -   clustering results; see description of AHCReport
7075                 structure for more information.
7076 
7077 NOTE 1: hierarchical clustering algorithms require large amounts of memory.
7078         In particular, this implementation needs  sizeof(double)*NPoints^2
7079         bytes, which are used to store distance matrix. In  case  we  work
7080         with user-supplied matrix, this amount is multiplied by 2 (we have
7081         to store original matrix and to work with its copy).
7082 
7083         For example, problem with 10000 points  would require 800M of RAM,
7084         even when working in a 1-dimensional space.
7085 
7086   -- ALGLIB --
7087      Copyright 10.07.2012 by Bochkanov Sergey
7088 *************************************************************************/
7089 void clusterizerrunahc(const clusterizerstate &s, ahcreport &rep, const xparams _xparams = alglib::xdefault);
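
/*************************************************************************
A minimal AHC sketch (illustrative only;  five  2-dimensional  points  are
made up; Euclidean distance, complete linkage):

    alglib::clusterizerstate s;
    alglib::ahcreport rep;

    alglib::clusterizercreate(s);
    alglib::real_2d_array xy = "[[1,1],[1,2],[4,1],[2,3],[4,1.5]]";
    alglib::clusterizersetpoints(s, xy, 2);   // DistType=2: Euclidean
    alglib::clusterizersetahcalgo(s, 0);      // complete linkage
    alglib::clusterizerrunahc(s, rep);
    // rep.z now stores the sequence of merges (see AHCReport)
*************************************************************************/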
7090 
7091 
7092 /*************************************************************************
7093 This function performs clustering by k-means++ algorithm.
7094 
7095 You may change algorithm properties by calling:
7096 * ClusterizerSetKMeansLimits() to change number of restarts or iterations
7097 * ClusterizerSetKMeansInit() to change initialization algorithm
7098 
7099 By  default,  one  restart  and  unlimited number of iterations are  used.
7100 Initialization algorithm is chosen automatically.
7101 
7102   ! COMMERCIAL EDITION OF ALGLIB:
7103   !
7104   ! Commercial Edition of ALGLIB includes following important improvements
7105   ! of this function:
7106   ! * high-performance native backend with same C# interface (C# version)
7107   ! * multithreading support (C++ and C# versions)
7108   ! * hardware vendor (Intel) implementations of linear algebra primitives
7109   !   (C++ and C# versions, x86/x64 platform)
7110   !
7111   ! We recommend you to read 'Working with commercial version' section  of
7112   ! ALGLIB Reference Manual in order to find out how to  use  performance-
7113   ! related features provided by commercial edition of ALGLIB.
7114 
7115 NOTE: k-means clustering  algorithm has two  phases:  selection of initial
7116       centers and clustering  itself.  ALGLIB  parallelizes  both  phases.
7117       Parallel version is optimized for the following  scenario: medium or
7118       high-dimensional problem (8 or more dimensions) with large number of
7119       points and clusters. However, some speed-up  can  be  obtained  even
7120       when assumptions above are violated.
7121 
7122 INPUT PARAMETERS:
7123     S       -   clusterizer state, initialized by ClusterizerCreate()
7124     K       -   number of clusters, K>=0.
7125                 K  can  be  zero only when algorithm is called  for  empty
7126                 dataset,  in   this   case   completion  code  is  set  to
7127                 success (+1).
7128                 If  K=0  and  dataset  size  is  non-zero,  we   can   not
7129                 meaningfully assign points to some center  (there  are  no
7130                 centers because K=0) and  return  -3  as  completion  code
7131                 (failure).
7132 
7133 OUTPUT PARAMETERS:
7134     Rep     -   clustering results; see description of KMeansReport
7135                 structure for more information.
7136 
7137 NOTE 1: k-means  clustering  can  be  performed  only  for  datasets  with
7138         Euclidean  distance  function.  Algorithm  will  return   negative
7139         completion code in Rep.TerminationType in case dataset  was  added
7140         to clusterizer with DistType other than Euclidean (or dataset  was
7141         specified by distance matrix instead of explicitly given points).
7142 
7143 NOTE 2: by default, k-means uses non-deterministic seed to initialize  RNG
7144         which is used to select initial centers. As  result,  each  run of
7145         algorithm may return different values. If you  need  deterministic
7146         behavior, use ClusterizerSetSeed() function.
7147 
7148   -- ALGLIB --
7149      Copyright 10.07.2012 by Bochkanov Sergey
7150 *************************************************************************/
7151 void clusterizerrunkmeans(const clusterizerstate &s, const ae_int_t k, kmeansreport &rep, const xparams _xparams = alglib::xdefault);
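
/*************************************************************************
A minimal k-means sketch (illustrative only; dataset is made up):

    alglib::clusterizerstate s;
    alglib::kmeansreport rep;

    alglib::clusterizercreate(s);
    alglib::real_2d_array xy = "[[1,1],[1,2],[4,1],[2,3],[4,1.5]]";
    alglib::clusterizersetpoints(s, xy, 2);       // k-means needs Euclidean
    alglib::clusterizersetkmeanslimits(s, 5, 0);  // 5 restarts, unlimited its
    alglib::clusterizerrunkmeans(s, 2, rep);      // K=2
    // rep.cidx[i] = cluster of point i, rep.c = array[K,NFeatures] of centers
*************************************************************************/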
7152 
7153 
7154 /*************************************************************************
7155 This function returns distance matrix for dataset
7156 
7157   ! COMMERCIAL EDITION OF ALGLIB:
7158   !
7159   ! Commercial Edition of ALGLIB includes following important improvements
7160   ! of this function:
7161   ! * high-performance native backend with same C# interface (C# version)
7162   ! * multithreading support (C++ and C# versions)
7163   ! * hardware vendor (Intel) implementations of linear algebra primitives
7164   !   (C++ and C# versions, x86/x64 platform)
7165   !
7166   ! We recommend you to read 'Working with commercial version' section  of
7167   ! ALGLIB Reference Manual in order to find out how to  use  performance-
7168   ! related features provided by commercial edition of ALGLIB.
7169 
7170 INPUT PARAMETERS:
7171     XY      -   array[NPoints,NFeatures], dataset
7172     NPoints -   number of points, >=0
7173     NFeatures-  number of features, >=1
7174     DistType-   distance function:
7175                 *  0    Chebyshev distance  (L-inf norm)
7176                 *  1    city block distance (L1 norm)
7177                 *  2    Euclidean distance  (L2 norm, non-squared)
7178                 * 10    Pearson correlation:
7179                         dist(a,b) = 1-corr(a,b)
7180                 * 11    Absolute Pearson correlation:
7181                         dist(a,b) = 1-|corr(a,b)|
7182                 * 12    Uncentered Pearson correlation (cosine of the angle):
7183                         dist(a,b) = a'*b/(|a|*|b|)
7184                 * 13    Absolute uncentered Pearson correlation
7185                         dist(a,b) = |a'*b|/(|a|*|b|)
7186                 * 20    Spearman rank correlation:
7187                         dist(a,b) = 1-rankcorr(a,b)
7188                 * 21    Absolute Spearman rank correlation
7189                         dist(a,b) = 1-|rankcorr(a,b)|
7190 
7191 OUTPUT PARAMETERS:
7192     D       -   array[NPoints,NPoints], distance matrix
7193                 (full matrix is returned, with lower and upper triangles)
7194 
7195 NOTE:  different distance functions have different performance penalty:
7196        * Euclidean or Pearson correlation distances are the fastest ones
7197        * Spearman correlation distance function is a bit slower
7198        * city block and Chebyshev distances are an order of magnitude slower
7199 
7200        The reason behind the difference in performance is that correlation-based
7201        distance functions are computed using optimized linear algebra kernels,
7202        while Chebyshev and city block distance functions are computed using
7203        simple nested loops with two branches at each iteration.
7204 
7205   -- ALGLIB --
7206      Copyright 10.07.2012 by Bochkanov Sergey
7207 *************************************************************************/
7208 void clusterizergetdistances(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, real_2d_array &d, const xparams _xparams = alglib::xdefault);
7209 
7210 
7211 /*************************************************************************
7212 This function takes as input clusterization report Rep,  desired  clusters
7213 count K, and builds top K clusters from hierarchical clusterization  tree.
7214 It returns assignment of points to clusters (array of cluster indexes).
7215 
7216 INPUT PARAMETERS:
7217     Rep     -   report from ClusterizerRunAHC() performed on XY
7218     K       -   desired number of clusters, 1<=K<=NPoints.
7219                 K can be zero only when NPoints=0.
7220 
7221 OUTPUT PARAMETERS:
7222     CIdx    -   array[NPoints], I-th element contains cluster index  (from
7223                 0 to K-1) for I-th point of the dataset.
7224     CZ      -   array[K]. This array allows  to  convert  cluster  indexes
7225                 returned by this function to indexes used by  Rep.Z.  J-th
7226                 cluster returned by this function corresponds to  CZ[J]-th
7227                 cluster stored in Rep.Z/PZ/PM.
7228                 It is guaranteed that CZ[I]<CZ[I+1].
7229 
7230 NOTE: K clusters built by this subroutine are assumed to have no hierarchy.
7231       Although  they  were  obtained  by  manipulation with top K nodes of
7232       dendrogram  (i.e.  hierarchical  decomposition  of  dataset),   this
7233       function does not return information about hierarchy.  Each  of  the
7234       clusters stands on its own.
7235 
7236 NOTE: Cluster indexes returned by this function  do  not  correspond   to
7237       indexes returned in Rep.Z/PZ/PM. Either you work  with  hierarchical
7238       representation of the dataset (dendrogram), or you work with  "flat"
7239       representation returned by this function. Each of the representations
7240       has its own cluster indexing  system  (the  former  uses  the  range
7241       [0..2*NPoints-2], while the latter uses [0..K-1]).  It  is  possible
7242       to perform conversion from one system to another by  means  of  the
7243       CZ array returned by this function, which allows you to convert  the
7244       indexes stored in CIdx to the numeration system used by Rep.Z.
7245 
7246 NOTE: this subroutine is optimized for moderate values of K. Say, for  K=5
7247       it will perform many times faster than  for  K=100.  Its  worst-case
7248       performance is O(N*K), although in the average case it performs better
7249       (up to O(N*log(K))).
7250 
7251   -- ALGLIB --
7252      Copyright 10.07.2012 by Bochkanov Sergey
7253 *************************************************************************/
7254 void clusterizergetkclusters(const ahcreport &rep, const ae_int_t k, integer_1d_array &cidx, integer_1d_array &cz, const xparams _xparams = alglib::xdefault);
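
/*************************************************************************
Continuing the AHC sketch shown earlier, extraction of a flat  2-cluster
partition from the dendrogram (illustrative only):

    alglib::integer_1d_array cidx, cz;
    alglib::clusterizergetkclusters(rep, 2, cidx, cz);   // rep from ClusterizerRunAHC()
    // cidx[i] in [0,1] is the flat cluster index of point i;
    // cz maps these indexes back to the Rep.Z numeration
*************************************************************************/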
7255 
7256 
7257 /*************************************************************************
7258 This  function  accepts  AHC  report  Rep,  desired  minimum  intercluster
7259 distance and returns top clusters from  hierarchical  clusterization  tree
7260 which are separated by distance R or HIGHER.
7261 
7262 It returns assignment of points to clusters (array of cluster indexes).
7263 
7264 There is one more function with similar name - ClusterizerSeparatedByCorr,
7265 which returns clusters with intercluster correlation equal to R  or  LOWER
7266 (note: higher for distance, lower for correlation).
7267 
7268 INPUT PARAMETERS:
7269     Rep     -   report from ClusterizerRunAHC() performed on XY
7270     R       -   desired minimum intercluster distance, R>=0
7271 
7272 OUTPUT PARAMETERS:
7273     K       -   number of clusters, 1<=K<=NPoints
7274     CIdx    -   array[NPoints], I-th element contains cluster index  (from
7275                 0 to K-1) for I-th point of the dataset.
7276     CZ      -   array[K]. This array allows  to  convert  cluster  indexes
7277                 returned by this function to indexes used by  Rep.Z.  J-th
7278                 cluster returned by this function corresponds to  CZ[J]-th
7279                 cluster stored in Rep.Z/PZ/PM.
7280                 It is guaranteed that CZ[I]<CZ[I+1].
7281 
7282 NOTE: K clusters built by this subroutine are assumed to have no hierarchy.
7283       Although  they  were  obtained  by  manipulation with top K nodes of
7284       dendrogram  (i.e.  hierarchical  decomposition  of  dataset),   this
7285       function does not return information about hierarchy.  Each  of  the
7286       clusters stands on its own.
7287 
7288 NOTE: Cluster indexes returned by this function  do  not  correspond   to
7289       indexes returned in Rep.Z/PZ/PM. Either you work  with  hierarchical
7290       representation of the dataset (dendrogram), or you work with  "flat"
7291       representation returned by this function. Each of the representations
7292       has its own cluster indexing  system  (the  former  uses  the  range
7293       [0..2*NPoints-2], while the latter uses [0..K-1]).  It  is  possible
7294       to perform conversion from one system to another by  means  of  the
7295       CZ array returned by this function, which allows you to convert  the
7296       indexes stored in CIdx to the numeration system used by Rep.Z.
7297 
7298 NOTE: this subroutine is optimized for moderate values of K. Say, for  K=5
7299       it will perform many times faster than  for  K=100.  Its  worst-case
7300       performance is O(N*K), although in the average case it performs better
7301       (up to O(N*log(K))).
7302 
7303   -- ALGLIB --
7304      Copyright 10.07.2012 by Bochkanov Sergey
7305 *************************************************************************/
7306 void clusterizerseparatedbydist(const ahcreport &rep, const double r, ae_int_t &k, integer_1d_array &cidx, integer_1d_array &cz, const xparams _xparams = alglib::xdefault);
7307 
7308 
7309 /*************************************************************************
7310 This  function  accepts  AHC  report  Rep,  desired  maximum  intercluster
7311 correlation and returns top clusters from hierarchical clusterization tree
7312 which are separated by correlation R or LOWER.
7313 
7314 It returns assignment of points to clusters (array of cluster indexes).
7315 
7316 There is one more function with similar name - ClusterizerSeparatedByDist,
7317 which returns clusters with intercluster distance equal  to  R  or  HIGHER
7318 (note: higher for distance, lower for correlation).
7319 
7320 INPUT PARAMETERS:
7321     Rep     -   report from ClusterizerRunAHC() performed on XY
7322     R       -   desired maximum intercluster correlation, -1<=R<=+1
7323 
7324 OUTPUT PARAMETERS:
7325     K       -   number of clusters, 1<=K<=NPoints
7326     CIdx    -   array[NPoints], I-th element contains cluster index  (from
7327                 0 to K-1) for I-th point of the dataset.
7328     CZ      -   array[K]. This array allows  to  convert  cluster  indexes
7329                 returned by this function to indexes used by  Rep.Z.  J-th
7330                 cluster returned by this function corresponds to  CZ[J]-th
7331                 cluster stored in Rep.Z/PZ/PM.
7332                 It is guaranteed that CZ[I]<CZ[I+1].
7333 
NOTE: K clusters built by this subroutine are assumed to have no hierarchy.
      Although they were obtained by manipulating the top K nodes  of  the
      dendrogram (i.e. the hierarchical decomposition of the dataset), this
      function does not return information about the  hierarchy;  each  of
      the clusters stands on its own.
7339 
NOTE: Cluster indexes returned by this function  do  not  correspond   to
      indexes returned in Rep.Z/PZ/PM. Either you work with a hierarchical
      representation of the dataset (dendrogram), or you work with a "flat"
      representation returned by this function. Each  representation  uses
      its own cluster indexing system: the former uses  [0..2*NPoints-2],
      while the latter uses [0..K-1]. However, you can convert between the
      two systems by means of the CZ array  returned  by  this  function,
      which maps indexes stored in CIdx to the numeration system  used  by
      Rep.Z.
7349 
NOTE: this subroutine is optimized for moderate values of K. Say, for  K=5
      it will perform many times faster than for  K=100.  Its  worst-case
      performance is O(N*K), although in the average case it performs
      better (up to O(N*log(K))).
7354 
7355   -- ALGLIB --
7356      Copyright 10.07.2012 by Bochkanov Sergey
7357 *************************************************************************/
7358 void clusterizerseparatedbycorr(const ahcreport &rep, const double r, ae_int_t &k, integer_1d_array &cidx, integer_1d_array &cz, const xparams _xparams = alglib::xdefault);
7359 #endif
7360 
7361 #if defined(AE_COMPILE_DFOREST) || !defined(AE_PARTIAL_BUILD)
7362 /*************************************************************************
7363 This function serializes data structure to string.
7364 
Important properties of s_out:
* it contains alphanumeric characters, dots, underscores, minus signs
* these symbols are grouped into words, which are separated by spaces
  and Windows-style (CR+LF) newlines
* although the serializer uses spaces and CR+LF as  separators,  you  can
  replace any separator character by an arbitrary combination of  spaces,
  tabs, Windows or Unix newlines. This allows flexible reformatting    of
  the string in case you want to include it into a text  or  XML   file.
  But you should not insert separators into the middle  of  the  "words",
  nor should you change the case of letters.
* s_out can be freely moved between 32-bit and 64-bit systems, little
  and big endian machines, and so on. You can serialize a structure on  a
  32-bit machine and unserialize it on a 64-bit one (or vice versa),   or
  serialize it on SPARC and unserialize it on x86. You can also serialize
  it in the C++ version of ALGLIB and unserialize it in the C#  one,  and
  vice versa.
7381 *************************************************************************/
7382 void dfserialize(decisionforest &obj, std::string &s_out);
7383 
7384 
7385 /*************************************************************************
7386 This function unserializes data structure from string.
7387 *************************************************************************/
7388 void dfunserialize(const std::string &s_in, decisionforest &obj);
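
/*************************************************************************
A minimal round-trip sketch for the two functions above, assuming df holds
a decision forest built elsewhere (e.g. by dfbuilderbuildrandomforest()):

    std::string s;
    alglib::dfserialize(df, s);          // model -> portable text
    alglib::decisionforest df2;
    alglib::dfunserialize(s, df2);       // text -> equivalent model
*************************************************************************/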
7389 
7390 
7391 
7392 
7393 /*************************************************************************
7394 This function serializes data structure to C++ stream.
7395 
The data stream generated by this function is  the  same  as  the  string
representation generated by the string  version  of  the  serializer   -
alphanumeric characters, dots, underscores, minus signs,  grouped   into
words separated by spaces and CR+LF.

We recommend you to read the comments on the string version of the
serializer to find out more about serialization of ALGLIB objects.
7403 *************************************************************************/
7404 void dfserialize(decisionforest &obj, std::ostream &s_out);
7405 
7406 
7407 /*************************************************************************
7408 This function unserializes data structure from stream.
7409 *************************************************************************/
7410 void dfunserialize(const std::istream &s_in, decisionforest &obj);
7411 
7412 
7413 /*************************************************************************
7414 This function creates buffer  structure  which  can  be  used  to  perform
7415 parallel inference requests.
7416 
DF subpackage provides two sets of computing functions -  ones  which  use
the internal buffer of the DF model (these functions are  single-threaded
because they use the same buffer, which cannot be shared between threads),
and ones which use an external buffer.

This function is used to initialize the external buffer.
7423 
7424 INPUT PARAMETERS
7425     Model       -   DF model which is associated with newly created buffer
7426 
7427 OUTPUT PARAMETERS
7428     Buf         -   external buffer.
7429 
7430 
IMPORTANT: the buffer object should be used only with the model which was
           used to initialize it. Any attempt to use the  buffer  with  a
           different object is dangerous - you may get an integrity check
           failure (exception) because the sizes of the internal arrays do
           not fit the dimensions of the model structure.
7436 
7437   -- ALGLIB --
7438      Copyright 15.02.2019 by Bochkanov Sergey
7439 *************************************************************************/
7440 void dfcreatebuffer(const decisionforest &model, decisionforestbuffer &buf, const xparams _xparams = alglib::xdefault);
7441 
7442 
7443 /*************************************************************************
7444 This subroutine creates DecisionForestBuilder  object  which  is  used  to
7445 train random forests.
7446 
7447 By default, new builder stores empty dataset and some  reasonable  default
7448 settings. At the very least, you should specify dataset prior to  building
7449 decision forest. You can also tweak settings of  the  forest  construction
algorithm (recommended, although the default settings should work well).

The following actions are mandatory:
7453 * calling dfbuildersetdataset() to specify dataset
7454 * calling dfbuilderbuildrandomforest() to build random forest using current
7455   dataset and default settings
7456 
7457 Additionally, you may call:
7458 * dfbuildersetrndvars() or dfbuildersetrndvarsratio() to specify number of
7459   variables randomly chosen for each split
7460 * dfbuildersetsubsampleratio() to specify fraction of the dataset randomly
7461   subsampled to build each tree
7462 * dfbuildersetseed() to control random seed chosen for tree construction
7463 
7464 INPUT PARAMETERS:
7465     none
7466 
7467 OUTPUT PARAMETERS:
7468     S           -   decision forest builder
7469 
7470   -- ALGLIB --
7471      Copyright 21.05.2018 by Bochkanov Sergey
7472 *************************************************************************/
7473 void dfbuildercreate(decisionforestbuilder &s, const xparams _xparams = alglib::xdefault);
7474 
7475 
7476 /*************************************************************************
7477 This subroutine adds dense dataset to the internal storage of the  builder
7478 object. Specifying your dataset in the dense format means that  the  dense
7479 version of the forest construction algorithm will be invoked.
7480 
7481 INPUT PARAMETERS:
7482     S           -   decision forest builder object
7483     XY          -   array[NPoints,NVars+1] (minimum size; actual size  can
7484                     be larger, only leading part is used anyway), dataset:
7485                     * first NVars elements of each row store values of the
7486                       independent variables
                    * last  column  stores class number (in 0...NClasses-1)
7488                       or real value of the dependent variable
7489     NPoints     -   number of rows in the dataset, NPoints>=1
7490     NVars       -   number of independent variables, NVars>=1
7491     NClasses    -   indicates type of the problem being solved:
7492                     * NClasses>=2 means  that  classification  problem  is
7493                       solved  (last  column  of  the  dataset stores class
7494                       number)
7495                     * NClasses=1 means that regression problem  is  solved
7496                       (last column of the dataset stores variable value)
7497 
7498 OUTPUT PARAMETERS:
7499     S           -   decision forest builder
7500 
7501   -- ALGLIB --
7502      Copyright 21.05.2018 by Bochkanov Sergey
7503 *************************************************************************/
7504 void dfbuildersetdataset(const decisionforestbuilder &s, const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const xparams _xparams = alglib::xdefault);
7505 
7506 
7507 /*************************************************************************
This function sets the number of variables (in the [1,NVars] range)  used
by the random forest construction algorithm.
7510 
7511 The default option is to use roughly sqrt(NVars) variables.
7512 
7513 INPUT PARAMETERS:
7514     S           -   decision forest builder object
7515     RndVars     -   number of randomly selected variables; values  outside
7516                     of [1,NVars] range are silently clipped.
7517 
7518 OUTPUT PARAMETERS:
7519     S           -   decision forest builder
7520 
7521   -- ALGLIB --
7522      Copyright 21.05.2018 by Bochkanov Sergey
7523 *************************************************************************/
7524 void dfbuildersetrndvars(const decisionforestbuilder &s, const ae_int_t rndvars, const xparams _xparams = alglib::xdefault);
7525 
7526 
7527 /*************************************************************************
This function sets the number of variables used  by  the  random   forest
construction algorithm as a fraction (in the (0,1)  range)  of  the  total
variable count.
7530 
7531 The default option is to use roughly sqrt(NVars) variables.
7532 
7533 INPUT PARAMETERS:
7534     S           -   decision forest builder object
7535     F           -   round(NVars*F) variables are selected
7536 
7537 OUTPUT PARAMETERS:
7538     S           -   decision forest builder
7539 
7540   -- ALGLIB --
7541      Copyright 21.05.2018 by Bochkanov Sergey
7542 *************************************************************************/
7543 void dfbuildersetrndvarsratio(const decisionforestbuilder &s, const double f, const xparams _xparams = alglib::xdefault);
7544 
7545 
7546 /*************************************************************************
This function tells the random forest builder to automatically choose the
number of variables used by the  random  forest  construction  algorithm.
Roughly sqrt(NVars) variables will be used.
7550 
7551 INPUT PARAMETERS:
7552     S           -   decision forest builder object
7553 
7554 OUTPUT PARAMETERS:
7555     S           -   decision forest builder
7556 
7557   -- ALGLIB --
7558      Copyright 21.05.2018 by Bochkanov Sergey
7559 *************************************************************************/
7560 void dfbuildersetrndvarsauto(const decisionforestbuilder &s, const xparams _xparams = alglib::xdefault);
7561 
7562 
7563 /*************************************************************************
This function sets the size of the  dataset  subsample  generated  by  the
random forest construction algorithm. The size is specified as a  fraction
of the total dataset size.
7567 
7568 The default option is to use 50% of the dataset for training, 50% for  the
7569 OOB estimates. You can decrease fraction F down to 10%, 1% or  even  below
7570 in order to reduce overfitting.
7571 
7572 INPUT PARAMETERS:
7573     S           -   decision forest builder object
7574     F           -   fraction of the dataset to use, in (0,1] range. Values
7575                     outside of this range will  be  silently  clipped.  At
7576                     least one element is always selected for the  training
7577                     set.
7578 
7579 OUTPUT PARAMETERS:
7580     S           -   decision forest builder
7581 
7582   -- ALGLIB --
7583      Copyright 21.05.2018 by Bochkanov Sergey
7584 *************************************************************************/
7585 void dfbuildersetsubsampleratio(const decisionforestbuilder &s, const double f, const xparams _xparams = alglib::xdefault);
7586 
7587 
7588 /*************************************************************************
7589 This function sets seed used by internal RNG for  random  subsampling  and
7590 random selection of variable subsets.
7591 
By default, a random seed is used, i.e. every time  you  build  a  random
forest, the generator is seeded with a new value obtained from the system-
wide RNG. Thus, the decision forest builder returns non-deterministic
results. You can change this behavior by specifying a fixed positive seed
value.
7596 
7597 INPUT PARAMETERS:
7598     S           -   decision forest builder object
7599     SeedVal     -   seed value:
7600                     * positive values are used for seeding RNG with fixed
7601                       seed, i.e. subsequent runs on same data will return
7602                       same random forests
7603                     * non-positive seed means that random seed is used
7604                       for every run of builder, i.e. subsequent  runs  on
7605                       same datasets will return slightly different random
7606                       forests
7607 
7608 OUTPUT PARAMETERS:
    S           -   decision forest builder
7610 
7611   -- ALGLIB --
7612      Copyright 21.05.2018 by Bochkanov Sergey
7613 *************************************************************************/
7614 void dfbuildersetseed(const decisionforestbuilder &s, const ae_int_t seedval, const xparams _xparams = alglib::xdefault);
7615 
7616 
7617 /*************************************************************************
7618 This function sets random decision forest construction algorithm.
7619 
As of now, only one random forest construction algorithm is  supported  -
a dense "baseline" RDF algorithm.
7622 
7623 INPUT PARAMETERS:
7624     S           -   decision forest builder object
7625     AlgoType    -   algorithm type:
7626                     * 0 = baseline dense RDF
7627 
7628 OUTPUT PARAMETERS:
    S           -   decision forest builder
7630 
7631   -- ALGLIB --
7632      Copyright 21.05.2018 by Bochkanov Sergey
7633 *************************************************************************/
7634 void dfbuildersetrdfalgo(const decisionforestbuilder &s, const ae_int_t algotype, const xparams _xparams = alglib::xdefault);
7635 
7636 
7637 /*************************************************************************
This function sets the split selection algorithm  used  by  the  random
forest classifier. You may choose among several algorithms, with different
speed and quality of the results.
7641 
7642 INPUT PARAMETERS:
7643     S           -   decision forest builder object
7644     SplitStrength-  split type:
7645                     * 0 = split at the random position, fastest one
7646                     * 1 = split at the middle of the range
7647                     * 2 = strong split at the best point of the range (default)
7648 
7649 OUTPUT PARAMETERS:
    S           -   decision forest builder
7651 
7652   -- ALGLIB --
7653      Copyright 21.05.2018 by Bochkanov Sergey
7654 *************************************************************************/
7655 void dfbuildersetrdfsplitstrength(const decisionforestbuilder &s, const ae_int_t splitstrength, const xparams _xparams = alglib::xdefault);
7656 
7657 
7658 /*************************************************************************
7659 This function is an alias for dfbuilderpeekprogress(), left in ALGLIB  for
7660 backward compatibility reasons.
7661 
7662   -- ALGLIB --
7663      Copyright 21.05.2018 by Bochkanov Sergey
7664 *************************************************************************/
7665 double dfbuildergetprogress(const decisionforestbuilder &s, const xparams _xparams = alglib::xdefault);
7666 
7667 
7668 /*************************************************************************
This function is used to peek into the random forest construction process
from another thread and get the current progress indicator. It returns  a
value in [0,1].

You can "peek" into the decision forest builder from another thread.
7673 
7674 INPUT PARAMETERS:
7675     S           -   decision forest builder object used  to  build  random
7676                     forest in some other thread
7677 
7678 RESULT:
7679     progress value, in [0,1]
7680 
7681   -- ALGLIB --
7682      Copyright 21.05.2018 by Bochkanov Sergey
7683 *************************************************************************/
7684 double dfbuilderpeekprogress(const decisionforestbuilder &s, const xparams _xparams = alglib::xdefault);
7685 
7686 
7687 /*************************************************************************
7688 This subroutine builds random forest according to current settings,  using
7689 dataset internally stored in the builder object. Dense algorithm is used.
7690 
NOTE: this   function   uses   dense  algorithm  for  forest  construction
      independently from the dataset format (dense or sparse).

Default settings are used by the algorithm; you can tweak  them  with  the
help of the following functions:
* dfbuildersetsubsampleratio() - to control the fraction  of  the  dataset
  used for subsampling
* dfbuildersetrndvars() - to control the number of variables randomly
  chosen for decision rule creation
7700 
7701   ! COMMERCIAL EDITION OF ALGLIB:
7702   !
  ! Commercial Edition of ALGLIB includes  the  following  important
  ! improvements of this function:
7705   ! * high-performance native backend with same C# interface (C# version)
7706   ! * multithreading support (C++ and C# versions)
7707   !
7708   ! We recommend you to read 'Working with commercial version' section  of
7709   ! ALGLIB Reference Manual in order to find out how to  use  performance-
7710   ! related features provided by commercial edition of ALGLIB.
7711 
7712 INPUT PARAMETERS:
7713     S           -   decision forest builder object
7714     NTrees      -   NTrees>=1, number of trees to train
7715 
7716 OUTPUT PARAMETERS:
7717     DF          -   decision forest
7718     Rep         -   report
7719 
7720   -- ALGLIB --
7721      Copyright 21.05.2018 by Bochkanov Sergey
7722 *************************************************************************/
7723 void dfbuilderbuildrandomforest(const decisionforestbuilder &s, const ae_int_t ntrees, decisionforest &df, dfreport &rep, const xparams _xparams = alglib::xdefault);
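
/*************************************************************************
A minimal end-to-end sketch of the builder workflow described above.  The
dataset xy (alglib::real_2d_array with NPoints=100 rows  and  NVars+1=3
columns), as well as the choices of 50 trees and the optional tweaks,  are
assumptions made for illustration:

    alglib::decisionforestbuilder builder;
    alglib::dfbuildercreate(builder);
    alglib::dfbuildersetdataset(builder, xy, 100, 2, 2); // NClasses=2
    alglib::dfbuildersetsubsampleratio(builder, 0.5);    // optional tweak
    alglib::dfbuildersetrndvars(builder, 1);             // optional tweak
    alglib::decisionforest df;
    alglib::dfreport rep;
    alglib::dfbuilderbuildrandomforest(builder, 50, df, rep);
*************************************************************************/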
7724 
7725 
7726 /*************************************************************************
7727 Inference using decision forest
7728 
IMPORTANT: this  function  is  thread-unsafe  and  may   modify   internal
           structures of the model! You can not use the same model object
           for parallel evaluation from several threads.
7732 
7733            Use dftsprocess()  with  independent  thread-local  buffers  if
7734            you need thread-safe evaluation.
7735 
7736 INPUT PARAMETERS:
7737     DF      -   decision forest model
7738     X       -   input vector,  array[NVars]
7739     Y       -   possibly preallocated buffer, reallocated if too small
7740 
7741 OUTPUT PARAMETERS:
7742     Y       -   result. Regression estimate when solving regression  task,
7743                 vector of posterior probabilities for classification task.
7744 
7745 See also DFProcessI.
7746 
7747 
7748   -- ALGLIB --
7749      Copyright 16.02.2009 by Bochkanov Sergey
7750 *************************************************************************/
7751 void dfprocess(const decisionforest &df, const real_1d_array &x, real_1d_array &y, const xparams _xparams = alglib::xdefault);
7752 
7753 
'interactive' variant of DFProcess for languages like Python which support
constructs like "Y = DFProcessI(DF,X)" and the  interactive  mode  of  the
interpreter.
7757 
7758 This function allocates new array on each call,  so  it  is  significantly
7759 slower than its 'non-interactive' counterpart, but it is  more  convenient
7760 when you call it from command line.
7761 
IMPORTANT: this  function  is  thread-unsafe  and  may   modify   internal
           structures of the model! You can not use the same model object
           for parallel evaluation from several threads.
7765 
7766            Use dftsprocess()  with  independent  thread-local  buffers  if
7767            you need thread-safe evaluation.
7768 
7769   -- ALGLIB --
7770      Copyright 28.02.2010 by Bochkanov Sergey
7771 *************************************************************************/
7772 void dfprocessi(const decisionforest &df, const real_1d_array &x, real_1d_array &y, const xparams _xparams = alglib::xdefault);
7773 
7774 
7775 /*************************************************************************
7776 This function returns first component of the  inferred  vector  (i.e.  one
7777 with index #0).
7778 
7779 It is a convenience wrapper for dfprocess() intended for either:
7780 * 1-dimensional regression problems
7781 * 2-class classification problems
7782 
In the former case this function returns the inference result as a scalar,
which is definitely more convenient than wrapping it in a vector.  In  the
latter case it returns the probability of the object belonging to class #0.

If you call it for anything other than the two cases above, it  will  work
as defined, i.e. return y[0], although it is of less use in such cases.
7789 
IMPORTANT: this function is thread-unsafe and modifies internal structures
           of the model! You can not use the same model object for parallel
           evaluation from several threads.
7793 
7794            Use dftsprocess() with  independent  thread-local  buffers,  if
7795            you need thread-safe evaluation.
7796 
7797 INPUT PARAMETERS:
7798     Model   -   DF model
7799     X       -   input vector,  array[0..NVars-1].
7800 
7801 RESULT:
7802     Y[0]
7803 
7804   -- ALGLIB --
7805      Copyright 15.02.2019 by Bochkanov Sergey
7806 *************************************************************************/
7807 double dfprocess0(const decisionforest &model, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
7808 
7809 
7810 /*************************************************************************
7811 This function returns most probable class number for an  input  X.  It  is
7812 same as calling  dfprocess(model,x,y), then determining i=argmax(y[i]) and
7813 returning i.
7814 
A class number in the [0,NOut) range is returned for classification
problems; -1 is returned when this function is called for regression
problems.
7817 
IMPORTANT: this function is thread-unsafe and modifies internal structures
           of the model! You can not use the same model object for parallel
           evaluation from several threads.
7821 
7822            Use dftsprocess()  with independent  thread-local  buffers,  if
7823            you need thread-safe evaluation.
7824 
7825 INPUT PARAMETERS:
7826     Model   -   decision forest model
7827     X       -   input vector,  array[0..NVars-1].
7828 
7829 RESULT:
7830     class number, -1 for regression tasks
7831 
7832   -- ALGLIB --
7833      Copyright 15.02.2019 by Bochkanov Sergey
7834 *************************************************************************/
7835 ae_int_t dfclassify(const decisionforest &model, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
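
/*************************************************************************
A short inference sketch for dfprocess(), dfprocess0()  and  dfclassify(),
assuming df is a 2-class classifier with NVars=2 (the input values  below
are arbitrary):

    alglib::real_1d_array x = "[0.1, 0.5]";
    alglib::real_1d_array y;
    alglib::dfprocess(df, x, y);                    // posterior vector
    double p0 = alglib::dfprocess0(df, x);          // probability of class #0
    alglib::ae_int_t c = alglib::dfclassify(df, x); // argmax class,
                                                    // -1 for regression
*************************************************************************/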
7836 
7837 
7838 /*************************************************************************
7839 Inference using decision forest
7840 
Thread-safe processing using an external buffer for temporaries.

This function is thread-safe (i.e. you can use the  same  DF  model  from
multiple threads) as long as you use different buffer objects for different
threads.
7846 
7847 INPUT PARAMETERS:
7848     DF      -   decision forest model
7849     Buf     -   buffer object, must be  allocated  specifically  for  this
7850                 model with dfcreatebuffer().
7851     X       -   input vector,  array[NVars]
7852     Y       -   possibly preallocated buffer, reallocated if too small
7853 
7854 OUTPUT PARAMETERS:
7855     Y       -   result. Regression estimate when solving regression  task,
7856                 vector of posterior probabilities for classification task.
7857 
7858 See also DFProcessI.
7859 
7860 
7861   -- ALGLIB --
7862      Copyright 16.02.2009 by Bochkanov Sergey
7863 *************************************************************************/
7864 void dftsprocess(const decisionforest &df, const decisionforestbuffer &buf, const real_1d_array &x, real_1d_array &y, const xparams _xparams = alglib::xdefault);
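
/*************************************************************************
A thread-safety sketch for the function above: each worker thread owns its
own buffer created with dfcreatebuffer(), while the model df is shared  (x
and y are assumed to be per-thread variables):

    alglib::decisionforestbuffer buf;     // one buffer per thread
    alglib::dfcreatebuffer(df, buf);
    alglib::dftsprocess(df, buf, x, y);   // may run concurrently with
                                          // threads using other buffers
*************************************************************************/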
7865 
7866 
7867 /*************************************************************************
7868 Relative classification error on the test set
7869 
7870 INPUT PARAMETERS:
7871     DF      -   decision forest model
7872     XY      -   test set
7873     NPoints -   test set size
7874 
7875 RESULT:
7876     percent of incorrectly classified cases.
7877     Zero if model solves regression task.
7878 
7879   -- ALGLIB --
7880      Copyright 16.02.2009 by Bochkanov Sergey
7881 *************************************************************************/
7882 double dfrelclserror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
7883 
7884 
7885 /*************************************************************************
7886 Average cross-entropy (in bits per element) on the test set
7887 
7888 INPUT PARAMETERS:
7889     DF      -   decision forest model
7890     XY      -   test set
7891     NPoints -   test set size
7892 
7893 RESULT:
7894     CrossEntropy/(NPoints*LN(2)).
7895     Zero if model solves regression task.
7896 
7897   -- ALGLIB --
7898      Copyright 16.02.2009 by Bochkanov Sergey
7899 *************************************************************************/
7900 double dfavgce(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
7901 
7902 
7903 /*************************************************************************
7904 RMS error on the test set
7905 
7906 INPUT PARAMETERS:
7907     DF      -   decision forest model
7908     XY      -   test set
7909     NPoints -   test set size
7910 
7911 RESULT:
7912     root mean square error.
7913     Its meaning for regression task is obvious. As for
7914     classification task, RMS error means error when estimating posterior
7915     probabilities.
7916 
7917   -- ALGLIB --
7918      Copyright 16.02.2009 by Bochkanov Sergey
7919 *************************************************************************/
7920 double dfrmserror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
7921 
7922 
7923 /*************************************************************************
7924 Average error on the test set
7925 
7926 INPUT PARAMETERS:
7927     DF      -   decision forest model
7928     XY      -   test set
7929     NPoints -   test set size
7930 
7931 RESULT:
7932     Its meaning for regression task is obvious. As for
7933     classification task, it means average error when estimating posterior
7934     probabilities.
7935 
7936   -- ALGLIB --
7937      Copyright 16.02.2009 by Bochkanov Sergey
7938 *************************************************************************/
7939 double dfavgerror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
7940 
7941 
7942 /*************************************************************************
7943 Average relative error on the test set
7944 
7945 INPUT PARAMETERS:
7946     DF      -   decision forest model
7947     XY      -   test set
7948     NPoints -   test set size
7949 
7950 RESULT:
7951     Its meaning for regression task is obvious. As for
7952     classification task, it means average relative error when estimating
7953     posterior probability of belonging to the correct class.
7954 
7955   -- ALGLIB --
7956      Copyright 16.02.2009 by Bochkanov Sergey
7957 *************************************************************************/
7958 double dfavgrelerror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
7959 
7960 
7961 /*************************************************************************
7962 This subroutine builds random decision forest.
7963 
7964 --------- DEPRECATED VERSION! USE DECISION FOREST BUILDER OBJECT ---------
7965 
7966   -- ALGLIB --
7967      Copyright 19.02.2009 by Bochkanov Sergey
7968 *************************************************************************/
7969 void dfbuildrandomdecisionforest(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntrees, const double r, ae_int_t &info, decisionforest &df, dfreport &rep, const xparams _xparams = alglib::xdefault);
7970 
7971 
7972 /*************************************************************************
7973 This subroutine builds random decision forest.
7974 
7975 --------- DEPRECATED VERSION! USE DECISION FOREST BUILDER OBJECT ---------
7976 
7977   -- ALGLIB --
7978      Copyright 19.02.2009 by Bochkanov Sergey
7979 *************************************************************************/
7980 void dfbuildrandomdecisionforestx1(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntrees, const ae_int_t nrndvars, const double r, ae_int_t &info, decisionforest &df, dfreport &rep, const xparams _xparams = alglib::xdefault);
7981 #endif
7982 
7983 #if defined(AE_COMPILE_KNN) || !defined(AE_PARTIAL_BUILD)
7984 /*************************************************************************
7985 This function serializes data structure to string.
7986 
Important properties of s_out:
* it contains alphanumeric characters, dots, underscores, minus signs
* these symbols are grouped into words, which are separated by spaces
  and Windows-style (CR+LF) newlines
* although the serializer uses spaces and CR+LF as  separators,  you  can
  replace any separator character by an arbitrary combination of  spaces,
  tabs, Windows or Unix newlines. This allows flexible reformatting    of
  the string in case you want to include it into a text  or  XML   file.
  But you should not insert separators into the middle  of  the  "words",
  nor should you change the case of letters.
* s_out can be freely moved between 32-bit and 64-bit systems, little
  and big endian machines, and so on. You can serialize a structure on  a
  32-bit machine and unserialize it on a 64-bit one (or vice versa),   or
  serialize it on SPARC and unserialize it on x86. You can also serialize
  it in the C++ version of ALGLIB and unserialize it in the C#  one,  and
  vice versa.
8003 *************************************************************************/
8004 void knnserialize(knnmodel &obj, std::string &s_out);
8005 
8006 
8007 /*************************************************************************
8008 This function unserializes data structure from string.
8009 *************************************************************************/
8010 void knnunserialize(const std::string &s_in, knnmodel &obj);
8011 
8012 
8013 
8014 
8015 /*************************************************************************
8016 This function serializes data structure to C++ stream.
8017 
The data stream generated by this function is  the  same  as  the  string
representation generated by the string  version  of  the  serializer   -
alphanumeric characters, dots, underscores, minus signs,  grouped   into
words separated by spaces and CR+LF.

We recommend you to read the comments on the string version of the
serializer to find out more about serialization of ALGLIB objects.
8025 *************************************************************************/
8026 void knnserialize(knnmodel &obj, std::ostream &s_out);
8027 
8028 
8029 /*************************************************************************
8030 This function unserializes data structure from stream.
8031 *************************************************************************/
8032 void knnunserialize(const std::istream &s_in, knnmodel &obj);
8033 
8034 
8035 /*************************************************************************
8036 This function creates buffer  structure  which  can  be  used  to  perform
8037 parallel KNN requests.
8038 
KNN subpackage provides two sets of computing functions - ones  which  use
the internal buffer of the KNN model (these functions are single-threaded
because they use the same buffer, which cannot be shared between threads),
and ones which use an external buffer.

This function is used to initialize the external buffer.
8045 
8046 INPUT PARAMETERS
8047     Model       -   KNN model which is associated with newly created buffer
8048 
8049 OUTPUT PARAMETERS
8050     Buf         -   external buffer.
8051 
8052 
IMPORTANT: the buffer object should be used only with the model which was
           used to initialize it. Any attempt to use the  buffer  with  a
           different object is dangerous - you may get an integrity check
           failure (exception) because the sizes of the internal arrays do
           not fit the dimensions of the model structure.
8058 
8059   -- ALGLIB --
8060      Copyright 15.02.2019 by Bochkanov Sergey
8061 *************************************************************************/
8062 void knncreatebuffer(const knnmodel &model, knnbuffer &buf, const xparams _xparams = alglib::xdefault);
8063 
8064 
8065 /*************************************************************************
8066 This subroutine creates KNNBuilder object which is used to train KNN models.
8067 
8068 By default, new builder stores empty dataset and some  reasonable  default
8069 settings. At the very least, you should specify dataset prior to  building
8070 KNN model. You can also tweak settings of the model construction algorithm
8071 (recommended, although default settings should work well).
8072 
The following actions are mandatory:
8074 * calling knnbuildersetdataset() to specify dataset
8075 * calling knnbuilderbuildknnmodel() to build KNN model using current
8076   dataset and default settings
8077 
8078 Additionally, you may call:
8079 * knnbuildersetnorm() to change norm being used
8080 
8081 INPUT PARAMETERS:
8082     none
8083 
8084 OUTPUT PARAMETERS:
8085     S           -   KNN builder
8086 
8087   -- ALGLIB --
8088      Copyright 15.02.2019 by Bochkanov Sergey
8089 *************************************************************************/
8090 void knnbuildercreate(knnbuilder &s, const xparams _xparams = alglib::xdefault);
8091 
8092 
8093 /*************************************************************************
Specifies regression problem (one or more continuous output variables  are
predicted). There also exists a "classification" version of this function.
8096 
8097 This subroutine adds dense dataset to the internal storage of the  builder
8098 object. Specifying your dataset in the dense format means that  the  dense
8099 version of the KNN construction algorithm will be invoked.
8100 
8101 INPUT PARAMETERS:
8102     S           -   KNN builder object
8103     XY          -   array[NPoints,NVars+NOut] (note: actual  size  can  be
8104                     larger, only leading part is used anyway), dataset:
8105                     * first NVars elements of each row store values of the
8106                       independent variables
8107                     * next NOut elements store  values  of  the  dependent
8108                       variables
8109     NPoints     -   number of rows in the dataset, NPoints>=1
8110     NVars       -   number of independent variables, NVars>=1
8111     NOut        -   number of dependent variables, NOut>=1
8112 
8113 OUTPUT PARAMETERS:
8114     S           -   KNN builder
8115 
8116   -- ALGLIB --
8117      Copyright 15.02.2019 by Bochkanov Sergey
8118 *************************************************************************/
8119 void knnbuildersetdatasetreg(const knnbuilder &s, const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nout, const xparams _xparams = alglib::xdefault);
8120 
8121 
8122 /*************************************************************************
Specifies classification problem (two  or  more  classes  are  predicted).
There also exists a "regression" version of this function.
8125 
8126 This subroutine adds dense dataset to the internal storage of the  builder
8127 object. Specifying your dataset in the dense format means that  the  dense
8128 version of the KNN construction algorithm will be invoked.
8129 
8130 INPUT PARAMETERS:
8131     S           -   KNN builder object
8132     XY          -   array[NPoints,NVars+1] (note:   actual   size  can  be
8133                     larger, only leading part is used anyway), dataset:
8134                     * first NVars elements of each row store values of the
8135                       independent variables
8136                     * next element stores class index, in [0,NClasses)
8137     NPoints     -   number of rows in the dataset, NPoints>=1
8138     NVars       -   number of independent variables, NVars>=1
8139     NClasses    -   number of classes, NClasses>=2
8140 
8141 OUTPUT PARAMETERS:
8142     S           -   KNN builder
8143 
8144   -- ALGLIB --
8145      Copyright 15.02.2019 by Bochkanov Sergey
8146 *************************************************************************/
8147 void knnbuildersetdatasetcls(const knnbuilder &s, const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const xparams _xparams = alglib::xdefault);
8148 
8149 
8150 /*************************************************************************
8151 This function sets norm type used for neighbor search.
8152 
8153 INPUT PARAMETERS:
    S           -   KNN builder object
8155     NormType    -   norm type:
8156                     * 0      inf-norm
8157                     * 1      1-norm
8158                     * 2      Euclidean norm (default)
8159 
8160 OUTPUT PARAMETERS:
    S           -   KNN builder
8162 
8163   -- ALGLIB --
8164      Copyright 15.02.2019 by Bochkanov Sergey
8165 *************************************************************************/
8166 void knnbuildersetnorm(const knnbuilder &s, const ae_int_t nrmtype, const xparams _xparams = alglib::xdefault);
8167 
8168 
8169 /*************************************************************************
8170 This subroutine builds KNN model  according  to  current  settings,  using
8171 dataset internally stored in the builder object.
8172 
8173 The model being built performs inference using Eps-approximate  K  nearest
8174 neighbors search algorithm, with:
8175 * K=1,  Eps=0 corresponding to the "nearest neighbor algorithm"
8176 * K>1,  Eps=0 corresponding to the "K nearest neighbors algorithm"
8177 * K>=1, Eps>0 corresponding to "approximate nearest neighbors algorithm"
8178 
An approximate KNN is a good option for high-dimensional  datasets  (exact
KNN works slowly when the dimension count grows).
8181 
8182 An ALGLIB implementation of kd-trees is used to perform k-nn searches.
8183 
8184   ! COMMERCIAL EDITION OF ALGLIB:
8185   !
  ! Commercial Edition of ALGLIB includes  the  following  important
  ! improvements of this function:
8188   ! * high-performance native backend with same C# interface (C# version)
8189   ! * multithreading support (C++ and C# versions)
8190   !
8191   ! We recommend you to read 'Working with commercial version' section  of
8192   ! ALGLIB Reference Manual in order to find out how to  use  performance-
8193   ! related features provided by commercial edition of ALGLIB.
8194 
8195 INPUT PARAMETERS:
8196     S       -   KNN builder object
8197     K       -   number of neighbors to search for, K>=1
8198     Eps     -   approximation factor:
8199                 * Eps=0 means that exact kNN search is performed
8200                 * Eps>0 means that (1+Eps)-approximate search is performed
8201 
8202 OUTPUT PARAMETERS:
8203     Model       -   KNN model
8204     Rep         -   report
8205 
8206   -- ALGLIB --
8207      Copyright 15.02.2019 by Bochkanov Sergey
8208 *************************************************************************/
8209 void knnbuilderbuildknnmodel(const knnbuilder &s, const ae_int_t k, const double eps, knnmodel &model, knnreport &rep, const xparams _xparams = alglib::xdefault);
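
/*************************************************************************
A minimal sketch of the KNN workflow described above,  assuming  a  dense
classification dataset xy (alglib::real_2d_array, NPoints=100,  NVars=2,
NClasses=2) prepared by the caller; K=5 with Eps=0 requests an exact  5-NN
search:

    alglib::knnbuilder builder;
    alglib::knnbuildercreate(builder);
    alglib::knnbuildersetdatasetcls(builder, xy, 100, 2, 2);
    alglib::knnmodel model;
    alglib::knnreport rep;
    alglib::knnbuilderbuildknnmodel(builder, 5, 0.0, model, rep);
*************************************************************************/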
8210 
8211 
8212 /*************************************************************************
8213 Changing search settings of KNN model.
8214 
8215 K and EPS parameters of KNN  (AKNN)  search  are  specified  during  model
8216 construction. However, plain KNN algorithm with Euclidean distance  allows
8217 you to change them at any moment.
8218 
8219 NOTE: future versions of KNN model may support advanced versions  of  KNN,
8220       such as NCA or LMNN. It is possible that such algorithms won't allow
8221       you to change search settings on the fly. If you call this  function
8222       for an algorithm which does not support on-the-fly changes, it  will
8223       throw an exception.
8224 
8225 INPUT PARAMETERS:
8226     Model   -   KNN model
8227     K       -   K>=1, neighbors count
    EPS     -   accuracy of the EPS-approximate NN search. Set to 0.0,  if
                you want to perform "classic" KNN search.  Specify  larger
                values  if  you  need  to  speed  up   high-dimensional
                KNN queries.
8232 
8233 OUTPUT PARAMETERS:
8234     nothing on success, exception on failure
8235 
8236   -- ALGLIB --
8237      Copyright 15.02.2019 by Bochkanov Sergey
8238 *************************************************************************/
8239 void knnrewritekeps(const knnmodel &model, const ae_int_t k, const double eps, const xparams _xparams = alglib::xdefault);
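
/*************************************************************************
A short sketch of on-the-fly retuning with  the  function  above,  assuming
model was built by knnbuilderbuildknnmodel() with the plain Euclidean  KNN
algorithm (the values of K and Eps are arbitrary):

    alglib::knnrewritekeps(model, 10, 0.0);  // exact 10-NN search
    alglib::knnrewritekeps(model, 10, 0.5);  // 1.5-approximate search,
                                             // faster in high dimensions
*************************************************************************/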
8240 
8241 
8242 /*************************************************************************
8243 Inference using KNN model.
8244 
8245 See also knnprocess0(), knnprocessi() and knnclassify() for options with a
8246 bit more convenient interface.
8247 
IMPORTANT: this function is thread-unsafe and modifies internal structures
           of the model! You can not use the same model object for parallel
           evaluation from several threads.
8251 
8252            Use knntsprocess() with independent  thread-local  buffers,  if
8253            you need thread-safe evaluation.
8254 
8255 INPUT PARAMETERS:
8256     Model   -   KNN model
8257     X       -   input vector,  array[0..NVars-1].
    Y       -   possibly preallocated buffer. Reused if long enough.
8259 
8260 OUTPUT PARAMETERS:
8261     Y       -   result. Regression estimate when solving regression  task,
8262                 vector of posterior probabilities for classification task.
8263 
8264   -- ALGLIB --
8265      Copyright 15.02.2019 by Bochkanov Sergey
8266 *************************************************************************/
8267 void knnprocess(const knnmodel &model, const real_1d_array &x, real_1d_array &y, const xparams _xparams = alglib::xdefault);
8268 
8269 
8270 /*************************************************************************
8271 This function returns first component of the  inferred  vector  (i.e.  one
8272 with index #0).
8273 
8274 It is a convenience wrapper for knnprocess() intended for either:
8275 * 1-dimensional regression problems
8276 * 2-class classification problems
8277 
In the former case this function returns the inference result as a scalar,
which is definitely more convenient than wrapping it in a vector.  In  the
latter case it returns the probability of the object belonging to class #0.

If you call it for anything other than the two cases above, it  will  work
as defined, i.e. return y[0], although it is of less use in such cases.
8284 
IMPORTANT: this function is thread-unsafe and modifies internal structures
           of the model! You can not use the same model object for parallel
           evaluation from several threads.
8288 
8289            Use knntsprocess() with independent  thread-local  buffers,  if
8290            you need thread-safe evaluation.
8291 
8292 INPUT PARAMETERS:
8293     Model   -   KNN model
8294     X       -   input vector,  array[0..NVars-1].
8295 
8296 RESULT:
8297     Y[0]
8298 
8299   -- ALGLIB --
8300      Copyright 15.02.2019 by Bochkanov Sergey
8301 *************************************************************************/
8302 double knnprocess0(const knnmodel &model, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
8303 
8304 
8305 /*************************************************************************
8306 This function returns most probable class number for an  input  X.  It  is
8307 same as calling knnprocess(model,x,y), then determining i=argmax(y[i]) and
8308 returning i.
8309 
A class number in the [0,NOut) range is returned for classification
problems; -1 is returned when this function is called for regression
problems.
8312 
IMPORTANT: this function is thread-unsafe and modifies internal structures
           of the model! You can not use the same model object for parallel
           evaluation from several threads.
8316 
8317            Use knntsprocess() with independent  thread-local  buffers,  if
8318            you need thread-safe evaluation.
8319 
8320 INPUT PARAMETERS:
8321     Model   -   KNN model
8322     X       -   input vector,  array[0..NVars-1].
8323 
8324 RESULT:
8325     class number, -1 for regression tasks
8326 
8327   -- ALGLIB --
8328      Copyright 15.02.2019 by Bochkanov Sergey
8329 *************************************************************************/
8330 ae_int_t knnclassify(const knnmodel &model, const real_1d_array &x, const xparams _xparams = alglib::xdefault);
8331 
8332 
8333 /*************************************************************************
8334 'interactive' variant of knnprocess()  for  languages  like  Python  which
8335 support constructs like "y = knnprocessi(model,x)" and interactive mode of
8336 the interpreter.
8337 
8338 This function allocates new array on each call,  so  it  is  significantly
8339 slower than its 'non-interactive' counterpart, but it is  more  convenient
8340 when you call it from command line.
8341 
IMPORTANT: this  function  is  thread-unsafe  and  may   modify   internal
           structures of the model! You can not use the same model object
           for parallel evaluation from several threads.
8345 
8346            Use knntsprocess()  with  independent  thread-local  buffers if
8347            you need thread-safe evaluation.
8348 
8349   -- ALGLIB --
8350      Copyright 15.02.2019 by Bochkanov Sergey
8351 *************************************************************************/
8352 void knnprocessi(const knnmodel &model, const real_1d_array &x, real_1d_array &y, const xparams _xparams = alglib::xdefault);
8353 
8354 
8355 /*************************************************************************
Thread-safe processing using an external buffer for temporaries.

This function is thread-safe (i.e. you can use the same  KNN  model  from
multiple threads) as long as you use different buffer objects for different
threads.
8361 
8362 INPUT PARAMETERS:
8363     Model   -   KNN model
8364     Buf     -   buffer object, must be  allocated  specifically  for  this
8365                 model with knncreatebuffer().
8366     X       -   input vector,  array[NVars]
8367 
8368 OUTPUT PARAMETERS:
8369     Y       -   result, array[NOut].   Regression  estimate  when  solving
8370                 regression task,  vector  of  posterior  probabilities for
8371                 a classification task.
8372 
8373   -- ALGLIB --
8374      Copyright 15.02.2019 by Bochkanov Sergey
8375 *************************************************************************/
8376 void knntsprocess(const knnmodel &model, const knnbuffer &buf, const real_1d_array &x, real_1d_array &y, const xparams _xparams = alglib::xdefault);
8377 
8378 
8379 /*************************************************************************
8380 Relative classification error on the test set
8381 
8382 INPUT PARAMETERS:
8383     Model   -   KNN model
8384     XY      -   test set
8385     NPoints -   test set size
8386 
8387 RESULT:
8388     percent of incorrectly classified cases.
8389     Zero if model solves regression task.
8390 
NOTE: if  you  need several different kinds of error metrics, it is better
      to use knnallerrors() which computes all error metrics with just one
      pass over the dataset.
8394 
8395   -- ALGLIB --
8396      Copyright 15.02.2019 by Bochkanov Sergey
8397 *************************************************************************/
8398 double knnrelclserror(const knnmodel &model, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
8399 
8400 
8401 /*************************************************************************
8402 Average cross-entropy (in bits per element) on the test set
8403 
8404 INPUT PARAMETERS:
8405     Model   -   KNN model
8406     XY      -   test set
8407     NPoints -   test set size
8408 
8409 RESULT:
8410     CrossEntropy/NPoints.
8411     Zero if model solves regression task.
8412 
8413 NOTE: the cross-entropy metric is too unstable when used to  evaluate  KNN
8414       models (such models can report exactly  zero probabilities),  so  we
8415       do not recommend using it.
8416 
NOTE: if  you  need several different kinds of error metrics, it is better
      to use knnallerrors() which computes all error metrics with just one
      pass over the dataset.
8420 
8421   -- ALGLIB --
8422      Copyright 15.02.2019 by Bochkanov Sergey
8423 *************************************************************************/
8424 double knnavgce(const knnmodel &model, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
8425 
8426 
8427 /*************************************************************************
8428 RMS error on the test set.
8429 
8430 Its meaning for regression task is obvious. As for classification problems,
8431 RMS error means error when estimating posterior probabilities.
8432 
8433 INPUT PARAMETERS:
8434     Model   -   KNN model
8435     XY      -   test set
8436     NPoints -   test set size
8437 
8438 RESULT:
8439     root mean square error.
8440 
NOTE: if  you  need several different kinds of error metrics, it is better
      to use knnallerrors() which computes all error metrics with just one
      pass over the dataset.
8444 
8445   -- ALGLIB --
8446      Copyright 15.02.2019 by Bochkanov Sergey
8447 *************************************************************************/
8448 double knnrmserror(const knnmodel &model, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
8449 
8450 
8451 /*************************************************************************
8452 Average error on the test set
8453 
8454 Its meaning for regression task is obvious. As for classification problems,
8455 average error means error when estimating posterior probabilities.
8456 
8457 INPUT PARAMETERS:
8458     Model   -   KNN model
8459     XY      -   test set
8460     NPoints -   test set size
8461 
8462 RESULT:
8463     average error
8464 
NOTE: if  you  need several different kinds of error metrics, it is better
      to use knnallerrors() which computes all error metrics with just one
      pass over the dataset.
8468 
8469   -- ALGLIB --
8470      Copyright 15.02.2019 by Bochkanov Sergey
8471 *************************************************************************/
8472 double knnavgerror(const knnmodel &model, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
8473 
8474 
8475 /*************************************************************************
8476 Average relative error on the test set
8477 
8478 Its meaning for regression task is obvious. As for classification problems,
8479 average relative error means error when estimating posterior probabilities.
8480 
8481 INPUT PARAMETERS:
8482     Model   -   KNN model
8483     XY      -   test set
8484     NPoints -   test set size
8485 
8486 RESULT:
8487     average relative error
8488 
NOTE: if  you  need several different kinds of error metrics, it is better
      to use knnallerrors() which computes all error metrics with just one
      pass over the dataset.
8492 
8493   -- ALGLIB --
8494      Copyright 15.02.2019 by Bochkanov Sergey
8495 *************************************************************************/
8496 double knnavgrelerror(const knnmodel &model, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams = alglib::xdefault);
8497 
8498 
8499 /*************************************************************************
8500 Calculates all kinds of errors for the model in one call.
8501 
8502 INPUT PARAMETERS:
8503     Model   -   KNN model
8504     XY      -   test set:
8505                 * one row per point
8506                 * first NVars columns store independent variables
8507                 * depending on problem type:
8508                   * next column stores class number in [0,NClasses) -  for
8509                     classification problems
8510                   * next NOut columns  store  dependent  variables  -  for
8511                     regression problems
8512     NPoints -   test set size, NPoints>=0
8513 
8514 OUTPUT PARAMETERS:
8515     Rep     -   following fields are loaded with errors for both regression
8516                 and classification models:
8517                 * rep.rmserror - RMS error for the output
8518                 * rep.avgerror - average error
8519                 * rep.avgrelerror - average relative error
                following fields are set only  for classification  models,
                zero for regression ones:
                * rep.relclserror - relative classification error, in [0,1]
                * rep.avgce - average cross-entropy in bits per dataset entry
8524 
8525 NOTE: the cross-entropy metric is too unstable when used to  evaluate  KNN
8526       models (such models can report exactly  zero probabilities),  so  we
8527       do not recommend using it.
8528 
8529   -- ALGLIB --
8530      Copyright 15.02.2019 by Bochkanov Sergey
8531 *************************************************************************/
8532 void knnallerrors(const knnmodel &model, const real_2d_array &xy, const ae_int_t npoints, knnreport &rep, const xparams _xparams = alglib::xdefault);
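
/*************************************************************************
A short sketch of one-pass error evaluation with knnallerrors(),  assuming
model and a test set xytest with ntest rows were prepared by  the  caller;
the report fields read below are the ones listed above:

    alglib::knnreport rep;
    alglib::knnallerrors(model, xytest, ntest, rep);
    double rms    = rep.rmserror;     // filled for both problem types
    double clserr = rep.relclserror;  // classification models only,
                                      // zero for regression ones
*************************************************************************/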
8533 #endif
8534 
8535 #if defined(AE_COMPILE_DATACOMP) || !defined(AE_PARTIAL_BUILD)
8536 /*************************************************************************
k-means++ clusterization.
Backward compatibility function; we recommend using the CLUSTERING
subpackage as a better replacement.
8540 
8541   -- ALGLIB --
8542      Copyright 21.03.2009 by Bochkanov Sergey
8543 *************************************************************************/
8544 void kmeansgenerate(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t k, const ae_int_t restarts, ae_int_t &info, real_2d_array &c, integer_1d_array &xyc, const xparams _xparams = alglib::xdefault);
8545 #endif
8546 }
8547 
8548 /////////////////////////////////////////////////////////////////////////
8549 //
8550 // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS)
8551 //
8552 /////////////////////////////////////////////////////////////////////////
8553 namespace alglib_impl
8554 {
8555 #if defined(AE_COMPILE_PCA) || !defined(AE_PARTIAL_BUILD)
8556 void pcabuildbasis(/* Real    */ ae_matrix* x,
8557      ae_int_t npoints,
8558      ae_int_t nvars,
8559      ae_int_t* info,
8560      /* Real    */ ae_vector* s2,
8561      /* Real    */ ae_matrix* v,
8562      ae_state *_state);
8563 void pcatruncatedsubspace(/* Real    */ ae_matrix* x,
8564      ae_int_t npoints,
8565      ae_int_t nvars,
8566      ae_int_t nneeded,
8567      double eps,
8568      ae_int_t maxits,
8569      /* Real    */ ae_vector* s2,
8570      /* Real    */ ae_matrix* v,
8571      ae_state *_state);
8572 void pcatruncatedsubspacesparse(sparsematrix* x,
8573      ae_int_t npoints,
8574      ae_int_t nvars,
8575      ae_int_t nneeded,
8576      double eps,
8577      ae_int_t maxits,
8578      /* Real    */ ae_vector* s2,
8579      /* Real    */ ae_matrix* v,
8580      ae_state *_state);
8581 #endif
8582 #if defined(AE_COMPILE_BDSS) || !defined(AE_PARTIAL_BUILD)
8583 void dserrallocate(ae_int_t nclasses,
8584      /* Real    */ ae_vector* buf,
8585      ae_state *_state);
8586 void dserraccumulate(/* Real    */ ae_vector* buf,
8587      /* Real    */ ae_vector* y,
8588      /* Real    */ ae_vector* desiredy,
8589      ae_state *_state);
8590 void dserrfinish(/* Real    */ ae_vector* buf, ae_state *_state);
8591 void dsnormalize(/* Real    */ ae_matrix* xy,
8592      ae_int_t npoints,
8593      ae_int_t nvars,
8594      ae_int_t* info,
8595      /* Real    */ ae_vector* means,
8596      /* Real    */ ae_vector* sigmas,
8597      ae_state *_state);
8598 void dsnormalizec(/* Real    */ ae_matrix* xy,
8599      ae_int_t npoints,
8600      ae_int_t nvars,
8601      ae_int_t* info,
8602      /* Real    */ ae_vector* means,
8603      /* Real    */ ae_vector* sigmas,
8604      ae_state *_state);
8605 double dsgetmeanmindistance(/* Real    */ ae_matrix* xy,
8606      ae_int_t npoints,
8607      ae_int_t nvars,
8608      ae_state *_state);
8609 void dstie(/* Real    */ ae_vector* a,
8610      ae_int_t n,
8611      /* Integer */ ae_vector* ties,
8612      ae_int_t* tiecount,
8613      /* Integer */ ae_vector* p1,
8614      /* Integer */ ae_vector* p2,
8615      ae_state *_state);
8616 void dstiefasti(/* Real    */ ae_vector* a,
8617      /* Integer */ ae_vector* b,
8618      ae_int_t n,
8619      /* Integer */ ae_vector* ties,
8620      ae_int_t* tiecount,
8621      /* Real    */ ae_vector* bufr,
8622      /* Integer */ ae_vector* bufi,
8623      ae_state *_state);
8624 void dsoptimalsplit2(/* Real    */ ae_vector* a,
8625      /* Integer */ ae_vector* c,
8626      ae_int_t n,
8627      ae_int_t* info,
8628      double* threshold,
8629      double* pal,
8630      double* pbl,
8631      double* par,
8632      double* pbr,
8633      double* cve,
8634      ae_state *_state);
8635 void dsoptimalsplit2fast(/* Real    */ ae_vector* a,
8636      /* Integer */ ae_vector* c,
8637      /* Integer */ ae_vector* tiesbuf,
8638      /* Integer */ ae_vector* cntbuf,
8639      /* Real    */ ae_vector* bufr,
8640      /* Integer */ ae_vector* bufi,
8641      ae_int_t n,
8642      ae_int_t nc,
8643      double alpha,
8644      ae_int_t* info,
8645      double* threshold,
8646      double* rms,
8647      double* cvrms,
8648      ae_state *_state);
8649 void dssplitk(/* Real    */ ae_vector* a,
8650      /* Integer */ ae_vector* c,
8651      ae_int_t n,
8652      ae_int_t nc,
8653      ae_int_t kmax,
8654      ae_int_t* info,
8655      /* Real    */ ae_vector* thresholds,
8656      ae_int_t* ni,
8657      double* cve,
8658      ae_state *_state);
8659 void dsoptimalsplitk(/* Real    */ ae_vector* a,
8660      /* Integer */ ae_vector* c,
8661      ae_int_t n,
8662      ae_int_t nc,
8663      ae_int_t kmax,
8664      ae_int_t* info,
8665      /* Real    */ ae_vector* thresholds,
8666      ae_int_t* ni,
8667      double* cve,
8668      ae_state *_state);
8669 void _cvreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
8670 void _cvreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
8671 void _cvreport_clear(void* _p);
8672 void _cvreport_destroy(void* _p);
8673 #endif
8674 #if defined(AE_COMPILE_MLPBASE) || !defined(AE_PARTIAL_BUILD)
8675 ae_int_t mlpgradsplitcost(ae_state *_state);
8676 ae_int_t mlpgradsplitsize(ae_state *_state);
8677 void mlpcreate0(ae_int_t nin,
8678      ae_int_t nout,
8679      multilayerperceptron* network,
8680      ae_state *_state);
8681 void mlpcreate1(ae_int_t nin,
8682      ae_int_t nhid,
8683      ae_int_t nout,
8684      multilayerperceptron* network,
8685      ae_state *_state);
8686 void mlpcreate2(ae_int_t nin,
8687      ae_int_t nhid1,
8688      ae_int_t nhid2,
8689      ae_int_t nout,
8690      multilayerperceptron* network,
8691      ae_state *_state);
8692 void mlpcreateb0(ae_int_t nin,
8693      ae_int_t nout,
8694      double b,
8695      double d,
8696      multilayerperceptron* network,
8697      ae_state *_state);
8698 void mlpcreateb1(ae_int_t nin,
8699      ae_int_t nhid,
8700      ae_int_t nout,
8701      double b,
8702      double d,
8703      multilayerperceptron* network,
8704      ae_state *_state);
8705 void mlpcreateb2(ae_int_t nin,
8706      ae_int_t nhid1,
8707      ae_int_t nhid2,
8708      ae_int_t nout,
8709      double b,
8710      double d,
8711      multilayerperceptron* network,
8712      ae_state *_state);
8713 void mlpcreater0(ae_int_t nin,
8714      ae_int_t nout,
8715      double a,
8716      double b,
8717      multilayerperceptron* network,
8718      ae_state *_state);
8719 void mlpcreater1(ae_int_t nin,
8720      ae_int_t nhid,
8721      ae_int_t nout,
8722      double a,
8723      double b,
8724      multilayerperceptron* network,
8725      ae_state *_state);
8726 void mlpcreater2(ae_int_t nin,
8727      ae_int_t nhid1,
8728      ae_int_t nhid2,
8729      ae_int_t nout,
8730      double a,
8731      double b,
8732      multilayerperceptron* network,
8733      ae_state *_state);
8734 void mlpcreatec0(ae_int_t nin,
8735      ae_int_t nout,
8736      multilayerperceptron* network,
8737      ae_state *_state);
8738 void mlpcreatec1(ae_int_t nin,
8739      ae_int_t nhid,
8740      ae_int_t nout,
8741      multilayerperceptron* network,
8742      ae_state *_state);
8743 void mlpcreatec2(ae_int_t nin,
8744      ae_int_t nhid1,
8745      ae_int_t nhid2,
8746      ae_int_t nout,
8747      multilayerperceptron* network,
8748      ae_state *_state);
8749 void mlpcopy(multilayerperceptron* network1,
8750      multilayerperceptron* network2,
8751      ae_state *_state);
8752 void mlpcopyshared(multilayerperceptron* network1,
8753      multilayerperceptron* network2,
8754      ae_state *_state);
8755 ae_bool mlpsamearchitecture(multilayerperceptron* network1,
8756      multilayerperceptron* network2,
8757      ae_state *_state);
8758 void mlpcopytunableparameters(multilayerperceptron* network1,
8759      multilayerperceptron* network2,
8760      ae_state *_state);
8761 void mlpexporttunableparameters(multilayerperceptron* network,
8762      /* Real    */ ae_vector* p,
8763      ae_int_t* pcount,
8764      ae_state *_state);
8765 void mlpimporttunableparameters(multilayerperceptron* network,
8766      /* Real    */ ae_vector* p,
8767      ae_state *_state);
8768 void mlpserializeold(multilayerperceptron* network,
8769      /* Real    */ ae_vector* ra,
8770      ae_int_t* rlen,
8771      ae_state *_state);
8772 void mlpunserializeold(/* Real    */ ae_vector* ra,
8773      multilayerperceptron* network,
8774      ae_state *_state);
8775 void mlprandomize(multilayerperceptron* network, ae_state *_state);
8776 void mlprandomizefull(multilayerperceptron* network, ae_state *_state);
8777 void mlpinitpreprocessor(multilayerperceptron* network,
8778      /* Real    */ ae_matrix* xy,
8779      ae_int_t ssize,
8780      ae_state *_state);
8781 void mlpinitpreprocessorsparse(multilayerperceptron* network,
8782      sparsematrix* xy,
8783      ae_int_t ssize,
8784      ae_state *_state);
8785 void mlpinitpreprocessorsubset(multilayerperceptron* network,
8786      /* Real    */ ae_matrix* xy,
8787      ae_int_t setsize,
8788      /* Integer */ ae_vector* idx,
8789      ae_int_t subsetsize,
8790      ae_state *_state);
8791 void mlpinitpreprocessorsparsesubset(multilayerperceptron* network,
8792      sparsematrix* xy,
8793      ae_int_t setsize,
8794      /* Integer */ ae_vector* idx,
8795      ae_int_t subsetsize,
8796      ae_state *_state);
8797 void mlpproperties(multilayerperceptron* network,
8798      ae_int_t* nin,
8799      ae_int_t* nout,
8800      ae_int_t* wcount,
8801      ae_state *_state);
8802 ae_int_t mlpntotal(multilayerperceptron* network, ae_state *_state);
8803 ae_int_t mlpgetinputscount(multilayerperceptron* network,
8804      ae_state *_state);
8805 ae_int_t mlpgetoutputscount(multilayerperceptron* network,
8806      ae_state *_state);
8807 ae_int_t mlpgetweightscount(multilayerperceptron* network,
8808      ae_state *_state);
8809 ae_bool mlpissoftmax(multilayerperceptron* network, ae_state *_state);
8810 ae_int_t mlpgetlayerscount(multilayerperceptron* network,
8811      ae_state *_state);
8812 ae_int_t mlpgetlayersize(multilayerperceptron* network,
8813      ae_int_t k,
8814      ae_state *_state);
8815 void mlpgetinputscaling(multilayerperceptron* network,
8816      ae_int_t i,
8817      double* mean,
8818      double* sigma,
8819      ae_state *_state);
8820 void mlpgetoutputscaling(multilayerperceptron* network,
8821      ae_int_t i,
8822      double* mean,
8823      double* sigma,
8824      ae_state *_state);
8825 void mlpgetneuroninfo(multilayerperceptron* network,
8826      ae_int_t k,
8827      ae_int_t i,
8828      ae_int_t* fkind,
8829      double* threshold,
8830      ae_state *_state);
8831 double mlpgetweight(multilayerperceptron* network,
8832      ae_int_t k0,
8833      ae_int_t i0,
8834      ae_int_t k1,
8835      ae_int_t i1,
8836      ae_state *_state);
8837 void mlpsetinputscaling(multilayerperceptron* network,
8838      ae_int_t i,
8839      double mean,
8840      double sigma,
8841      ae_state *_state);
8842 void mlpsetoutputscaling(multilayerperceptron* network,
8843      ae_int_t i,
8844      double mean,
8845      double sigma,
8846      ae_state *_state);
8847 void mlpsetneuroninfo(multilayerperceptron* network,
8848      ae_int_t k,
8849      ae_int_t i,
8850      ae_int_t fkind,
8851      double threshold,
8852      ae_state *_state);
8853 void mlpsetweight(multilayerperceptron* network,
8854      ae_int_t k0,
8855      ae_int_t i0,
8856      ae_int_t k1,
8857      ae_int_t i1,
8858      double w,
8859      ae_state *_state);
8860 void mlpactivationfunction(double net,
8861      ae_int_t k,
8862      double* f,
8863      double* df,
8864      double* d2f,
8865      ae_state *_state);
8866 void mlpprocess(multilayerperceptron* network,
8867      /* Real    */ ae_vector* x,
8868      /* Real    */ ae_vector* y,
8869      ae_state *_state);
8870 void mlpprocessi(multilayerperceptron* network,
8871      /* Real    */ ae_vector* x,
8872      /* Real    */ ae_vector* y,
8873      ae_state *_state);
8874 double mlperror(multilayerperceptron* network,
8875      /* Real    */ ae_matrix* xy,
8876      ae_int_t npoints,
8877      ae_state *_state);
8878 double mlperrorsparse(multilayerperceptron* network,
8879      sparsematrix* xy,
8880      ae_int_t npoints,
8881      ae_state *_state);
8882 double mlperrorn(multilayerperceptron* network,
8883      /* Real    */ ae_matrix* xy,
8884      ae_int_t ssize,
8885      ae_state *_state);
8886 ae_int_t mlpclserror(multilayerperceptron* network,
8887      /* Real    */ ae_matrix* xy,
8888      ae_int_t npoints,
8889      ae_state *_state);
8890 double mlprelclserror(multilayerperceptron* network,
8891      /* Real    */ ae_matrix* xy,
8892      ae_int_t npoints,
8893      ae_state *_state);
8894 double mlprelclserrorsparse(multilayerperceptron* network,
8895      sparsematrix* xy,
8896      ae_int_t npoints,
8897      ae_state *_state);
8898 double mlpavgce(multilayerperceptron* network,
8899      /* Real    */ ae_matrix* xy,
8900      ae_int_t npoints,
8901      ae_state *_state);
8902 double mlpavgcesparse(multilayerperceptron* network,
8903      sparsematrix* xy,
8904      ae_int_t npoints,
8905      ae_state *_state);
8906 double mlprmserror(multilayerperceptron* network,
8907      /* Real    */ ae_matrix* xy,
8908      ae_int_t npoints,
8909      ae_state *_state);
8910 double mlprmserrorsparse(multilayerperceptron* network,
8911      sparsematrix* xy,
8912      ae_int_t npoints,
8913      ae_state *_state);
8914 double mlpavgerror(multilayerperceptron* network,
8915      /* Real    */ ae_matrix* xy,
8916      ae_int_t npoints,
8917      ae_state *_state);
8918 double mlpavgerrorsparse(multilayerperceptron* network,
8919      sparsematrix* xy,
8920      ae_int_t npoints,
8921      ae_state *_state);
8922 double mlpavgrelerror(multilayerperceptron* network,
8923      /* Real    */ ae_matrix* xy,
8924      ae_int_t npoints,
8925      ae_state *_state);
8926 double mlpavgrelerrorsparse(multilayerperceptron* network,
8927      sparsematrix* xy,
8928      ae_int_t npoints,
8929      ae_state *_state);
8930 void mlpgrad(multilayerperceptron* network,
8931      /* Real    */ ae_vector* x,
8932      /* Real    */ ae_vector* desiredy,
8933      double* e,
8934      /* Real    */ ae_vector* grad,
8935      ae_state *_state);
8936 void mlpgradn(multilayerperceptron* network,
8937      /* Real    */ ae_vector* x,
8938      /* Real    */ ae_vector* desiredy,
8939      double* e,
8940      /* Real    */ ae_vector* grad,
8941      ae_state *_state);
8942 void mlpgradbatch(multilayerperceptron* network,
8943      /* Real    */ ae_matrix* xy,
8944      ae_int_t ssize,
8945      double* e,
8946      /* Real    */ ae_vector* grad,
8947      ae_state *_state);
8948 void mlpgradbatchsparse(multilayerperceptron* network,
8949      sparsematrix* xy,
8950      ae_int_t ssize,
8951      double* e,
8952      /* Real    */ ae_vector* grad,
8953      ae_state *_state);
8954 void mlpgradbatchsubset(multilayerperceptron* network,
8955      /* Real    */ ae_matrix* xy,
8956      ae_int_t setsize,
8957      /* Integer */ ae_vector* idx,
8958      ae_int_t subsetsize,
8959      double* e,
8960      /* Real    */ ae_vector* grad,
8961      ae_state *_state);
8962 void mlpgradbatchsparsesubset(multilayerperceptron* network,
8963      sparsematrix* xy,
8964      ae_int_t setsize,
8965      /* Integer */ ae_vector* idx,
8966      ae_int_t subsetsize,
8967      double* e,
8968      /* Real    */ ae_vector* grad,
8969      ae_state *_state);
8970 void mlpgradbatchx(multilayerperceptron* network,
8971      /* Real    */ ae_matrix* densexy,
8972      sparsematrix* sparsexy,
8973      ae_int_t datasetsize,
8974      ae_int_t datasettype,
8975      /* Integer */ ae_vector* idx,
8976      ae_int_t subset0,
8977      ae_int_t subset1,
8978      ae_int_t subsettype,
8979      ae_shared_pool* buf,
8980      ae_shared_pool* gradbuf,
8981      ae_state *_state);
8982 ae_bool _trypexec_mlpgradbatchx(multilayerperceptron* network,
8983     /* Real    */ ae_matrix* densexy,
8984     sparsematrix* sparsexy,
8985     ae_int_t datasetsize,
8986     ae_int_t datasettype,
8987     /* Integer */ ae_vector* idx,
8988     ae_int_t subset0,
8989     ae_int_t subset1,
8990     ae_int_t subsettype,
8991     ae_shared_pool* buf,
8992     ae_shared_pool* gradbuf, ae_state *_state);
8993 void mlpgradnbatch(multilayerperceptron* network,
8994      /* Real    */ ae_matrix* xy,
8995      ae_int_t ssize,
8996      double* e,
8997      /* Real    */ ae_vector* grad,
8998      ae_state *_state);
8999 void mlphessiannbatch(multilayerperceptron* network,
9000      /* Real    */ ae_matrix* xy,
9001      ae_int_t ssize,
9002      double* e,
9003      /* Real    */ ae_vector* grad,
9004      /* Real    */ ae_matrix* h,
9005      ae_state *_state);
9006 void mlphessianbatch(multilayerperceptron* network,
9007      /* Real    */ ae_matrix* xy,
9008      ae_int_t ssize,
9009      double* e,
9010      /* Real    */ ae_vector* grad,
9011      /* Real    */ ae_matrix* h,
9012      ae_state *_state);
9013 void mlpinternalprocessvector(/* Integer */ ae_vector* structinfo,
9014      /* Real    */ ae_vector* weights,
9015      /* Real    */ ae_vector* columnmeans,
9016      /* Real    */ ae_vector* columnsigmas,
9017      /* Real    */ ae_vector* neurons,
9018      /* Real    */ ae_vector* dfdnet,
9019      /* Real    */ ae_vector* x,
9020      /* Real    */ ae_vector* y,
9021      ae_state *_state);
9022 void mlpalloc(ae_serializer* s,
9023      multilayerperceptron* network,
9024      ae_state *_state);
9025 void mlpserialize(ae_serializer* s,
9026      multilayerperceptron* network,
9027      ae_state *_state);
9028 void mlpunserialize(ae_serializer* s,
9029      multilayerperceptron* network,
9030      ae_state *_state);
9031 void mlpallerrorssubset(multilayerperceptron* network,
9032      /* Real    */ ae_matrix* xy,
9033      ae_int_t setsize,
9034      /* Integer */ ae_vector* subset,
9035      ae_int_t subsetsize,
9036      modelerrors* rep,
9037      ae_state *_state);
9038 void mlpallerrorssparsesubset(multilayerperceptron* network,
9039      sparsematrix* xy,
9040      ae_int_t setsize,
9041      /* Integer */ ae_vector* subset,
9042      ae_int_t subsetsize,
9043      modelerrors* rep,
9044      ae_state *_state);
9045 double mlperrorsubset(multilayerperceptron* network,
9046      /* Real    */ ae_matrix* xy,
9047      ae_int_t setsize,
9048      /* Integer */ ae_vector* subset,
9049      ae_int_t subsetsize,
9050      ae_state *_state);
9051 double mlperrorsparsesubset(multilayerperceptron* network,
9052      sparsematrix* xy,
9053      ae_int_t setsize,
9054      /* Integer */ ae_vector* subset,
9055      ae_int_t subsetsize,
9056      ae_state *_state);
9057 void mlpallerrorsx(multilayerperceptron* network,
9058      /* Real    */ ae_matrix* densexy,
9059      sparsematrix* sparsexy,
9060      ae_int_t datasetsize,
9061      ae_int_t datasettype,
9062      /* Integer */ ae_vector* idx,
9063      ae_int_t subset0,
9064      ae_int_t subset1,
9065      ae_int_t subsettype,
9066      ae_shared_pool* buf,
9067      modelerrors* rep,
9068      ae_state *_state);
9069 ae_bool _trypexec_mlpallerrorsx(multilayerperceptron* network,
9070     /* Real    */ ae_matrix* densexy,
9071     sparsematrix* sparsexy,
9072     ae_int_t datasetsize,
9073     ae_int_t datasettype,
9074     /* Integer */ ae_vector* idx,
9075     ae_int_t subset0,
9076     ae_int_t subset1,
9077     ae_int_t subsettype,
9078     ae_shared_pool* buf,
9079     modelerrors* rep, ae_state *_state);
9080 void _modelerrors_init(void* _p, ae_state *_state, ae_bool make_automatic);
9081 void _modelerrors_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9082 void _modelerrors_clear(void* _p);
9083 void _modelerrors_destroy(void* _p);
9084 void _smlpgrad_init(void* _p, ae_state *_state, ae_bool make_automatic);
9085 void _smlpgrad_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9086 void _smlpgrad_clear(void* _p);
9087 void _smlpgrad_destroy(void* _p);
9088 void _multilayerperceptron_init(void* _p, ae_state *_state, ae_bool make_automatic);
9089 void _multilayerperceptron_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9090 void _multilayerperceptron_clear(void* _p);
9091 void _multilayerperceptron_destroy(void* _p);
9092 #endif
9093 #if defined(AE_COMPILE_LDA) || !defined(AE_PARTIAL_BUILD)
9094 void fisherlda(/* Real    */ ae_matrix* xy,
9095      ae_int_t npoints,
9096      ae_int_t nvars,
9097      ae_int_t nclasses,
9098      ae_int_t* info,
9099      /* Real    */ ae_vector* w,
9100      ae_state *_state);
9101 void fisherldan(/* Real    */ ae_matrix* xy,
9102      ae_int_t npoints,
9103      ae_int_t nvars,
9104      ae_int_t nclasses,
9105      ae_int_t* info,
9106      /* Real    */ ae_matrix* w,
9107      ae_state *_state);
9108 #endif
9109 #if defined(AE_COMPILE_SSA) || !defined(AE_PARTIAL_BUILD)
9110 void ssacreate(ssamodel* s, ae_state *_state);
9111 void ssasetwindow(ssamodel* s, ae_int_t windowwidth, ae_state *_state);
9112 void ssasetseed(ssamodel* s, ae_int_t seed, ae_state *_state);
9113 void ssasetpoweruplength(ssamodel* s, ae_int_t pwlen, ae_state *_state);
9114 void ssasetmemorylimit(ssamodel* s, ae_int_t memlimit, ae_state *_state);
9115 void ssaaddsequence(ssamodel* s,
9116      /* Real    */ ae_vector* x,
9117      ae_int_t n,
9118      ae_state *_state);
9119 void ssaappendpointandupdate(ssamodel* s,
9120      double x,
9121      double updateits,
9122      ae_state *_state);
9123 void ssaappendsequenceandupdate(ssamodel* s,
9124      /* Real    */ ae_vector* x,
9125      ae_int_t nticks,
9126      double updateits,
9127      ae_state *_state);
9128 void ssasetalgoprecomputed(ssamodel* s,
9129      /* Real    */ ae_matrix* a,
9130      ae_int_t windowwidth,
9131      ae_int_t nbasis,
9132      ae_state *_state);
9133 void ssasetalgotopkdirect(ssamodel* s, ae_int_t topk, ae_state *_state);
9134 void ssasetalgotopkrealtime(ssamodel* s, ae_int_t topk, ae_state *_state);
9135 void ssacleardata(ssamodel* s, ae_state *_state);
9136 void ssagetbasis(ssamodel* s,
9137      /* Real    */ ae_matrix* a,
9138      /* Real    */ ae_vector* sv,
9139      ae_int_t* windowwidth,
9140      ae_int_t* nbasis,
9141      ae_state *_state);
9142 void ssagetlrr(ssamodel* s,
9143      /* Real    */ ae_vector* a,
9144      ae_int_t* windowwidth,
9145      ae_state *_state);
9146 void ssaanalyzelastwindow(ssamodel* s,
9147      /* Real    */ ae_vector* trend,
9148      /* Real    */ ae_vector* noise,
9149      ae_int_t* nticks,
9150      ae_state *_state);
9151 void ssaanalyzelast(ssamodel* s,
9152      ae_int_t nticks,
9153      /* Real    */ ae_vector* trend,
9154      /* Real    */ ae_vector* noise,
9155      ae_state *_state);
9156 void ssaanalyzesequence(ssamodel* s,
9157      /* Real    */ ae_vector* data,
9158      ae_int_t nticks,
9159      /* Real    */ ae_vector* trend,
9160      /* Real    */ ae_vector* noise,
9161      ae_state *_state);
9162 void ssaforecastlast(ssamodel* s,
9163      ae_int_t nticks,
9164      /* Real    */ ae_vector* trend,
9165      ae_state *_state);
9166 void ssaforecastsequence(ssamodel* s,
9167      /* Real    */ ae_vector* data,
9168      ae_int_t datalen,
9169      ae_int_t forecastlen,
9170      ae_bool applysmoothing,
9171      /* Real    */ ae_vector* trend,
9172      ae_state *_state);
9173 void ssaforecastavglast(ssamodel* s,
9174      ae_int_t m,
9175      ae_int_t nticks,
9176      /* Real    */ ae_vector* trend,
9177      ae_state *_state);
9178 void ssaforecastavgsequence(ssamodel* s,
9179      /* Real    */ ae_vector* data,
9180      ae_int_t datalen,
9181      ae_int_t m,
9182      ae_int_t forecastlen,
9183      ae_bool applysmoothing,
9184      /* Real    */ ae_vector* trend,
9185      ae_state *_state);
9186 void _ssamodel_init(void* _p, ae_state *_state, ae_bool make_automatic);
9187 void _ssamodel_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9188 void _ssamodel_clear(void* _p);
9189 void _ssamodel_destroy(void* _p);
9190 #endif
9191 #if defined(AE_COMPILE_LINREG) || !defined(AE_PARTIAL_BUILD)
9192 void lrbuild(/* Real    */ ae_matrix* xy,
9193      ae_int_t npoints,
9194      ae_int_t nvars,
9195      ae_int_t* info,
9196      linearmodel* lm,
9197      lrreport* ar,
9198      ae_state *_state);
9199 void lrbuilds(/* Real    */ ae_matrix* xy,
9200      /* Real    */ ae_vector* s,
9201      ae_int_t npoints,
9202      ae_int_t nvars,
9203      ae_int_t* info,
9204      linearmodel* lm,
9205      lrreport* ar,
9206      ae_state *_state);
9207 void lrbuildzs(/* Real    */ ae_matrix* xy,
9208      /* Real    */ ae_vector* s,
9209      ae_int_t npoints,
9210      ae_int_t nvars,
9211      ae_int_t* info,
9212      linearmodel* lm,
9213      lrreport* ar,
9214      ae_state *_state);
9215 void lrbuildz(/* Real    */ ae_matrix* xy,
9216      ae_int_t npoints,
9217      ae_int_t nvars,
9218      ae_int_t* info,
9219      linearmodel* lm,
9220      lrreport* ar,
9221      ae_state *_state);
9222 void lrunpack(linearmodel* lm,
9223      /* Real    */ ae_vector* v,
9224      ae_int_t* nvars,
9225      ae_state *_state);
9226 void lrpack(/* Real    */ ae_vector* v,
9227      ae_int_t nvars,
9228      linearmodel* lm,
9229      ae_state *_state);
9230 double lrprocess(linearmodel* lm,
9231      /* Real    */ ae_vector* x,
9232      ae_state *_state);
9233 double lrrmserror(linearmodel* lm,
9234      /* Real    */ ae_matrix* xy,
9235      ae_int_t npoints,
9236      ae_state *_state);
9237 double lravgerror(linearmodel* lm,
9238      /* Real    */ ae_matrix* xy,
9239      ae_int_t npoints,
9240      ae_state *_state);
9241 double lravgrelerror(linearmodel* lm,
9242      /* Real    */ ae_matrix* xy,
9243      ae_int_t npoints,
9244      ae_state *_state);
9245 void lrcopy(linearmodel* lm1, linearmodel* lm2, ae_state *_state);
9246 void lrlines(/* Real    */ ae_matrix* xy,
9247      /* Real    */ ae_vector* s,
9248      ae_int_t n,
9249      ae_int_t* info,
9250      double* a,
9251      double* b,
9252      double* vara,
9253      double* varb,
9254      double* covab,
9255      double* corrab,
9256      double* p,
9257      ae_state *_state);
9258 void lrline(/* Real    */ ae_matrix* xy,
9259      ae_int_t n,
9260      ae_int_t* info,
9261      double* a,
9262      double* b,
9263      ae_state *_state);
9264 void _linearmodel_init(void* _p, ae_state *_state, ae_bool make_automatic);
9265 void _linearmodel_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9266 void _linearmodel_clear(void* _p);
9267 void _linearmodel_destroy(void* _p);
9268 void _lrreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
9269 void _lrreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9270 void _lrreport_clear(void* _p);
9271 void _lrreport_destroy(void* _p);
9272 #endif
9273 #if defined(AE_COMPILE_FILTERS) || !defined(AE_PARTIAL_BUILD)
9274 void filtersma(/* Real    */ ae_vector* x,
9275      ae_int_t n,
9276      ae_int_t k,
9277      ae_state *_state);
9278 void filterema(/* Real    */ ae_vector* x,
9279      ae_int_t n,
9280      double alpha,
9281      ae_state *_state);
9282 void filterlrma(/* Real    */ ae_vector* x,
9283      ae_int_t n,
9284      ae_int_t k,
9285      ae_state *_state);
9286 #endif
9287 #if defined(AE_COMPILE_LOGIT) || !defined(AE_PARTIAL_BUILD)
9288 void mnltrainh(/* Real    */ ae_matrix* xy,
9289      ae_int_t npoints,
9290      ae_int_t nvars,
9291      ae_int_t nclasses,
9292      ae_int_t* info,
9293      logitmodel* lm,
9294      mnlreport* rep,
9295      ae_state *_state);
9296 void mnlprocess(logitmodel* lm,
9297      /* Real    */ ae_vector* x,
9298      /* Real    */ ae_vector* y,
9299      ae_state *_state);
9300 void mnlprocessi(logitmodel* lm,
9301      /* Real    */ ae_vector* x,
9302      /* Real    */ ae_vector* y,
9303      ae_state *_state);
9304 void mnlunpack(logitmodel* lm,
9305      /* Real    */ ae_matrix* a,
9306      ae_int_t* nvars,
9307      ae_int_t* nclasses,
9308      ae_state *_state);
9309 void mnlpack(/* Real    */ ae_matrix* a,
9310      ae_int_t nvars,
9311      ae_int_t nclasses,
9312      logitmodel* lm,
9313      ae_state *_state);
9314 void mnlcopy(logitmodel* lm1, logitmodel* lm2, ae_state *_state);
9315 double mnlavgce(logitmodel* lm,
9316      /* Real    */ ae_matrix* xy,
9317      ae_int_t npoints,
9318      ae_state *_state);
9319 double mnlrelclserror(logitmodel* lm,
9320      /* Real    */ ae_matrix* xy,
9321      ae_int_t npoints,
9322      ae_state *_state);
9323 double mnlrmserror(logitmodel* lm,
9324      /* Real    */ ae_matrix* xy,
9325      ae_int_t npoints,
9326      ae_state *_state);
9327 double mnlavgerror(logitmodel* lm,
9328      /* Real    */ ae_matrix* xy,
9329      ae_int_t npoints,
9330      ae_state *_state);
9331 double mnlavgrelerror(logitmodel* lm,
9332      /* Real    */ ae_matrix* xy,
9333      ae_int_t ssize,
9334      ae_state *_state);
9335 ae_int_t mnlclserror(logitmodel* lm,
9336      /* Real    */ ae_matrix* xy,
9337      ae_int_t npoints,
9338      ae_state *_state);
9339 void _logitmodel_init(void* _p, ae_state *_state, ae_bool make_automatic);
9340 void _logitmodel_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9341 void _logitmodel_clear(void* _p);
9342 void _logitmodel_destroy(void* _p);
9343 void _logitmcstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
9344 void _logitmcstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9345 void _logitmcstate_clear(void* _p);
9346 void _logitmcstate_destroy(void* _p);
9347 void _mnlreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
9348 void _mnlreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9349 void _mnlreport_clear(void* _p);
9350 void _mnlreport_destroy(void* _p);
9351 #endif
9352 #if defined(AE_COMPILE_MCPD) || !defined(AE_PARTIAL_BUILD)
9353 void mcpdcreate(ae_int_t n, mcpdstate* s, ae_state *_state);
9354 void mcpdcreateentry(ae_int_t n,
9355      ae_int_t entrystate,
9356      mcpdstate* s,
9357      ae_state *_state);
9358 void mcpdcreateexit(ae_int_t n,
9359      ae_int_t exitstate,
9360      mcpdstate* s,
9361      ae_state *_state);
9362 void mcpdcreateentryexit(ae_int_t n,
9363      ae_int_t entrystate,
9364      ae_int_t exitstate,
9365      mcpdstate* s,
9366      ae_state *_state);
9367 void mcpdaddtrack(mcpdstate* s,
9368      /* Real    */ ae_matrix* xy,
9369      ae_int_t k,
9370      ae_state *_state);
9371 void mcpdsetec(mcpdstate* s,
9372      /* Real    */ ae_matrix* ec,
9373      ae_state *_state);
9374 void mcpdaddec(mcpdstate* s,
9375      ae_int_t i,
9376      ae_int_t j,
9377      double c,
9378      ae_state *_state);
9379 void mcpdsetbc(mcpdstate* s,
9380      /* Real    */ ae_matrix* bndl,
9381      /* Real    */ ae_matrix* bndu,
9382      ae_state *_state);
9383 void mcpdaddbc(mcpdstate* s,
9384      ae_int_t i,
9385      ae_int_t j,
9386      double bndl,
9387      double bndu,
9388      ae_state *_state);
9389 void mcpdsetlc(mcpdstate* s,
9390      /* Real    */ ae_matrix* c,
9391      /* Integer */ ae_vector* ct,
9392      ae_int_t k,
9393      ae_state *_state);
9394 void mcpdsettikhonovregularizer(mcpdstate* s, double v, ae_state *_state);
9395 void mcpdsetprior(mcpdstate* s,
9396      /* Real    */ ae_matrix* pp,
9397      ae_state *_state);
9398 void mcpdsetpredictionweights(mcpdstate* s,
9399      /* Real    */ ae_vector* pw,
9400      ae_state *_state);
9401 void mcpdsolve(mcpdstate* s, ae_state *_state);
9402 void mcpdresults(mcpdstate* s,
9403      /* Real    */ ae_matrix* p,
9404      mcpdreport* rep,
9405      ae_state *_state);
9406 void _mcpdstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
9407 void _mcpdstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9408 void _mcpdstate_clear(void* _p);
9409 void _mcpdstate_destroy(void* _p);
9410 void _mcpdreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
9411 void _mcpdreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9412 void _mcpdreport_clear(void* _p);
9413 void _mcpdreport_destroy(void* _p);
9414 #endif
9415 #if defined(AE_COMPILE_MLPE) || !defined(AE_PARTIAL_BUILD)
9416 void mlpecreate0(ae_int_t nin,
9417      ae_int_t nout,
9418      ae_int_t ensemblesize,
9419      mlpensemble* ensemble,
9420      ae_state *_state);
9421 void mlpecreate1(ae_int_t nin,
9422      ae_int_t nhid,
9423      ae_int_t nout,
9424      ae_int_t ensemblesize,
9425      mlpensemble* ensemble,
9426      ae_state *_state);
9427 void mlpecreate2(ae_int_t nin,
9428      ae_int_t nhid1,
9429      ae_int_t nhid2,
9430      ae_int_t nout,
9431      ae_int_t ensemblesize,
9432      mlpensemble* ensemble,
9433      ae_state *_state);
9434 void mlpecreateb0(ae_int_t nin,
9435      ae_int_t nout,
9436      double b,
9437      double d,
9438      ae_int_t ensemblesize,
9439      mlpensemble* ensemble,
9440      ae_state *_state);
9441 void mlpecreateb1(ae_int_t nin,
9442      ae_int_t nhid,
9443      ae_int_t nout,
9444      double b,
9445      double d,
9446      ae_int_t ensemblesize,
9447      mlpensemble* ensemble,
9448      ae_state *_state);
9449 void mlpecreateb2(ae_int_t nin,
9450      ae_int_t nhid1,
9451      ae_int_t nhid2,
9452      ae_int_t nout,
9453      double b,
9454      double d,
9455      ae_int_t ensemblesize,
9456      mlpensemble* ensemble,
9457      ae_state *_state);
9458 void mlpecreater0(ae_int_t nin,
9459      ae_int_t nout,
9460      double a,
9461      double b,
9462      ae_int_t ensemblesize,
9463      mlpensemble* ensemble,
9464      ae_state *_state);
9465 void mlpecreater1(ae_int_t nin,
9466      ae_int_t nhid,
9467      ae_int_t nout,
9468      double a,
9469      double b,
9470      ae_int_t ensemblesize,
9471      mlpensemble* ensemble,
9472      ae_state *_state);
9473 void mlpecreater2(ae_int_t nin,
9474      ae_int_t nhid1,
9475      ae_int_t nhid2,
9476      ae_int_t nout,
9477      double a,
9478      double b,
9479      ae_int_t ensemblesize,
9480      mlpensemble* ensemble,
9481      ae_state *_state);
9482 void mlpecreatec0(ae_int_t nin,
9483      ae_int_t nout,
9484      ae_int_t ensemblesize,
9485      mlpensemble* ensemble,
9486      ae_state *_state);
9487 void mlpecreatec1(ae_int_t nin,
9488      ae_int_t nhid,
9489      ae_int_t nout,
9490      ae_int_t ensemblesize,
9491      mlpensemble* ensemble,
9492      ae_state *_state);
9493 void mlpecreatec2(ae_int_t nin,
9494      ae_int_t nhid1,
9495      ae_int_t nhid2,
9496      ae_int_t nout,
9497      ae_int_t ensemblesize,
9498      mlpensemble* ensemble,
9499      ae_state *_state);
9500 void mlpecreatefromnetwork(multilayerperceptron* network,
9501      ae_int_t ensemblesize,
9502      mlpensemble* ensemble,
9503      ae_state *_state);
9504 void mlpecopy(mlpensemble* ensemble1,
9505      mlpensemble* ensemble2,
9506      ae_state *_state);
9507 void mlperandomize(mlpensemble* ensemble, ae_state *_state);
9508 void mlpeproperties(mlpensemble* ensemble,
9509      ae_int_t* nin,
9510      ae_int_t* nout,
9511      ae_state *_state);
9512 ae_bool mlpeissoftmax(mlpensemble* ensemble, ae_state *_state);
9513 void mlpeprocess(mlpensemble* ensemble,
9514      /* Real    */ ae_vector* x,
9515      /* Real    */ ae_vector* y,
9516      ae_state *_state);
9517 void mlpeprocessi(mlpensemble* ensemble,
9518      /* Real    */ ae_vector* x,
9519      /* Real    */ ae_vector* y,
9520      ae_state *_state);
9521 void mlpeallerrorsx(mlpensemble* ensemble,
9522      /* Real    */ ae_matrix* densexy,
9523      sparsematrix* sparsexy,
9524      ae_int_t datasetsize,
9525      ae_int_t datasettype,
9526      /* Integer */ ae_vector* idx,
9527      ae_int_t subset0,
9528      ae_int_t subset1,
9529      ae_int_t subsettype,
9530      ae_shared_pool* buf,
9531      modelerrors* rep,
9532      ae_state *_state);
9533 void mlpeallerrorssparse(mlpensemble* ensemble,
9534      sparsematrix* xy,
9535      ae_int_t npoints,
9536      double* relcls,
9537      double* avgce,
9538      double* rms,
9539      double* avg,
9540      double* avgrel,
9541      ae_state *_state);
9542 double mlperelclserror(mlpensemble* ensemble,
9543      /* Real    */ ae_matrix* xy,
9544      ae_int_t npoints,
9545      ae_state *_state);
9546 double mlpeavgce(mlpensemble* ensemble,
9547      /* Real    */ ae_matrix* xy,
9548      ae_int_t npoints,
9549      ae_state *_state);
9550 double mlpermserror(mlpensemble* ensemble,
9551      /* Real    */ ae_matrix* xy,
9552      ae_int_t npoints,
9553      ae_state *_state);
9554 double mlpeavgerror(mlpensemble* ensemble,
9555      /* Real    */ ae_matrix* xy,
9556      ae_int_t npoints,
9557      ae_state *_state);
9558 double mlpeavgrelerror(mlpensemble* ensemble,
9559      /* Real    */ ae_matrix* xy,
9560      ae_int_t npoints,
9561      ae_state *_state);
9562 void mlpealloc(ae_serializer* s, mlpensemble* ensemble, ae_state *_state);
9563 void mlpeserialize(ae_serializer* s,
9564      mlpensemble* ensemble,
9565      ae_state *_state);
9566 void mlpeunserialize(ae_serializer* s,
9567      mlpensemble* ensemble,
9568      ae_state *_state);
9569 void _mlpensemble_init(void* _p, ae_state *_state, ae_bool make_automatic);
9570 void _mlpensemble_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9571 void _mlpensemble_clear(void* _p);
9572 void _mlpensemble_destroy(void* _p);
9573 #endif
9574 #if defined(AE_COMPILE_MLPTRAIN) || !defined(AE_PARTIAL_BUILD)
9575 void mlptrainlm(multilayerperceptron* network,
9576      /* Real    */ ae_matrix* xy,
9577      ae_int_t npoints,
9578      double decay,
9579      ae_int_t restarts,
9580      ae_int_t* info,
9581      mlpreport* rep,
9582      ae_state *_state);
9583 void mlptrainlbfgs(multilayerperceptron* network,
9584      /* Real    */ ae_matrix* xy,
9585      ae_int_t npoints,
9586      double decay,
9587      ae_int_t restarts,
9588      double wstep,
9589      ae_int_t maxits,
9590      ae_int_t* info,
9591      mlpreport* rep,
9592      ae_state *_state);
9593 void mlptraines(multilayerperceptron* network,
9594      /* Real    */ ae_matrix* trnxy,
9595      ae_int_t trnsize,
9596      /* Real    */ ae_matrix* valxy,
9597      ae_int_t valsize,
9598      double decay,
9599      ae_int_t restarts,
9600      ae_int_t* info,
9601      mlpreport* rep,
9602      ae_state *_state);
9603 void mlpkfoldcvlbfgs(multilayerperceptron* network,
9604      /* Real    */ ae_matrix* xy,
9605      ae_int_t npoints,
9606      double decay,
9607      ae_int_t restarts,
9608      double wstep,
9609      ae_int_t maxits,
9610      ae_int_t foldscount,
9611      ae_int_t* info,
9612      mlpreport* rep,
9613      mlpcvreport* cvrep,
9614      ae_state *_state);
9615 void mlpkfoldcvlm(multilayerperceptron* network,
9616      /* Real    */ ae_matrix* xy,
9617      ae_int_t npoints,
9618      double decay,
9619      ae_int_t restarts,
9620      ae_int_t foldscount,
9621      ae_int_t* info,
9622      mlpreport* rep,
9623      mlpcvreport* cvrep,
9624      ae_state *_state);
9625 void mlpkfoldcv(mlptrainer* s,
9626      multilayerperceptron* network,
9627      ae_int_t nrestarts,
9628      ae_int_t foldscount,
9629      mlpreport* rep,
9630      ae_state *_state);
9631 void mlpcreatetrainer(ae_int_t nin,
9632      ae_int_t nout,
9633      mlptrainer* s,
9634      ae_state *_state);
9635 void mlpcreatetrainercls(ae_int_t nin,
9636      ae_int_t nclasses,
9637      mlptrainer* s,
9638      ae_state *_state);
9639 void mlpsetdataset(mlptrainer* s,
9640      /* Real    */ ae_matrix* xy,
9641      ae_int_t npoints,
9642      ae_state *_state);
9643 void mlpsetsparsedataset(mlptrainer* s,
9644      sparsematrix* xy,
9645      ae_int_t npoints,
9646      ae_state *_state);
9647 void mlpsetdecay(mlptrainer* s, double decay, ae_state *_state);
9648 void mlpsetcond(mlptrainer* s,
9649      double wstep,
9650      ae_int_t maxits,
9651      ae_state *_state);
9652 void mlpsetalgobatch(mlptrainer* s, ae_state *_state);
9653 void mlptrainnetwork(mlptrainer* s,
9654      multilayerperceptron* network,
9655      ae_int_t nrestarts,
9656      mlpreport* rep,
9657      ae_state *_state);
9658 void mlpstarttraining(mlptrainer* s,
9659      multilayerperceptron* network,
9660      ae_bool randomstart,
9661      ae_state *_state);
9662 ae_bool mlpcontinuetraining(mlptrainer* s,
9663      multilayerperceptron* network,
9664      ae_state *_state);
9665 void mlpebagginglm(mlpensemble* ensemble,
9666      /* Real    */ ae_matrix* xy,
9667      ae_int_t npoints,
9668      double decay,
9669      ae_int_t restarts,
9670      ae_int_t* info,
9671      mlpreport* rep,
9672      mlpcvreport* ooberrors,
9673      ae_state *_state);
9674 void mlpebagginglbfgs(mlpensemble* ensemble,
9675      /* Real    */ ae_matrix* xy,
9676      ae_int_t npoints,
9677      double decay,
9678      ae_int_t restarts,
9679      double wstep,
9680      ae_int_t maxits,
9681      ae_int_t* info,
9682      mlpreport* rep,
9683      mlpcvreport* ooberrors,
9684      ae_state *_state);
9685 void mlpetraines(mlpensemble* ensemble,
9686      /* Real    */ ae_matrix* xy,
9687      ae_int_t npoints,
9688      double decay,
9689      ae_int_t restarts,
9690      ae_int_t* info,
9691      mlpreport* rep,
9692      ae_state *_state);
9693 void mlptrainensemblees(mlptrainer* s,
9694      mlpensemble* ensemble,
9695      ae_int_t nrestarts,
9696      mlpreport* rep,
9697      ae_state *_state);
9698 void _mlpreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
9699 void _mlpreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9700 void _mlpreport_clear(void* _p);
9701 void _mlpreport_destroy(void* _p);
9702 void _mlpcvreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
9703 void _mlpcvreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9704 void _mlpcvreport_clear(void* _p);
9705 void _mlpcvreport_destroy(void* _p);
9706 void _smlptrnsession_init(void* _p, ae_state *_state, ae_bool make_automatic);
9707 void _smlptrnsession_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9708 void _smlptrnsession_clear(void* _p);
9709 void _smlptrnsession_destroy(void* _p);
9710 void _mlpetrnsession_init(void* _p, ae_state *_state, ae_bool make_automatic);
9711 void _mlpetrnsession_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9712 void _mlpetrnsession_clear(void* _p);
9713 void _mlpetrnsession_destroy(void* _p);
9714 void _mlptrainer_init(void* _p, ae_state *_state, ae_bool make_automatic);
9715 void _mlptrainer_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9716 void _mlptrainer_clear(void* _p);
9717 void _mlptrainer_destroy(void* _p);
9718 void _mlpparallelizationcv_init(void* _p, ae_state *_state, ae_bool make_automatic);
9719 void _mlpparallelizationcv_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9720 void _mlpparallelizationcv_clear(void* _p);
9721 void _mlpparallelizationcv_destroy(void* _p);
9722 #endif
9723 #if defined(AE_COMPILE_CLUSTERING) || !defined(AE_PARTIAL_BUILD)
9724 void clusterizercreate(clusterizerstate* s, ae_state *_state);
9725 void clusterizersetpoints(clusterizerstate* s,
9726      /* Real    */ ae_matrix* xy,
9727      ae_int_t npoints,
9728      ae_int_t nfeatures,
9729      ae_int_t disttype,
9730      ae_state *_state);
9731 void clusterizersetdistances(clusterizerstate* s,
9732      /* Real    */ ae_matrix* d,
9733      ae_int_t npoints,
9734      ae_bool isupper,
9735      ae_state *_state);
9736 void clusterizersetahcalgo(clusterizerstate* s,
9737      ae_int_t algo,
9738      ae_state *_state);
9739 void clusterizersetkmeanslimits(clusterizerstate* s,
9740      ae_int_t restarts,
9741      ae_int_t maxits,
9742      ae_state *_state);
9743 void clusterizersetkmeansinit(clusterizerstate* s,
9744      ae_int_t initalgo,
9745      ae_state *_state);
9746 void clusterizersetseed(clusterizerstate* s,
9747      ae_int_t seed,
9748      ae_state *_state);
9749 void clusterizerrunahc(clusterizerstate* s,
9750      ahcreport* rep,
9751      ae_state *_state);
9752 void clusterizerrunkmeans(clusterizerstate* s,
9753      ae_int_t k,
9754      kmeansreport* rep,
9755      ae_state *_state);
9756 void clusterizergetdistances(/* Real    */ ae_matrix* xy,
9757      ae_int_t npoints,
9758      ae_int_t nfeatures,
9759      ae_int_t disttype,
9760      /* Real    */ ae_matrix* d,
9761      ae_state *_state);
9762 void clusterizergetdistancesbuf(apbuffers* buf,
9763      /* Real    */ ae_matrix* xy,
9764      ae_int_t npoints,
9765      ae_int_t nfeatures,
9766      ae_int_t disttype,
9767      /* Real    */ ae_matrix* d,
9768      ae_state *_state);
9769 void clusterizergetkclusters(ahcreport* rep,
9770      ae_int_t k,
9771      /* Integer */ ae_vector* cidx,
9772      /* Integer */ ae_vector* cz,
9773      ae_state *_state);
9774 void clusterizerseparatedbydist(ahcreport* rep,
9775      double r,
9776      ae_int_t* k,
9777      /* Integer */ ae_vector* cidx,
9778      /* Integer */ ae_vector* cz,
9779      ae_state *_state);
9780 void clusterizerseparatedbycorr(ahcreport* rep,
9781      double r,
9782      ae_int_t* k,
9783      /* Integer */ ae_vector* cidx,
9784      /* Integer */ ae_vector* cz,
9785      ae_state *_state);
9786 void kmeansinitbuf(kmeansbuffers* buf, ae_state *_state);
9787 void kmeansgenerateinternal(/* Real    */ ae_matrix* xy,
9788      ae_int_t npoints,
9789      ae_int_t nvars,
9790      ae_int_t k,
9791      ae_int_t initalgo,
9792      ae_int_t seed,
9793      ae_int_t maxits,
9794      ae_int_t restarts,
9795      ae_bool kmeansdbgnoits,
9796      ae_int_t* info,
9797      ae_int_t* iterationscount,
9798      /* Real    */ ae_matrix* ccol,
9799      ae_bool needccol,
9800      /* Real    */ ae_matrix* crow,
9801      ae_bool needcrow,
9802      /* Integer */ ae_vector* xyc,
9803      double* energy,
9804      kmeansbuffers* buf,
9805      ae_state *_state);
9806 void kmeansupdatedistances(/* Real    */ ae_matrix* xy,
9807      ae_int_t idx0,
9808      ae_int_t idx1,
9809      ae_int_t nvars,
9810      /* Real    */ ae_matrix* ct,
9811      ae_int_t cidx0,
9812      ae_int_t cidx1,
9813      /* Integer */ ae_vector* xyc,
9814      /* Real    */ ae_vector* xydist2,
9815      ae_shared_pool* bufferpool,
9816      ae_state *_state);
9817 ae_bool _trypexec_kmeansupdatedistances(/* Real    */ ae_matrix* xy,
9818     ae_int_t idx0,
9819     ae_int_t idx1,
9820     ae_int_t nvars,
9821     /* Real    */ ae_matrix* ct,
9822     ae_int_t cidx0,
9823     ae_int_t cidx1,
9824     /* Integer */ ae_vector* xyc,
9825     /* Real    */ ae_vector* xydist2,
9826     ae_shared_pool* bufferpool, ae_state *_state);
9827 void _kmeansbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic);
9828 void _kmeansbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9829 void _kmeansbuffers_clear(void* _p);
9830 void _kmeansbuffers_destroy(void* _p);
9831 void _clusterizerstate_init(void* _p, ae_state *_state, ae_bool make_automatic);
9832 void _clusterizerstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9833 void _clusterizerstate_clear(void* _p);
9834 void _clusterizerstate_destroy(void* _p);
9835 void _ahcreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
9836 void _ahcreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9837 void _ahcreport_clear(void* _p);
9838 void _ahcreport_destroy(void* _p);
9839 void _kmeansreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
9840 void _kmeansreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9841 void _kmeansreport_clear(void* _p);
9842 void _kmeansreport_destroy(void* _p);
9843 #endif
9844 #if defined(AE_COMPILE_DFOREST) || !defined(AE_PARTIAL_BUILD)
9845 void dfcreatebuffer(decisionforest* model,
9846      decisionforestbuffer* buf,
9847      ae_state *_state);
9848 void dfbuildercreate(decisionforestbuilder* s, ae_state *_state);
9849 void dfbuildersetdataset(decisionforestbuilder* s,
9850      /* Real    */ ae_matrix* xy,
9851      ae_int_t npoints,
9852      ae_int_t nvars,
9853      ae_int_t nclasses,
9854      ae_state *_state);
9855 void dfbuildersetrndvars(decisionforestbuilder* s,
9856      ae_int_t rndvars,
9857      ae_state *_state);
9858 void dfbuildersetrndvarsratio(decisionforestbuilder* s,
9859      double f,
9860      ae_state *_state);
9861 void dfbuildersetrndvarsauto(decisionforestbuilder* s, ae_state *_state);
9862 void dfbuildersetsubsampleratio(decisionforestbuilder* s,
9863      double f,
9864      ae_state *_state);
9865 void dfbuildersetseed(decisionforestbuilder* s,
9866      ae_int_t seedval,
9867      ae_state *_state);
9868 void dfbuildersetrdfalgo(decisionforestbuilder* s,
9869      ae_int_t algotype,
9870      ae_state *_state);
9871 void dfbuildersetrdfsplitstrength(decisionforestbuilder* s,
9872      ae_int_t splitstrength,
9873      ae_state *_state);
9874 double dfbuildergetprogress(decisionforestbuilder* s, ae_state *_state);
9875 double dfbuilderpeekprogress(decisionforestbuilder* s, ae_state *_state);
9876 void dfbuilderbuildrandomforest(decisionforestbuilder* s,
9877      ae_int_t ntrees,
9878      decisionforest* df,
9879      dfreport* rep,
9880      ae_state *_state);
9881 void dfprocess(decisionforest* df,
9882      /* Real    */ ae_vector* x,
9883      /* Real    */ ae_vector* y,
9884      ae_state *_state);
9885 void dfprocessi(decisionforest* df,
9886      /* Real    */ ae_vector* x,
9887      /* Real    */ ae_vector* y,
9888      ae_state *_state);
9889 double dfprocess0(decisionforest* model,
9890      /* Real    */ ae_vector* x,
9891      ae_state *_state);
9892 ae_int_t dfclassify(decisionforest* model,
9893      /* Real    */ ae_vector* x,
9894      ae_state *_state);
9895 void dftsprocess(decisionforest* df,
9896      decisionforestbuffer* buf,
9897      /* Real    */ ae_vector* x,
9898      /* Real    */ ae_vector* y,
9899      ae_state *_state);
9900 double dfrelclserror(decisionforest* df,
9901      /* Real    */ ae_matrix* xy,
9902      ae_int_t npoints,
9903      ae_state *_state);
9904 double dfavgce(decisionforest* df,
9905      /* Real    */ ae_matrix* xy,
9906      ae_int_t npoints,
9907      ae_state *_state);
9908 double dfrmserror(decisionforest* df,
9909      /* Real    */ ae_matrix* xy,
9910      ae_int_t npoints,
9911      ae_state *_state);
9912 double dfavgerror(decisionforest* df,
9913      /* Real    */ ae_matrix* xy,
9914      ae_int_t npoints,
9915      ae_state *_state);
9916 double dfavgrelerror(decisionforest* df,
9917      /* Real    */ ae_matrix* xy,
9918      ae_int_t npoints,
9919      ae_state *_state);
9920 void dfcopy(decisionforest* df1, decisionforest* df2, ae_state *_state);
9921 void dfalloc(ae_serializer* s, decisionforest* forest, ae_state *_state);
9922 void dfserialize(ae_serializer* s,
9923      decisionforest* forest,
9924      ae_state *_state);
9925 void dfunserialize(ae_serializer* s,
9926      decisionforest* forest,
9927      ae_state *_state);
9928 void dfbuildrandomdecisionforest(/* Real    */ ae_matrix* xy,
9929      ae_int_t npoints,
9930      ae_int_t nvars,
9931      ae_int_t nclasses,
9932      ae_int_t ntrees,
9933      double r,
9934      ae_int_t* info,
9935      decisionforest* df,
9936      dfreport* rep,
9937      ae_state *_state);
9938 void dfbuildrandomdecisionforestx1(/* Real    */ ae_matrix* xy,
9939      ae_int_t npoints,
9940      ae_int_t nvars,
9941      ae_int_t nclasses,
9942      ae_int_t ntrees,
9943      ae_int_t nrndvars,
9944      double r,
9945      ae_int_t* info,
9946      decisionforest* df,
9947      dfreport* rep,
9948      ae_state *_state);
9949 void dfbuildinternal(/* Real    */ ae_matrix* xy,
9950      ae_int_t npoints,
9951      ae_int_t nvars,
9952      ae_int_t nclasses,
9953      ae_int_t ntrees,
9954      ae_int_t samplesize,
9955      ae_int_t nfeatures,
9956      ae_int_t flags,
9957      ae_int_t* info,
9958      decisionforest* df,
9959      dfreport* rep,
9960      ae_state *_state);
9961 void _decisionforestbuilder_init(void* _p, ae_state *_state, ae_bool make_automatic);
9962 void _decisionforestbuilder_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9963 void _decisionforestbuilder_clear(void* _p);
9964 void _decisionforestbuilder_destroy(void* _p);
9965 void _dfworkbuf_init(void* _p, ae_state *_state, ae_bool make_automatic);
9966 void _dfworkbuf_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9967 void _dfworkbuf_clear(void* _p);
9968 void _dfworkbuf_destroy(void* _p);
9969 void _dfvotebuf_init(void* _p, ae_state *_state, ae_bool make_automatic);
9970 void _dfvotebuf_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9971 void _dfvotebuf_clear(void* _p);
9972 void _dfvotebuf_destroy(void* _p);
9973 void _dftreebuf_init(void* _p, ae_state *_state, ae_bool make_automatic);
9974 void _dftreebuf_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9975 void _dftreebuf_clear(void* _p);
9976 void _dftreebuf_destroy(void* _p);
9977 void _decisionforestbuffer_init(void* _p, ae_state *_state, ae_bool make_automatic);
9978 void _decisionforestbuffer_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9979 void _decisionforestbuffer_clear(void* _p);
9980 void _decisionforestbuffer_destroy(void* _p);
9981 void _decisionforest_init(void* _p, ae_state *_state, ae_bool make_automatic);
9982 void _decisionforest_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9983 void _decisionforest_clear(void* _p);
9984 void _decisionforest_destroy(void* _p);
9985 void _dfreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
9986 void _dfreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9987 void _dfreport_clear(void* _p);
9988 void _dfreport_destroy(void* _p);
9989 void _dfinternalbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic);
9990 void _dfinternalbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
9991 void _dfinternalbuffers_clear(void* _p);
9992 void _dfinternalbuffers_destroy(void* _p);
9993 #endif
9994 #if defined(AE_COMPILE_KNN) || !defined(AE_PARTIAL_BUILD)
9995 void knncreatebuffer(knnmodel* model, knnbuffer* buf, ae_state *_state);
9996 void knnbuildercreate(knnbuilder* s, ae_state *_state);
9997 void knnbuildersetdatasetreg(knnbuilder* s,
9998      /* Real    */ ae_matrix* xy,
9999      ae_int_t npoints,
10000      ae_int_t nvars,
10001      ae_int_t nout,
10002      ae_state *_state);
10003 void knnbuildersetdatasetcls(knnbuilder* s,
10004      /* Real    */ ae_matrix* xy,
10005      ae_int_t npoints,
10006      ae_int_t nvars,
10007      ae_int_t nclasses,
10008      ae_state *_state);
10009 void knnbuildersetnorm(knnbuilder* s, ae_int_t nrmtype, ae_state *_state);
10010 void knnbuilderbuildknnmodel(knnbuilder* s,
10011      ae_int_t k,
10012      double eps,
10013      knnmodel* model,
10014      knnreport* rep,
10015      ae_state *_state);
10016 void knnrewritekeps(knnmodel* model,
10017      ae_int_t k,
10018      double eps,
10019      ae_state *_state);
10020 void knnprocess(knnmodel* model,
10021      /* Real    */ ae_vector* x,
10022      /* Real    */ ae_vector* y,
10023      ae_state *_state);
10024 double knnprocess0(knnmodel* model,
10025      /* Real    */ ae_vector* x,
10026      ae_state *_state);
10027 ae_int_t knnclassify(knnmodel* model,
10028      /* Real    */ ae_vector* x,
10029      ae_state *_state);
10030 void knnprocessi(knnmodel* model,
10031      /* Real    */ ae_vector* x,
10032      /* Real    */ ae_vector* y,
10033      ae_state *_state);
10034 void knntsprocess(knnmodel* model,
10035      knnbuffer* buf,
10036      /* Real    */ ae_vector* x,
10037      /* Real    */ ae_vector* y,
10038      ae_state *_state);
10039 double knnrelclserror(knnmodel* model,
10040      /* Real    */ ae_matrix* xy,
10041      ae_int_t npoints,
10042      ae_state *_state);
10043 double knnavgce(knnmodel* model,
10044      /* Real    */ ae_matrix* xy,
10045      ae_int_t npoints,
10046      ae_state *_state);
10047 double knnrmserror(knnmodel* model,
10048      /* Real    */ ae_matrix* xy,
10049      ae_int_t npoints,
10050      ae_state *_state);
10051 double knnavgerror(knnmodel* model,
10052      /* Real    */ ae_matrix* xy,
10053      ae_int_t npoints,
10054      ae_state *_state);
10055 double knnavgrelerror(knnmodel* model,
10056      /* Real    */ ae_matrix* xy,
10057      ae_int_t npoints,
10058      ae_state *_state);
10059 void knnallerrors(knnmodel* model,
10060      /* Real    */ ae_matrix* xy,
10061      ae_int_t npoints,
10062      knnreport* rep,
10063      ae_state *_state);
10064 void knnalloc(ae_serializer* s, knnmodel* model, ae_state *_state);
10065 void knnserialize(ae_serializer* s, knnmodel* model, ae_state *_state);
10066 void knnunserialize(ae_serializer* s, knnmodel* model, ae_state *_state);
10067 void _knnbuffer_init(void* _p, ae_state *_state, ae_bool make_automatic);
10068 void _knnbuffer_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10069 void _knnbuffer_clear(void* _p);
10070 void _knnbuffer_destroy(void* _p);
10071 void _knnbuilder_init(void* _p, ae_state *_state, ae_bool make_automatic);
10072 void _knnbuilder_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10073 void _knnbuilder_clear(void* _p);
10074 void _knnbuilder_destroy(void* _p);
10075 void _knnmodel_init(void* _p, ae_state *_state, ae_bool make_automatic);
10076 void _knnmodel_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10077 void _knnmodel_clear(void* _p);
10078 void _knnmodel_destroy(void* _p);
10079 void _knnreport_init(void* _p, ae_state *_state, ae_bool make_automatic);
10080 void _knnreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic);
10081 void _knnreport_clear(void* _p);
10082 void _knnreport_destroy(void* _p);
10083 #endif
10084 #if defined(AE_COMPILE_DATACOMP) || !defined(AE_PARTIAL_BUILD)
10085 void kmeansgenerate(/* Real    */ ae_matrix* xy,
10086      ae_int_t npoints,
10087      ae_int_t nvars,
10088      ae_int_t k,
10089      ae_int_t restarts,
10090      ae_int_t* info,
10091      /* Real    */ ae_matrix* c,
10092      /* Integer */ ae_vector* xyc,
10093      ae_state *_state);
10094 #endif
10095 
10096 }
10097 #endif
10098 
10099