1 #ifndef FANN_CPP_H_INCLUDED
2 #define FANN_CPP_H_INCLUDED
3 
4 /*
5  *
6  *  Fast Artificial Neural Network (fann) C++ Wrapper
7  *  Copyright (C) 2004-2006 created by freegoldbar (at) yahoo dot com
8  *
9  *  This wrapper is free software; you can redistribute it and/or
10  *  modify it under the terms of the GNU Lesser General Public
11  *  License as published by the Free Software Foundation; either
12  *  version 2.1 of the License, or (at your option) any later version.
13  *
14  *  This wrapper is distributed in the hope that it will be useful,
15  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
16  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  *  Lesser General Public License for more details.
18  *
19  *  You should have received a copy of the GNU Lesser General Public
20  *  License along with this library; if not, write to the Free Software
21  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
22  *
23  */
24 
25 /*
26  *  Title: FANN Wrapper for C++
27  *
28  *  Overview:
29  *
30  *  The Fann Wrapper for C++ provides two classes: <neural_net>
31  *  and <training_data>. To use the wrapper include
32  *  doublefann.h, floatfann.h or fixedfann.h before the
33  *  fann_cpp.h header file. To get started see xor_sample.cpp
34  *  in the examples directory. The license is LGPL. Copyright (C)
35  *  2004-2006 created by <freegoldbar@yahoo.com>.
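 *
 *  For example, to use the double precision version (a minimal sketch;
 *  only the include order matters here):
 *  >#include "doublefann.h"
 *  >#include "fann_cpp.h"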
36  *
37  *  Note:  Notes and differences from C API
38  *
39  *  -  The Fann Wrapper for C++ is a minimal wrapper without use of
40  *       templates or exception handling for efficient use in any environment.
 *       Benefits include stricter type checking, simpler memory
 *       management and possibly code completion in your editor.
43  *  -  Method names are the same as the function names in the C
44  *       API except the fann_ prefix has been removed. Enums in the
45  *       namespace are similarly defined without the FANN_ prefix.
46  *  -  The arguments to the methods are the same as the C API
47  *       except that the struct fann *ann/struct fann_train_data *data
48  *       arguments are encapsulated so they are not present in the
49  *       method signatures or are translated into class references.
50  *  -  The various create methods return a boolean set to true to
51  *       indicate that the neural network was created, false otherwise.
52  *       The same goes for the read_train_from_file method.
 *  -  The neural network and training data are automatically cleaned
 *       up in the destructors and create/read methods.
55  *  -  To make the destructors virtual define USE_VIRTUAL_DESTRUCTOR
56  *       before including the header file.
57  *  -  Additional methods are available on the training_data class to
58  *       give access to the underlying training data. They are get_input,
59  *       get_output and set_train_data. Finally fann_duplicate_train_data
60  *       has been replaced by a copy constructor.
61  *
62  *  Note: Changes
63  *
64  *  Version 2.2.0:
65  *     - General update to fann C library 2.2.0 with support for new functionality
66  *
67  *  Version 2.1.0:
68  *     - General update to fann C library 2.1.0 with support for new functionality
69  *     - Due to changes in the C API the C++ API is not fully backward compatible:
70  *        The create methods have changed names and parameters.
71  *        The training callback function has different parameters and a set_callback.
72  *        Some <training_data> methods have updated names.
73  *        Get activation function and steepness is available for neurons, not layers.
74  *     - Extensions are now part of fann so there is no fann_extensions.h
75  *
76  *  Version 1.2.0:
77  *     - Changed char pointers to const std::string references
78  *     - Added const_casts where the C API required it
79  *     - Initialized enums from the C enums instead of numeric constants
 *     - Added a method set_train_data that copies and allocates training
 *       data in a way that is compatible with the way the C API deallocates
 *       the data, thus making it possible to change training data.
 *     - Fixed the get_rprop_increase_factor method, which did not return its value
84  *
85  *  Version 1.0.0:
86  *     - Initial version
87  *
88  */
89 
90 #include <stdarg.h>
91 #include <string>
92 
93 /* Namespace: FANN
94     The FANN namespace groups the C++ wrapper definitions */
95 namespace FANN
96 {
97     /* Enum: error_function_enum
98 	    Error function used during training.
99 
100 	    ERRORFUNC_LINEAR - Standard linear error function.
	    ERRORFUNC_TANH - Tanh error function, usually better
		    but can require a lower learning rate. This error function aggressively targets outputs that
		    differ much from the desired values, while not targeting outputs that only differ a little.
		    This error function is not recommended for cascade training and incremental training.
105 
106 	    See also:
107 		    <neural_net::set_train_error_function>, <neural_net::get_train_error_function>
108     */
109     enum error_function_enum {
110         ERRORFUNC_LINEAR = FANN_ERRORFUNC_LINEAR,
111         ERRORFUNC_TANH
112     };
113 
114     /* Enum: stop_function_enum
115 	    Stop criteria used during training.
116 
	    STOPFUNC_MSE - The stop criterion is the Mean Square Error (MSE) value.
	    STOPFUNC_BIT - The stop criterion is the number of bits that fail. The number of bits means the
		    number of output neurons which differ by more than the bit fail limit
		    (see <neural_net::get_bit_fail_limit>, <neural_net::set_bit_fail_limit>).
		    The bits are counted in all of the training data, so this number can be higher than
		    the number of training patterns.
123 
124 	    See also:
125 		    <neural_net::set_train_stop_function>, <neural_net::get_train_stop_function>
126     */
127     enum stop_function_enum
128     {
129 	    STOPFUNC_MSE = FANN_STOPFUNC_MSE,
130 	    STOPFUNC_BIT
131     };
132 
133     /* Enum: training_algorithm_enum
	    The training algorithms used when training on <training_data> with functions like
	    <neural_net::train_on_data> or <neural_net::train_on_file>. Incremental training
        alters the weights after each input pattern has been presented, while batch training
        only alters the weights once after the whole set of patterns has been presented.
138 
	    TRAIN_INCREMENTAL -  Standard backpropagation algorithm, where the weights are
		    updated after each training pattern. This means that the weights are updated many
		    times during a single epoch. For this reason some problems will train very fast with
		    this algorithm, while other more advanced problems will not train very well.
	    TRAIN_BATCH -  Standard backpropagation algorithm, where the weights are updated after
		    calculating the mean square error for the whole training set. This means that the weights
		    are only updated once during an epoch. For this reason some problems will train slower with
		    this algorithm. But since the mean square error is calculated more correctly than in
		    incremental training, some problems will reach a better solution with this algorithm.
	    TRAIN_RPROP - A more advanced batch training algorithm which achieves good results
		    for many problems. The RPROP training algorithm is adaptive and therefore does not
		    use the learning_rate. Some other parameters can however be set to change the way the
		    RPROP algorithm works, but changing them is only recommended for users with insight into
		    how the RPROP training algorithm works. The RPROP training algorithm is described by
		    [Riedmiller and Braun, 1993], but the actual learning algorithm used here is the
		    iRPROP- training algorithm, which is described by [Igel and Husken, 2000] and
		    is a variant of the standard RPROP training algorithm.
	    TRAIN_QUICKPROP - A more advanced batch training algorithm which achieves good results
		    for many problems. The quickprop training algorithm uses the learning_rate parameter
		    along with other more advanced parameters, but changing these advanced parameters is
		    only recommended for users with insight into how the quickprop training algorithm works.
		    The quickprop training algorithm is described by [Fahlman, 1988].
161 
162 	    See also:
163 		    <neural_net::set_training_algorithm>, <neural_net::get_training_algorithm>
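
	    Example:
		    A minimal sketch selecting an algorithm on an existing network (the
		    network object and its 2-3-1 layout are illustrative assumptions):
		    >FANN::neural_net net;
		    >net.create_standard(3, 2, 3, 1);
		    >net.set_training_algorithm(FANN::TRAIN_RPROP);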
164     */
165     enum training_algorithm_enum {
166         TRAIN_INCREMENTAL = FANN_TRAIN_INCREMENTAL,
167         TRAIN_BATCH,
168         TRAIN_RPROP,
169         TRAIN_QUICKPROP,
        TRAIN_SARPROP
171     };
172 
173     /* Enum: activation_function_enum
174 
175 	    The activation functions used for the neurons during training. The activation functions
176 	    can either be defined for a group of neurons by <neural_net::set_activation_function_hidden>
177         and <neural_net::set_activation_function_output> or it can be defined for a single neuron by
178         <neural_net::set_activation_function>.
179 
180 	    The steepness of an activation function is defined in the same way by
181 	    <neural_net::set_activation_steepness_hidden>, <neural_net::set_activation_steepness_output>
182         and <neural_net::set_activation_steepness>.
183 
       The functions are described with equations where:
       * x is the input to the activation function,
       * y is the output,
       * s is the steepness and
       * d is the derivative.
189 
190        FANN_LINEAR - Linear activation function.
191          * span: -inf < y < inf
192 	     * y = x*s, d = 1*s
193 	     * Can NOT be used in fixed point.
194 
195        FANN_THRESHOLD - Threshold activation function.
196 	     * x < 0 -> y = 0, x >= 0 -> y = 1
197 	     * Can NOT be used during training.
198 
       FANN_THRESHOLD_SYMMETRIC - Symmetric threshold activation function.
	     * x < 0 -> y = -1, x >= 0 -> y = 1
201 	     * Can NOT be used during training.
202 
203        FANN_SIGMOID - Sigmoid activation function.
204 	     * One of the most used activation functions.
205 	     * span: 0 < y < 1
206 	     * y = 1/(1 + exp(-2*s*x))
207 	     * d = 2*s*y*(1 - y)
208 
209        FANN_SIGMOID_STEPWISE - Stepwise linear approximation to sigmoid.
210 	     * Faster than sigmoid but a bit less precise.
211 
212        FANN_SIGMOID_SYMMETRIC - Symmetric sigmoid activation function, aka. tanh.
213 	     * One of the most used activation functions.
214 	     * span: -1 < y < 1
215 	     * y = tanh(s*x) = 2/(1 + exp(-2*s*x)) - 1
216 	     * d = s*(1-(y*y))
217 
       FANN_SIGMOID_SYMMETRIC_STEPWISE - Stepwise linear approximation to symmetric sigmoid.
219 	     * Faster than symmetric sigmoid but a bit less precise.
220 
221        FANN_GAUSSIAN - Gaussian activation function.
222 	     * 0 when x = -inf, 1 when x = 0 and 0 when x = inf
223 	     * span: 0 < y < 1
224 	     * y = exp(-x*s*x*s)
225 	     * d = -2*x*s*y*s
226 
227        FANN_GAUSSIAN_SYMMETRIC - Symmetric gaussian activation function.
	     * -1 when x = -inf, 1 when x = 0 and -1 when x = inf
229 	     * span: -1 < y < 1
230 	     * y = exp(-x*s*x*s)*2-1
231 	     * d = -2*x*s*(y+1)*s
232 
233        FANN_ELLIOT - Fast (sigmoid like) activation function defined by David Elliott
234 	     * span: 0 < y < 1
235 	     * y = ((x*s) / 2) / (1 + |x*s|) + 0.5
236 	     * d = s*1/(2*(1+|x*s|)*(1+|x*s|))
237 
238        FANN_ELLIOT_SYMMETRIC - Fast (symmetric sigmoid like) activation function defined by David Elliott
239 	     * span: -1 < y < 1
240 	     * y = (x*s) / (1 + |x*s|)
241 	     * d = s*1/((1+|x*s|)*(1+|x*s|))
242 
243 	    FANN_LINEAR_PIECE - Bounded linear activation function.
244 	     * span: 0 < y < 1
245 	     * y = x*s, d = 1*s
246 
247 	    FANN_LINEAR_PIECE_SYMMETRIC - Bounded Linear activation function.
248 	     * span: -1 < y < 1
249 	     * y = x*s, d = 1*s
250 
        FANN_SIN_SYMMETRIC - Periodic sine activation function.
252          * span: -1 <= y <= 1
253          * y = sin(x*s)
254          * d = s*cos(x*s)
255 
        FANN_COS_SYMMETRIC - Periodic cosine activation function.
257          * span: -1 <= y <= 1
258          * y = cos(x*s)
259          * d = s*-sin(x*s)
260 
261 	    See also:
262 		    <neural_net::set_activation_function_hidden>,
263 		    <neural_net::set_activation_function_output>
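
	    Example:
		    A minimal sketch setting the activation functions of an existing network
		    (the network object and its 2-3-1 layout are illustrative assumptions):
		    >FANN::neural_net net;
		    >net.create_standard(3, 2, 3, 1);
		    >net.set_activation_function_hidden(FANN::SIGMOID_SYMMETRIC);
		    >net.set_activation_function_output(FANN::SIGMOID);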
264     */
265     enum activation_function_enum {
266         LINEAR = FANN_LINEAR,
267         THRESHOLD,
268         THRESHOLD_SYMMETRIC,
269         SIGMOID,
270         SIGMOID_STEPWISE,
271         SIGMOID_SYMMETRIC,
272         SIGMOID_SYMMETRIC_STEPWISE,
273         GAUSSIAN,
274         GAUSSIAN_SYMMETRIC,
275         GAUSSIAN_STEPWISE,
276         ELLIOT,
277         ELLIOT_SYMMETRIC,
278         LINEAR_PIECE,
279         LINEAR_PIECE_SYMMETRIC,
280 	    SIN_SYMMETRIC,
281 	    COS_SYMMETRIC
282     };
283 
284     /* Enum: network_type_enum
285 
286         Definition of network types used by <neural_net::get_network_type>
287 
288         LAYER - Each layer only has connections to the next layer
289         SHORTCUT - Each layer has connections to all following layers
290 
291        See Also:
292           <neural_net::get_network_type>, <fann_get_network_type>
293 
294        This enumeration appears in FANN >= 2.1.0
295     */
296     enum network_type_enum
297     {
298         LAYER = FANN_NETTYPE_LAYER,
299         SHORTCUT
300     };
301 
302     /* Type: connection
303 
304         Describes a connection between two neurons and its weight
305 
306         from_neuron - Unique number used to identify source neuron
307         to_neuron - Unique number used to identify destination neuron
308         weight - The numerical value of the weight
309 
310         See Also:
311             <neural_net::get_connection_array>, <neural_net::set_weight_array>
312 
313        This structure appears in FANN >= 2.1.0
314     */
315     typedef struct fann_connection connection;
316 
317     /* Forward declaration of class neural_net and training_data */
318     class neural_net;
319     class training_data;
320 
321     /* Type: callback_type
322        This callback function can be called during training when using <neural_net::train_on_data>,
323        <neural_net::train_on_file> or <neural_net::cascadetrain_on_data>.
324 
325         >typedef int (*callback_type) (neural_net &net, training_data &train,
326         >    unsigned int max_epochs, unsigned int epochs_between_reports,
327         >    float desired_error, unsigned int epochs, void *user_data);
328 
	    The callback can be set by using <neural_net::set_callback> and is very useful for doing custom
	    things during training. It is recommended to use this function when implementing custom
	    training procedures, or when visualizing the training in a GUI etc. The parameters which the
	    callback function takes are the parameters given to <neural_net::train_on_data>, plus an epochs
	    parameter which tells how many epochs the training has taken so far.

	    The callback function should return an integer; if the callback function returns -1, the training
	    will terminate.
337 
338 	    Example of a callback function that prints information to cout:
339             >int print_callback(FANN::neural_net &net, FANN::training_data &train,
340             >    unsigned int max_epochs, unsigned int epochs_between_reports,
341             >    float desired_error, unsigned int epochs, void *user_data)
342             >{
343             >    cout << "Epochs     " << setw(8) << epochs << ". "
344             >         << "Current Error: " << left << net.get_MSE() << right << endl;
345             >    return 0;
346             >}
347 
348 	    See also:
349 		    <neural_net::set_callback>, <fann_callback_type>
350      */
351     typedef int (*callback_type) (neural_net &net, training_data &train,
352         unsigned int max_epochs, unsigned int epochs_between_reports,
353         float desired_error, unsigned int epochs, void *user_data);
354 
355     /*************************************************************************/
356 
357     /* Class: training_data
358 
359         Encapsulation of a training data set <struct fann_train_data> and
360         associated C API functions.
361     */
362     class training_data
363     {
364     public:
365         /* Constructor: training_data
366 
            Default constructor creates an empty training data set.
            Use <read_train_from_file>, <set_train_data> or <create_train_from_callback> to initialize.
        */
        training_data() : train_data(NULL)
371         {
372         }
373 
374         /* Constructor: training_data
375 
376             Copy constructor constructs a copy of the training data.
377             Corresponds to the C API <fann_duplicate_train_data> function.
378         */
        training_data(const training_data &data) : train_data(NULL)
        {
            // train_data is initialized to NULL above; calling destroy_train() here
            // would otherwise read an uninitialized pointer.
            if (data.train_data != NULL)
            {
                train_data = fann_duplicate_train_data(data.train_data);
            }
        }
387 
388         /* Destructor: ~training_data
389 
390             Provides automatic cleanup of data.
391             Define USE_VIRTUAL_DESTRUCTOR if you need the destructor to be virtual.
392 
393             See also:
394                 <destroy>
395         */
396 #ifdef USE_VIRTUAL_DESTRUCTOR
397         virtual
398 #endif
        ~training_data()
400         {
401             destroy_train();
402         }
403 
404         /* Method: destroy
405 
406             Destructs the training data. Called automatically by the destructor.
407 
408             See also:
409                 <~training_data>
410         */
        void destroy_train()
412         {
413             if (train_data != NULL)
414             {
415                 fann_destroy_train(train_data);
416                 train_data = NULL;
417             }
418         }
419 
420         /* Method: read_train_from_file
421            Reads a file that stores training data.
422 
423            The file must be formatted like:
424            >num_train_data num_input num_output
           >inputdata separated by space
           >outputdata separated by space
           >
           >.
           >.
           >.
           >
           >inputdata separated by space
           >outputdata separated by space
434 
435            See also:
436    	        <neural_net::train_on_data>, <save_train>, <fann_read_train_from_file>
437 
438             This function appears in FANN >= 1.0.0
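
           Example:
              A minimal sketch loading a data set in the format above (the file
              name "xor.data" is illustrative):
              >FANN::training_data data;
              >if (data.read_train_from_file("xor.data"))
              >{
              >    unsigned int num_patterns = data.length_train_data();
              >    // use the data, e.g. for neural_net::train_on_data
              >}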
439         */
        bool read_train_from_file(const std::string &filename)
441         {
442             destroy_train();
443             train_data = fann_read_train_from_file(filename.c_str());
444             return (train_data != NULL);
445         }
446 
447         /* Method: save_train
448 
449            Save the training structure to a file, with the format as specified in <read_train_from_file>
450 
451            Return:
452            The function returns true on success and false on failure.
453 
454            See also:
455    	        <read_train_from_file>, <save_train_to_fixed>, <fann_save_train>
456 
457            This function appears in FANN >= 1.0.0.
458          */
        bool save_train(const std::string &filename)
460         {
461             if (train_data == NULL)
462             {
463                 return false;
464             }
465             if (fann_save_train(train_data, filename.c_str()) == -1)
466             {
467                 return false;
468             }
469             return true;
470         }
471 
472         /* Method: save_train_to_fixed
473 
474            Saves the training structure to a fixed point data file.
475 
           This function is very useful for testing the quality of a fixed point network.
477 
478            Return:
479            The function returns true on success and false on failure.
480 
481            See also:
482    	        <save_train>, <fann_save_train_to_fixed>
483 
484            This function appears in FANN >= 1.0.0.
485          */
        bool save_train_to_fixed(const std::string &filename, unsigned int decimal_point)
487         {
488             if (train_data == NULL)
489             {
490                 return false;
491             }
492             if (fann_save_train_to_fixed(train_data, filename.c_str(), decimal_point) == -1)
493             {
494                 return false;
495             }
496             return true;
497         }
498 
499         /* Method: shuffle_train_data
500 
501            Shuffles training data, randomizing the order.
           This is recommended for incremental training, while it has no influence on batch training.
503 
504            This function appears in FANN >= 1.1.0.
505          */
        void shuffle_train_data()
507         {
508             if (train_data != NULL)
509             {
510                 fann_shuffle_train_data(train_data);
511             }
512         }
513 
514         /* Method: merge_train_data
515 
516            Merges the data into the data contained in the <training_data>.
517 
518            This function appears in FANN >= 1.1.0.
519          */
        void merge_train_data(const training_data &data)
521         {
522             fann_train_data *new_data = fann_merge_train_data(train_data, data.train_data);
523             if (new_data != NULL)
524             {
525                 destroy_train();
526                 train_data = new_data;
527             }
528         }
529 
530         /* Method: length_train_data
531 
532            Returns the number of training patterns in the <training_data>.
533 
534            See also:
535            <num_input_train_data>, <num_output_train_data>, <fann_length_train_data>
536 
537            This function appears in FANN >= 2.0.0.
538          */
        unsigned int length_train_data()
540         {
541             if (train_data == NULL)
542             {
543                 return 0;
544             }
545             else
546             {
547                 return fann_length_train_data(train_data);
548             }
549         }
550 
551         /* Method: num_input_train_data
552 
553            Returns the number of inputs in each of the training patterns in the <training_data>.
554 
555            See also:
556            <num_output_train_data>, <length_train_data>, <fann_num_input_train_data>
557 
558            This function appears in FANN >= 2.0.0.
559          */
        unsigned int num_input_train_data()
561         {
562             if (train_data == NULL)
563             {
564                 return 0;
565             }
566             else
567             {
568                 return fann_num_input_train_data(train_data);
569             }
570         }
571 
572         /* Method: num_output_train_data
573 
574            Returns the number of outputs in each of the training patterns in the <struct fann_train_data>.
575 
576            See also:
577            <num_input_train_data>, <length_train_data>, <fann_num_output_train_data>
578 
579            This function appears in FANN >= 2.0.0.
580          */
        unsigned int num_output_train_data()
582         {
583             if (train_data == NULL)
584             {
585                 return 0;
586             }
587             else
588             {
589                 return fann_num_output_train_data(train_data);
590             }
591         }
592 
        /* Grant access to the encapsulated data since many applications
            create the data from sources other than files
            or use the training data for testing and related functions */
596 
597         /* Method: get_input
598 
599             Returns:
600                 A pointer to the array of input training data
601 
602             See also:
603                 <get_output>, <set_train_data>
604         */
        fann_type **get_input()
606         {
607             if (train_data == NULL)
608             {
609                 return NULL;
610             }
611             else
612             {
613                 return train_data->input;
614             }
615         }
616 
617         /* Method: get_output
618 
619             Returns:
620                 A pointer to the array of output training data
621 
622             See also:
623                 <get_input>, <set_train_data>
624         */
        fann_type **get_output()
626         {
627             if (train_data == NULL)
628             {
629                 return NULL;
630             }
631             else
632             {
633                 return train_data->output;
634             }
635         }
636 
637         /* Method: set_train_data
638 
639             Set the training data to the input and output data provided.
640 
641             A copy of the data is made so there are no restrictions on the
642             allocation of the input/output data and the caller is responsible
643             for the deallocation of the data pointed to by input and output.
644 
645            Parameters:
646              num_data      - The number of training data
647              num_input     - The number of inputs per training data
             num_output    - The number of outputs per training data
649              input      - The set of inputs (a pointer to an array of pointers to arrays of floating point data)
650              output     - The set of desired outputs (a pointer to an array of pointers to arrays of floating point data)
651 
652             See also:
653                 <get_input>, <get_output>
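
            Example:
                A minimal sketch building a set of 2 patterns with 2 inputs and 1 output
                each from local arrays (the values shown are illustrative):
                >fann_type in0[] = {0, 0}, in1[] = {0, 1};
                >fann_type out0[] = {0}, out1[] = {1};
                >fann_type *input[] = {in0, in1};
                >fann_type *output[] = {out0, out1};
                >
                >FANN::training_data data;
                >data.set_train_data(2, 2, input, 1, output);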
654         */
        void set_train_data(unsigned int num_data,
656             unsigned int num_input, fann_type **input,
657             unsigned int num_output, fann_type **output)
658         {
659             // Uses the allocation method used in fann
660             struct fann_train_data *data =
661                 (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
662             data->input = (fann_type **)calloc(num_data, sizeof(fann_type *));
663             data->output = (fann_type **)calloc(num_data, sizeof(fann_type *));
664 
665             data->num_data = num_data;
666             data->num_input = num_input;
667             data->num_output = num_output;
668 
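            // Allocate all input values in one contiguous block (and likewise for
            // outputs) so the layout matches what fann_destroy_train later frees.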
669         	fann_type *data_input = (fann_type *)calloc(num_input*num_data, sizeof(fann_type));
670         	fann_type *data_output = (fann_type *)calloc(num_output*num_data, sizeof(fann_type));
671 
672             for (unsigned int i = 0; i < num_data; ++i)
673             {
674                 data->input[i] = data_input;
675                 data_input += num_input;
676                 for (unsigned int j = 0; j < num_input; ++j)
677                 {
678                     data->input[i][j] = input[i][j];
679                 }
680                 data->output[i] = data_output;
681 		        data_output += num_output;
682                 for (unsigned int j = 0; j < num_output; ++j)
683                 {
684                     data->output[i][j] = output[i][j];
685                 }
686             }
687             set_train_data(data);
688         }
689 
690 private:
691         /* Set the training data to the struct fann_training_data pointer.
692             The struct has to be allocated with malloc to be compatible
693             with fann_destroy. */
        void set_train_data(struct fann_train_data *data)
695         {
696             destroy_train();
697             train_data = data;
698         }
699 
700 public:
701         /*********************************************************************/
702 
703         /* Method: create_train_from_callback
704            Creates the training data struct from a user supplied function.
705            As the training data are numerable (data 1, data 2...), the user must write
706            a function that receives the number of the training data set (input,output)
707            and returns the set.
708 
           Parameters:
             num_data      - The number of training data
             num_input     - The number of inputs per training data
             num_output    - The number of outputs per training data
             user_function - The user supplied function

           Parameters for the user function:
             num        - The number of the training data set
             num_input  - The number of inputs per training data
             num_output - The number of outputs per training data
             input      - The set of inputs
             output     - The set of desired outputs
721 
722            See also:
723              <training_data::read_train_from_file>, <neural_net::train_on_data>,
724              <fann_create_train_from_callback>
725 
726             This function appears in FANN >= 2.1.0
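
           Example:
              A minimal sketch generating the four XOR patterns from a callback
              (the function name xor_pattern is illustrative):
              >void FANN_API xor_pattern(unsigned int num, unsigned int num_input,
              >    unsigned int num_output, fann_type *input, fann_type *output)
              >{
              >    input[0] = (fann_type)(num / 2);
              >    input[1] = (fann_type)(num % 2);
              >    output[0] = (fann_type)((num / 2) ^ (num % 2));
              >}
              >
              >// elsewhere:
              >FANN::training_data data;
              >data.create_train_from_callback(4, 2, 1, xor_pattern);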
727         */
        void create_train_from_callback(unsigned int num_data,
729                                                   unsigned int num_input,
730                                                   unsigned int num_output,
731                                                   void (FANN_API *user_function)( unsigned int,
732                                                                          unsigned int,
733                                                                          unsigned int,
734                                                                          fann_type * ,
735                                                                          fann_type * ))
736         {
737             destroy_train();
738             train_data = fann_create_train_from_callback(num_data, num_input, num_output, user_function);
739         }
740 
741         /* Method: scale_input_train_data
742 
743            Scales the inputs in the training data to the specified range.
744 
745            See also:
746    	        <scale_output_train_data>, <scale_train_data>, <fann_scale_input_train_data>
747 
748            This function appears in FANN >= 2.0.0.
749          */
        void scale_input_train_data(fann_type new_min, fann_type new_max)
751         {
752             if (train_data != NULL)
753             {
754                 fann_scale_input_train_data(train_data, new_min, new_max);
755             }
756         }
757 
758         /* Method: scale_output_train_data
759 
760            Scales the outputs in the training data to the specified range.
761 
762            See also:
763    	        <scale_input_train_data>, <scale_train_data>, <fann_scale_output_train_data>
764 
765            This function appears in FANN >= 2.0.0.
766          */
        void scale_output_train_data(fann_type new_min, fann_type new_max)
768         {
769             if (train_data != NULL)
770             {
771                 fann_scale_output_train_data(train_data, new_min, new_max);
772             }
773         }
774 
775         /* Method: scale_train_data
776 
777            Scales the inputs and outputs in the training data to the specified range.
778 
779            See also:
780    	        <scale_output_train_data>, <scale_input_train_data>, <fann_scale_train_data>
781 
782            This function appears in FANN >= 2.0.0.
783          */
        void scale_train_data(fann_type new_min, fann_type new_max)
785         {
786             if (train_data != NULL)
787             {
788                 fann_scale_train_data(train_data, new_min, new_max);
789             }
790         }
791 
792         /* Method: subset_train_data
793 
           Changes the training data to a subset, starting at position *pos*
           and containing *length* elements. Use the copy constructor to work
796            on a new copy of the training data.
797 
798             >FANN::training_data full_data_set;
799             >full_data_set.read_train_from_file("somefile.train");
800             >FANN::training_data *small_data_set = new FANN::training_data(full_data_set);
801             >small_data_set->subset_train_data(0, 2); // Only use first two
802             >// Use small_data_set ...
803             >delete small_data_set;
804 
805            See also:
806    	        <fann_subset_train_data>
807 
808            This function appears in FANN >= 2.0.0.
809          */
        void subset_train_data(unsigned int pos, unsigned int length)
811         {
812             if (train_data != NULL)
813             {
814                 struct fann_train_data *temp = fann_subset_train_data(train_data, pos, length);
815                 destroy_train();
816                 train_data = temp;
817             }
818         }
819 
820         /*********************************************************************/
821 
822     protected:
823         /* The neural_net class has direct access to the training data */
824         friend class neural_net;
825 
826         /* Pointer to the encapsulated training data */
827         struct fann_train_data* train_data;
828     };
829 
830     /*************************************************************************/
831 
832     /* Class: neural_net
833 
834         Encapsulation of a neural network <struct fann> and
835         associated C API functions.
836     */
837     class neural_net
838     {
839     public:
840         /* Constructor: neural_net
841 
842             Default constructor creates an empty neural net.
843             Use one of the create functions to create the neural network.
844 
845             See also:
846 		        <create_standard>, <create_sparse>, <create_shortcut>,
847 		        <create_standard_array>, <create_sparse_array>, <create_shortcut_array>
848         */
        neural_net() : ann(NULL)
850         {
851         }
852 
	/* Constructor: neural_net

	    Creates a copy of the other neural_net.

	    See also:
	    		<copy_from_struct_fann>
        */
	neural_net(const neural_net& other) : ann(NULL)
	{
	    // ann is initialized to NULL so copy_from_struct_fann can safely call destroy()
	    copy_from_struct_fann(other.ann);
	}
864 
	/* Constructor: neural_net

	   Creates a copy of a C API <struct fann> neural network.

	   See also:
	    		<copy_from_struct_fann>
        */
	neural_net(struct fann* other) : ann(NULL)
	{
	    copy_from_struct_fann(other);
	}
876 
877 	/* Method: copy_from_struct_fann
878 
879 	   Set the internal fann struct to a copy of other
880 	*/
	void copy_from_struct_fann(struct fann* other)
882 	{
883 	    destroy();
884 	    if (other != NULL)
885 		ann=fann_copy(other);
886 	}
887 
888         /* Destructor: ~neural_net
889 
890             Provides automatic cleanup of data.
891             Define USE_VIRTUAL_DESTRUCTOR if you need the destructor to be virtual.
892 
893             See also:
894                 <destroy>
895         */
896 #ifdef USE_VIRTUAL_DESTRUCTOR
897         virtual
898 #endif
        ~neural_net()
900         {
901             destroy();
902         }
903 
904         /* Method: destroy
905 
906             Destructs the entire network. Called automatically by the destructor.
907 
908             See also:
909                 <~neural_net>
910         */
        void destroy()
912         {
913             if (ann != NULL)
914             {
915                 user_context *user_data = static_cast<user_context *>(fann_get_user_data(ann));
916                 if (user_data != NULL)
917                     delete user_data;
918 
919                 fann_destroy(ann);
920                 ann = NULL;
921             }
922         }
923 
924         /* Method: create_standard
925 
926 	        Creates a standard fully connected backpropagation neural network.
927 
928 	        There will be a bias neuron in each layer (except the output layer),
929 	        and this bias neuron will be connected to all neurons in the next layer.
	        When running the network, the bias nodes always emit 1.
931 
932 	        Parameters:
933 		        num_layers - The total number of layers including the input and the output layer.
934 		        ... - Integer values determining the number of neurons in each layer starting with the
935 			        input layer and ending with the output layer.
936 
937 	        Returns:
938 		        Boolean true if the network was created, false otherwise.
939 
940             Example:
941                 >const unsigned int num_layers = 3;
942                 >const unsigned int num_input = 2;
943                 >const unsigned int num_hidden = 3;
944                 >const unsigned int num_output = 1;
945                 >
946                 >FANN::neural_net net;
947                 >net.create_standard(num_layers, num_input, num_hidden, num_output);
948 
949 	        See also:
950 		        <create_standard_array>, <create_sparse>, <create_shortcut>,
951 		        <fann_create_standard_array>
952 
953 	        This function appears in FANN >= 2.0.0.
954         */
        bool create_standard(unsigned int num_layers, ...)
956         {
957             va_list layers;
958             va_start(layers, num_layers);
959             bool status = create_standard_array(num_layers,
960                 reinterpret_cast<const unsigned int *>(layers));
961             va_end(layers);
962             return status;
963         }
964 
965         /* Method: create_standard_array
966 
967            Just like <create_standard>, but with an array of layer sizes
968            instead of individual parameters.
969 
970 	        See also:
971 		        <create_standard>, <create_sparse>, <create_shortcut>,
972 		        <fann_create_standard>
973 
974 	        This function appears in FANN >= 2.0.0.
975         */
        bool create_standard_array(unsigned int num_layers, const unsigned int * layers)
977         {
978             destroy();
979             ann = fann_create_standard_array(num_layers, layers);
980             return (ann != NULL);
981         }
982 
983         /* Method: create_sparse
984 
985 	        Creates a standard backpropagation neural network, which is not fully connected.
986 
987 	        Parameters:
988 		        connection_rate - The connection rate controls how many connections there will be in the
989    			        network. If the connection rate is set to 1, the network will be fully
990    			        connected, but if it is set to 0.5 only half of the connections will be set.
991 			        A connection rate of 1 will yield the same result as <fann_create_standard>
992 		        num_layers - The total number of layers including the input and the output layer.
993 		        ... - Integer values determining the number of neurons in each layer starting with the
994 			        input layer and ending with the output layer.
995 
996 	        Returns:
997 		        Boolean true if the network was created, false otherwise.
998 
999 	        See also:
1000 		        <create_standard>, <create_sparse_array>, <create_shortcut>,
1001 		        <fann_create_sparse>
1002 
1003 	        This function appears in FANN >= 2.0.0.
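
            Example:
                A minimal sketch creating a half-connected 2-3-1 network and checking the
                boolean result (the layer sizes are illustrative):
                >FANN::neural_net net;
                >if (!net.create_sparse(0.5f, 3, 2, 3, 1))
                >{
                >    // creation failed; handle the error
                >}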
1004         */
        bool create_sparse(float connection_rate, unsigned int num_layers, ...)
1006         {
1007             va_list layers;
1008             va_start(layers, num_layers);
1009             bool status = create_sparse_array(connection_rate, num_layers,
1010                 reinterpret_cast<const unsigned int *>(layers));
1011             va_end(layers);
1012             return status;
1013         }
1014 
1015         /* Method: create_sparse_array
1016            Just like <create_sparse>, but with an array of layer sizes
1017            instead of individual parameters.
1018 
1019            See <create_sparse> for a description of the parameters.
1020 
1021 	        See also:
1022 		        <create_standard>, <create_sparse>, <create_shortcut>,
1023 		        <fann_create_sparse_array>
1024 
1025 	        This function appears in FANN >= 2.0.0.
1026         */
        bool create_sparse_array(float connection_rate,
1028             unsigned int num_layers, const unsigned int * layers)
1029         {
1030             destroy();
1031             ann = fann_create_sparse_array(connection_rate, num_layers, layers);
1032             return (ann != NULL);
1033         }
1034 
1035         /* Method: create_shortcut
1036 
1037 	        Creates a standard backpropagation neural network, which is not fully connected and which
1038 	        also has shortcut connections.
1039 
	        Shortcut connections are connections that skip layers. A fully connected network with shortcut
	        connections is a network where all neurons are connected to all neurons in later layers,
	        including direct connections from the input layer to the output layer.
1043 
1044 	        See <create_standard> for a description of the parameters.
1045 
1046 	        See also:
1047 		        <create_standard>, <create_sparse>, <create_shortcut_array>,
1048 		        <fann_create_shortcut>
1049 
1050 	        This function appears in FANN >= 2.0.0.
1051         */
        bool create_shortcut(unsigned int num_layers, ...)
1053         {
1054             va_list layers;
1055             va_start(layers, num_layers);
1056             bool status = create_shortcut_array(num_layers,
1057                 reinterpret_cast<const unsigned int *>(layers));
1058             va_end(layers);
1059             return status;
1060         }
1061 
1062         /* Method: create_shortcut_array
1063 
1064            Just like <create_shortcut>, but with an array of layer sizes
1065            instead of individual parameters.
1066 
1067 	        See <create_standard_array> for a description of the parameters.
1068 
1069 	        See also:
1070 		        <create_standard>, <create_sparse>, <create_shortcut>,
1071 		        <fann_create_shortcut_array>
1072 
1073 	        This function appears in FANN >= 2.0.0.
1074         */
        bool create_shortcut_array(unsigned int num_layers,
1076             const unsigned int * layers)
1077         {
1078             destroy();
1079             ann = fann_create_shortcut_array(num_layers, layers);
1080             return (ann != NULL);
1081         }
1082 
1083         /* Method: run
1084 
	        Will run input through the neural network, returning an array of outputs, the number of which
	        is equal to the number of neurons in the output layer.
1087 
1088 	        See also:
1089 		        <test>, <fann_run>
1090 
1091 	        This function appears in FANN >= 1.0.0.
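
            Example:
                A minimal sketch running a created network with two inputs and one output
                (the net object and the input values are illustrative assumptions):
                >fann_type input[2] = {1, 0};
                >fann_type *output = net.run(input);
                >if (output != NULL)
                >{
                >    // output[0] is the value of the single output neuron
                >}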
1092         */
        fann_type* run(fann_type *input)
1094         {
1095             if (ann == NULL)
1096             {
1097                 return NULL;
1098             }
1099             return fann_run(ann, input);
1100         }
1101 
1102         /* Method: randomize_weights
1103 
1104 	        Give each connection a random weight between *min_weight* and *max_weight*
1105 
1106 	        From the beginning the weights are random between -0.1 and 0.1.
1107 
1108 	        See also:
1109 		        <init_weights>, <fann_randomize_weights>
1110 
1111 	        This function appears in FANN >= 1.0.0.
1112         */
        void randomize_weights(fann_type min_weight, fann_type max_weight)
1114         {
1115             if (ann != NULL)
1116             {
1117                 fann_randomize_weights(ann, min_weight, max_weight);
1118             }
1119         }
1120 
1121         /* Method: init_weights
1122 
1123   	        Initialize the weights using Widrow + Nguyen's algorithm.
1124 
1125  	        This function behaves similarly to fann_randomize_weights. It will use the algorithm developed
1126 	        by Derrick Nguyen and Bernard Widrow to set the weights in such a way
1127 	        as to speed up training. This technique is not always successful, and in some cases can be less
1128 	        efficient than a purely random initialization.
1129 
	        The algorithm requires access to the range of the input data (i.e., largest and smallest input),
	        and therefore accepts an argument, data, which is the training data that will be used to
	        train the network.
1133 
1134 	        See also:
1135 		        <randomize_weights>, <training_data::read_train_from_file>,
1136                 <fann_init_weights>
1137 
1138 	        This function appears in FANN >= 1.1.0.
1139         */
        void init_weights(const training_data &data)
1141         {
1142             if ((ann != NULL) && (data.train_data != NULL))
1143             {
1144                 fann_init_weights(ann, data.train_data);
1145             }
1146         }
1147 
1148         /* Method: print_connections
1149 
1150 	        Will print the connections of the ann in a compact matrix, for easy viewing of the internals
1151 	        of the ann.
1152 
	        The output from fann_print_connections on a small (2 2 1) network trained on the xor problem:
1154 	        >Layer / Neuron 012345
1155 	        >L   1 / N    3 BBa...
1156 	        >L   1 / N    4 BBA...
1157 	        >L   1 / N    5 ......
1158 	        >L   2 / N    6 ...BBA
1159 	        >L   2 / N    7 ......
1160 
	        This network has five real neurons and two bias neurons. This gives a total of seven neurons
	        named from 0 to 6. The connections between these neurons can be seen in the matrix. "." is a
	        place where there is no connection, while a character tells how strong the connection is on a
	        scale from a-z. The two real neurons in the hidden layer (neurons 3 and 4 in layer 1) have
	        connections from the three neurons in the previous layer as is visible in the first two lines.
	        The output neuron (6) has connections from the three neurons in the hidden layer 3 - 5 as is
	        visible in the fourth line.

	        To simplify the matrix, output neurons are not visible as neurons that connections can come from,
	        and input and bias neurons are not visible as neurons that connections can go to.
1171 
1172 	        This function appears in FANN >= 1.2.0.
1173         */
        void print_connections()
1175         {
1176             if (ann != NULL)
1177             {
1178                 fann_print_connections(ann);
1179             }
1180         }
1181 
1182         /* Method: create_from_file
1183 
1184            Constructs a backpropagation neural network from a configuration file,
           which has been saved by <save>.
1186 
1187            See also:
1188    	        <save>, <save_to_fixed>, <fann_create_from_file>
1189 
1190            This function appears in FANN >= 1.0.0.
1191          */
        bool create_from_file(const std::string &configuration_file)
1193         {
1194             destroy();
1195             ann = fann_create_from_file(configuration_file.c_str());
1196             return (ann != NULL);
1197         }
1198 
1199         /* Method: save
1200 
1201            Save the entire network to a configuration file.
1202 
1203            The configuration file contains all information about the neural network and enables
1204            <create_from_file> to create an exact copy of the neural network and all of the
1205            parameters associated with the neural network.
1206 
           These two parameters (<set_callback>, <set_error_log>) are *NOT* saved
           to the file because they cannot safely be ported to a different location. Also temporary
           parameters generated during training, like <get_MSE>, are not saved.

           Return:
           The function returns true on success and false on failure.
1213 
1214            See also:
1215             <create_from_file>, <save_to_fixed>, <fann_save>
1216 
1217            This function appears in FANN >= 1.0.0.
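
           Example:
              A minimal sketch saving an existing network and restoring it into another
              object (the file name "net.conf" is illustrative):
              >net.save("net.conf");
              >
              >FANN::neural_net restored;
              >if (restored.create_from_file("net.conf"))
              >{
              >    // restored now holds the same network and parameters
              >}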
1218          */
        bool save(const std::string &configuration_file)
1220         {
1221             if (ann == NULL)
1222             {
1223                 return false;
1224             }
1225             if (fann_save(ann, configuration_file.c_str()) == -1)
1226             {
1227                 return false;
1228             }
1229             return true;
1230         }
1231 
1232         /* Method: save_to_fixed
1233 
1234            Saves the entire network to a configuration file.
1235            But it is saved in fixed point format no matter which
1236            format it is currently in.
1237 
           This is useful for training a network in floating points,
           and then later executing it in fixed point.
1240 
1241            The function returns the bit position of the fix point, which
1242            can be used to find out how accurate the fixed point network will be.
1243            A high value indicates high precision, and a low value indicates low
1244            precision.
1245 
1246            A negative value indicates very low precision, and a very
1247            strong possibility for overflow.
           (the actual fix point will be set to 0, since a negative
           fix point does not make sense).
1250 
1251            Generally, a fix point lower than 6 is bad, and should be avoided.
1252            The best way to avoid this, is to have less connections to each neuron,
1253            or just less neurons in each layer.
1254 
1255            The fixed point use of this network is only intended for use on machines that
1256            have no floating point processor, like an iPAQ. On normal computers the floating
1257            point version is actually faster.
1258 
1259            See also:
1260             <create_from_file>, <save>, <fann_save_to_fixed>
1261 
1262            This function appears in FANN >= 1.0.0.
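
           Example:
              A minimal sketch of the fixed point workflow, combining this method with
              <training_data::save_train_to_fixed> (the net and data objects and the
              file names are illustrative assumptions):
              >int decimal_point = net.save_to_fixed("net_fixed.conf");
              >if (decimal_point >= 0)
              >{
              >    data.save_train_to_fixed("data_fixed.train", (unsigned int)decimal_point);
              >}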
1263         */
        int save_to_fixed(const std::string &configuration_file)
1265         {
1266             int fixpoint = 0;
1267             if (ann != NULL)
1268             {
1269                 fixpoint = fann_save_to_fixed(ann, configuration_file.c_str());
1270             }
1271             return fixpoint;
1272         }
1273 
1274 #ifndef FIXEDFANN
1275         /* Method: train
1276 
1277            Train one iteration with a set of inputs, and a set of desired outputs.
1278            This training is always incremental training (see <FANN::training_algorithm_enum>),
1279            since only one pattern is presented.
1280 
           Parameters:
   	        input - an array of inputs. This array must be exactly <fann_get_num_input> long.
   	        desired_output - an array of desired outputs. This array must be exactly <fann_get_num_output> long.
1285 
1286    	        See also:
1287    		        <train_on_data>, <train_epoch>, <fann_train>
1288 
1289    	        This function appears in FANN >= 1.0.0.
1290          */
        void train(fann_type *input, fann_type *desired_output)
1292         {
1293             if (ann != NULL)
1294             {
1295                 fann_train(ann, input, desired_output);
1296             }
1297         }
1298 
1299         /* Method: train_epoch
1300             Train one epoch with a set of training data.
1301 
1302             Train one epoch with the training data stored in data. One epoch is where all of
1303             the training data is considered exactly once.
1304 
	        This function returns the MSE error as it is calculated either before or during
	        the actual training. This is not the actual MSE after the training epoch, but since
	        calculating this would require going through the entire training set once more, it is
	        more than adequate to use this value during training.
1309 
1310 	        The training algorithm used by this function is chosen by the <fann_set_training_algorithm>
1311 	        function.
1312 
1313 	        See also:
1314 		        <train_on_data>, <test_data>, <fann_train_epoch>
1315 
1316 	        This function appears in FANN >= 1.2.0.
1317          */
        float train_epoch(const training_data &data)
1319         {
1320             float mse = 0.0f;
1321             if ((ann != NULL) && (data.train_data != NULL))
1322             {
1323                 mse = fann_train_epoch(ann, data.train_data);
1324             }
1325             return mse;
1326         }
1327 
1328         /* Method: train_on_data
1329 
1330            Trains on an entire dataset, for a period of time.
1331 
1332            This training uses the training algorithm chosen by <set_training_algorithm>,
1333            and the parameters set for these training algorithms.
1334 
           Parameters:
   		        data - The data, which should be used during training
   		        max_epochs - The maximum number of epochs the training should continue
   		        epochs_between_reports - The number of epochs between printing a status report to stdout.
   			        A value of zero means no reports should be printed.
   		        desired_error - The desired <get_MSE> or <get_bit_fail>, depending on which stop function
   			        is chosen by <set_train_stop_function>.
1343 
1344 	        Instead of printing out reports every epochs_between_reports, a callback function can be called
1345 	        (see <set_callback>).
1346 
1347 	        See also:
1348 		        <train_on_file>, <train_epoch>, <fann_train_on_data>
1349 
1350 	        This function appears in FANN >= 1.0.0.
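
	        Example:
		        A minimal sketch training on a previously loaded data set for at most 1000
		        epochs, reporting every 100 epochs, until the error drops below 0.001
		        (all numbers are illustrative and data is an assumed <training_data> object):
		        >net.train_on_data(data, 1000, 100, 0.001f);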
1351         */
        void train_on_data(const training_data &data, unsigned int max_epochs,
1353             unsigned int epochs_between_reports, float desired_error)
1354         {
1355             if ((ann != NULL) && (data.train_data != NULL))
1356             {
1357                 fann_train_on_data(ann, data.train_data, max_epochs,
1358                     epochs_between_reports, desired_error);
1359             }
1360         }
1361 
1362         /* Method: train_on_file
1363 
1364            Does the same as <train_on_data>, but reads the training data directly from a file.
1365 
1366            See also:
1367    		        <train_on_data>, <fann_train_on_file>
1368 
1369 	        This function appears in FANN >= 1.0.0.
1370         */
        void train_on_file(const std::string &filename, unsigned int max_epochs,
            unsigned int epochs_between_reports, float desired_error)
1373         {
1374             if (ann != NULL)
1375             {
1376                 fann_train_on_file(ann, filename.c_str(),
1377                     max_epochs, epochs_between_reports, desired_error);
1378             }
1379         }
1380 #endif /* NOT FIXEDFANN */
1381 
1382         /* Method: test
1383 
1384            Test with a set of inputs, and a set of desired outputs.
1385            This operation updates the mean square error, but does not
1386            change the network in any way.
1387 
1388            See also:
1389    		        <test_data>, <train>, <fann_test>
1390 
1391            This function appears in FANN >= 1.0.0.
1392         */
        fann_type * test(fann_type *input, fann_type *desired_output)
1394         {
1395             fann_type * output = NULL;
1396             if (ann != NULL)
1397             {
1398                 output = fann_test(ann, input, desired_output);
1399             }
1400             return output;
1401         }
1402 
1403         /* Method: test_data
1404 
           Tests a set of training data and calculates the MSE for the training data.
1406 
1407            This function updates the MSE and the bit fail values.
1408 
1409            See also:
1410  	        <test>, <get_MSE>, <get_bit_fail>, <fann_test_data>
1411 
1412 	        This function appears in FANN >= 1.2.0.
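
	        Example:
	        A minimal sketch that reports the error on a held-out set (the file name is made up
	        for illustration, and net is assumed to be an already trained <neural_net>):
	        >FANN::training_data test_set;
	        >if (test_set.read_train_from_file("test.data"))
	        >{
	        >    printf("Test MSE: %f, failing bits: %u\n",
	        >        net.test_data(test_set), net.get_bit_fail());
	        >}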
1413          */
        float test_data(const training_data &data)
1415         {
1416             float mse = 0.0f;
1417             if ((ann != NULL) && (data.train_data != NULL))
1418             {
1419                 mse = fann_test_data(ann, data.train_data);
1420             }
1421             return mse;
1422         }
1423 
1424         /* Method: get_MSE
1425            Reads the mean square error from the network.
1426 
1427            Reads the mean square error from the network. This value is calculated during
1428            training or testing, and can therefore sometimes be a bit off if the weights
1429            have been changed since the last calculation of the value.
1430 
1431            See also:
1432    	        <test_data>, <fann_get_MSE>
1433 
1434 	        This function appears in FANN >= 1.1.0.
1435          */
        float get_MSE()
1437         {
1438             float mse = 0.0f;
1439             if (ann != NULL)
1440             {
1441                 mse = fann_get_MSE(ann);
1442             }
1443             return mse;
1444         }
1445 
1446         /* Method: reset_MSE
1447 
1448            Resets the mean square error from the network.
1449 
1450            This function also resets the number of bits that fail.
1451 
1452            See also:
1453    	        <get_MSE>, <get_bit_fail_limit>, <fann_reset_MSE>
1454 
1455             This function appears in FANN >= 1.1.0
1456          */
        void reset_MSE()
1458         {
1459             if (ann != NULL)
1460             {
1461                 fann_reset_MSE(ann);
1462             }
1463         }
1464 
1465         /* Method: set_callback
1466 
1467            Sets the callback function for use during training. The user_data is passed to
1468            the callback. It can point to arbitrary data that the callback might require and
1469            can be NULL if it is not used.
1470 
1471            See <FANN::callback_type> for more information about the callback function.
1472 
1473            The default callback function simply prints out some status information.
1474 
1475            This function appears in FANN >= 2.0.0.
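
           Example:
           A minimal sketch of a callback that prints progress and stops after a made-up
           epoch limit (the signature follows <FANN::callback_type>; returning -1 stops
           training):
           >int print_callback(FANN::neural_net &net, FANN::training_data &train,
           >    unsigned int max_epochs, unsigned int epochs_between_reports,
           >    float desired_error, unsigned int epochs, void *user_data)
           >{
           >    printf("Epoch %u: MSE %f\n", epochs, net.get_MSE());
           >    return (epochs >= 5000) ? -1 : 0; // -1 terminates training
           >}
           >
           >net.set_callback(print_callback, NULL);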
1476          */
        void set_callback(callback_type callback, void *user_data)
1478         {
1479             if (ann != NULL)
1480             {
1481                 // Allocated data is also deleted in the destroy method called by the destructor
1482                 user_context *user_instance = static_cast<user_context *>(fann_get_user_data(ann));
1483                 if (user_instance != NULL)
1484                     delete user_instance;
1485 
1486                 user_instance = new user_context();
1487                 user_instance->user_callback = callback;
1488                 user_instance->user_data = user_data;
1489                 user_instance->net = this;
1490                 fann_set_user_data(ann, user_instance);
1491 
1492                 if (callback != NULL)
1493                     fann_set_callback(ann, &FANN::neural_net::internal_callback);
1494                 else
1495                     fann_set_callback(ann, NULL);
1496             }
1497         }
1498 
1499         /* Method: print_parameters
1500 
1501   	        Prints all of the parameters and options of the neural network
1502 
1503             See also:
1504                 <fann_print_parameters>
1505 
1506 	        This function appears in FANN >= 1.2.0.
1507         */
        void print_parameters()
1509         {
1510             if (ann != NULL)
1511             {
1512                 fann_print_parameters(ann);
1513             }
1514         }
1515 
1516         /* Method: get_training_algorithm
1517 
1518            Return the training algorithm as described by <FANN::training_algorithm_enum>.
1519            This training algorithm is used by <train_on_data> and associated functions.
1520 
           Note that this algorithm is also used during <cascadetrain_on_data>, although only
           FANN::TRAIN_RPROP and FANN::TRAIN_QUICKPROP are allowed during cascade training.
1523 
1524            The default training algorithm is FANN::TRAIN_RPROP.
1525 
1526            See also:
1527             <set_training_algorithm>, <FANN::training_algorithm_enum>,
1528             <fann_get_training_algorithm>
1529 
1530            This function appears in FANN >= 1.0.0.
1531          */
        training_algorithm_enum get_training_algorithm()
1533         {
1534             fann_train_enum training_algorithm = FANN_TRAIN_INCREMENTAL;
1535             if (ann != NULL)
1536             {
1537                 training_algorithm = fann_get_training_algorithm(ann);
1538             }
1539             return static_cast<training_algorithm_enum>(training_algorithm);
1540         }
1541 
1542         /* Method: set_training_algorithm
1543 
1544            Set the training algorithm.
1545 
1546            More info available in <get_training_algorithm>
1547 
1548            This function appears in FANN >= 1.0.0.
1549          */
        void set_training_algorithm(training_algorithm_enum training_algorithm)
1551         {
1552             if (ann != NULL)
1553             {
1554                 fann_set_training_algorithm(ann,
1555 					static_cast<fann_train_enum>(training_algorithm));
1556             }
1557         }
1558 
1559         /* Method: get_learning_rate
1560 
1561            Return the learning rate.
1562 
1563            The learning rate is used to determine how aggressive training should be for some of the
1564            training algorithms (FANN::TRAIN_INCREMENTAL, FANN::TRAIN_BATCH, FANN::TRAIN_QUICKPROP).
1565            Do however note that it is not used in FANN::TRAIN_RPROP.
1566 
1567            The default learning rate is 0.7.
1568 
1569            See also:
1570    	        <set_learning_rate>, <set_training_algorithm>,
1571             <fann_get_learning_rate>
1572 
1573            This function appears in FANN >= 1.0.0.
1574          */
        float get_learning_rate()
1576         {
1577             float learning_rate = 0.0f;
1578             if (ann != NULL)
1579             {
1580                 learning_rate = fann_get_learning_rate(ann);
1581             }
1582             return learning_rate;
1583         }
1584 
1585         /* Method: set_learning_rate
1586 
1587            Set the learning rate.
1588 
1589            More info available in <get_learning_rate>
1590 
1591            This function appears in FANN >= 1.0.0.
1592          */
        void set_learning_rate(float learning_rate)
1594         {
1595             if (ann != NULL)
1596             {
1597                 fann_set_learning_rate(ann, learning_rate);
1598             }
1599         }
1600 
1601         /*************************************************************************************************************/
1602 
1603         /* Method: get_activation_function
1604 
1605            Get the activation function for neuron number *neuron* in layer number *layer*,
1606            counting the input layer as layer 0.
1607 
1608            It is not possible to get activation functions for the neurons in the input layer.
1609 
1610            Information about the individual activation functions is available at <FANN::activation_function_enum>.
1611 
1612            Returns:
1613             The activation function for the neuron or -1 if the neuron is not defined in the neural network.
1614 
1615            See also:
1616    	        <set_activation_function_layer>, <set_activation_function_hidden>,
1617    	        <set_activation_function_output>, <set_activation_steepness>,
1618             <set_activation_function>, <fann_get_activation_function>
1619 
1620            This function appears in FANN >= 2.1.0
1621          */
        activation_function_enum get_activation_function(int layer, int neuron)
1623         {
1624             unsigned int activation_function = 0;
1625             if (ann != NULL)
1626             {
1627                 activation_function = fann_get_activation_function(ann, layer, neuron);
1628             }
1629             return static_cast<activation_function_enum>(activation_function);
1630         }
1631 
1632         /* Method: set_activation_function
1633 
1634            Set the activation function for neuron number *neuron* in layer number *layer*,
1635            counting the input layer as layer 0.
1636 
1637            It is not possible to set activation functions for the neurons in the input layer.
1638 
           When choosing an activation function it is important to note that the activation
           functions have different ranges. FANN::SIGMOID is e.g. in the 0 - 1 range while
           FANN::SIGMOID_SYMMETRIC is in the -1 - 1 range and FANN::LINEAR is unbounded.
1642 
1643            Information about the individual activation functions is available at <FANN::activation_function_enum>.
1644 
1645            The default activation function is FANN::SIGMOID_STEPWISE.
1646 
1647            See also:
1648    	        <set_activation_function_layer>, <set_activation_function_hidden>,
1649    	        <set_activation_function_output>, <set_activation_steepness>,
1650             <get_activation_function>, <fann_set_activation_function>
1651 
1652            This function appears in FANN >= 2.0.0.
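
           Example:
           A minimal sketch for a hypothetical 2-3-1 network, where the hidden layer is
           layer 1 and the output layer is layer 2:
           >net.set_activation_function(FANN::SIGMOID_SYMMETRIC, 1, 0); // one hidden neuron
           >net.set_activation_function_layer(FANN::SIGMOID_SYMMETRIC, 1); // whole hidden layer
           >net.set_activation_function_output(FANN::LINEAR); // unbounded outputs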
1653          */
        void set_activation_function(activation_function_enum activation_function, int layer, int neuron)
1655         {
1656             if (ann != NULL)
1657             {
1658                 fann_set_activation_function(ann,
1659 					static_cast<fann_activationfunc_enum>(activation_function), layer, neuron);
1660             }
1661         }
1662 
1663         /* Method: set_activation_function_layer
1664 
1665            Set the activation function for all the neurons in the layer number *layer*,
1666            counting the input layer as layer 0.
1667 
1668            It is not possible to set activation functions for the neurons in the input layer.
1669 
1670            See also:
1671    	        <set_activation_function>, <set_activation_function_hidden>,
1672    	        <set_activation_function_output>, <set_activation_steepness_layer>,
1673             <fann_set_activation_function_layer>
1674 
1675            This function appears in FANN >= 2.0.0.
1676          */
        void set_activation_function_layer(activation_function_enum activation_function, int layer)
1678         {
1679             if (ann != NULL)
1680             {
1681                 fann_set_activation_function_layer(ann,
1682 					static_cast<fann_activationfunc_enum>(activation_function), layer);
1683             }
1684         }
1685 
1686         /* Method: set_activation_function_hidden
1687 
1688            Set the activation function for all of the hidden layers.
1689 
1690            See also:
1691    	        <set_activation_function>, <set_activation_function_layer>,
1692    	        <set_activation_function_output>, <set_activation_steepness_hidden>,
1693             <fann_set_activation_function_hidden>
1694 
1695            This function appears in FANN >= 1.0.0.
1696          */
        void set_activation_function_hidden(activation_function_enum activation_function)
1698         {
1699             if (ann != NULL)
1700             {
1701                 fann_set_activation_function_hidden(ann,
1702 					static_cast<fann_activationfunc_enum>(activation_function));
1703             }
1704         }
1705 
1706         /* Method: set_activation_function_output
1707 
1708            Set the activation function for the output layer.
1709 
1710            See also:
1711    	        <set_activation_function>, <set_activation_function_layer>,
1712    	        <set_activation_function_hidden>, <set_activation_steepness_output>,
1713             <fann_set_activation_function_output>
1714 
1715            This function appears in FANN >= 1.0.0.
1716          */
        void set_activation_function_output(activation_function_enum activation_function)
1718         {
1719             if (ann != NULL)
1720             {
1721                 fann_set_activation_function_output(ann,
1722 					static_cast<fann_activationfunc_enum>(activation_function));
1723             }
1724         }
1725 
1726         /* Method: get_activation_steepness
1727 
1728            Get the activation steepness for neuron number *neuron* in layer number *layer*,
1729            counting the input layer as layer 0.
1730 
1731            It is not possible to get activation steepness for the neurons in the input layer.
1732 
           The steepness of an activation function says something about how fast the activation function
           goes from the minimum to the maximum. A high value for the steepness will also
           give more aggressive training.
1736 
1737            When training neural networks where the output values should be at the extremes (usually 0 and 1,
1738            depending on the activation function), a steep activation function can be used (e.g. 1.0).
1739 
1740            The default activation steepness is 0.5.
1741 
1742            Returns:
1743             The activation steepness for the neuron or -1 if the neuron is not defined in the neural network.
1744 
1745            See also:
1746    	        <set_activation_steepness_layer>, <set_activation_steepness_hidden>,
1747    	        <set_activation_steepness_output>, <set_activation_function>,
1748             <set_activation_steepness>, <fann_get_activation_steepness>
1749 
1750            This function appears in FANN >= 2.1.0
1751          */
        fann_type get_activation_steepness(int layer, int neuron)
1753         {
1754             fann_type activation_steepness = 0;
1755             if (ann != NULL)
1756             {
1757                 activation_steepness = fann_get_activation_steepness(ann, layer, neuron);
1758             }
1759             return activation_steepness;
1760         }
1761 
1762         /* Method: set_activation_steepness
1763 
1764            Set the activation steepness for neuron number *neuron* in layer number *layer*,
1765            counting the input layer as layer 0.
1766 
1767            It is not possible to set activation steepness for the neurons in the input layer.
1768 
           The steepness of an activation function says something about how fast the activation function
           goes from the minimum to the maximum. A high value for the steepness will also
           give more aggressive training.
1772 
1773            When training neural networks where the output values should be at the extremes (usually 0 and 1,
1774            depending on the activation function), a steep activation function can be used (e.g. 1.0).
1775 
1776            The default activation steepness is 0.5.
1777 
1778            See also:
1779    	        <set_activation_steepness_layer>, <set_activation_steepness_hidden>,
1780    	        <set_activation_steepness_output>, <set_activation_function>,
1781             <get_activation_steepness>, <fann_set_activation_steepness>
1782 
1783            This function appears in FANN >= 2.0.0.
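
           Example:
           A minimal sketch that keeps the default steepness in the hidden layers but uses a
           steeper output layer to push outputs towards the extremes (the values are only an
           illustration):
           >net.set_activation_steepness_hidden(0.5);
           >net.set_activation_steepness_output(1.0);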
1784          */
        void set_activation_steepness(fann_type steepness, int layer, int neuron)
1786         {
1787             if (ann != NULL)
1788             {
1789                 fann_set_activation_steepness(ann, steepness, layer, neuron);
1790             }
1791         }
1792 
1793         /* Method: set_activation_steepness_layer
1794 
           Set the activation steepness for all of the neurons in layer number *layer*,
           counting the input layer as layer 0.
1797 
1798            It is not possible to set activation steepness for the neurons in the input layer.
1799 
1800            See also:
1801    	        <set_activation_steepness>, <set_activation_steepness_hidden>,
1802    	        <set_activation_steepness_output>, <set_activation_function_layer>,
1803             <fann_set_activation_steepness_layer>
1804 
1805            This function appears in FANN >= 2.0.0.
1806          */
        void set_activation_steepness_layer(fann_type steepness, int layer)
1808         {
1809             if (ann != NULL)
1810             {
1811                 fann_set_activation_steepness_layer(ann, steepness, layer);
1812             }
1813         }
1814 
1815         /* Method: set_activation_steepness_hidden
1816 
           Set the steepness of the activation functions in all of the hidden layers.
1818 
1819            See also:
1820    	        <set_activation_steepness>, <set_activation_steepness_layer>,
1821    	        <set_activation_steepness_output>, <set_activation_function_hidden>,
1822             <fann_set_activation_steepness_hidden>
1823 
1824            This function appears in FANN >= 1.2.0.
1825          */
        void set_activation_steepness_hidden(fann_type steepness)
1827         {
1828             if (ann != NULL)
1829             {
1830                 fann_set_activation_steepness_hidden(ann, steepness);
1831             }
1832         }
1833 
1834         /* Method: set_activation_steepness_output
1835 
           Set the steepness of the activation functions in the output layer.
1837 
1838            See also:
1839    	        <set_activation_steepness>, <set_activation_steepness_layer>,
1840    	        <set_activation_steepness_hidden>, <set_activation_function_output>,
1841             <fann_set_activation_steepness_output>
1842 
1843            This function appears in FANN >= 1.2.0.
1844          */
        void set_activation_steepness_output(fann_type steepness)
1846         {
1847             if (ann != NULL)
1848             {
1849                 fann_set_activation_steepness_output(ann, steepness);
1850             }
1851         }
1852 
1853         /*************************************************************************************************************/
1854 
1855         /* Method: get_train_error_function
1856 
1857            Returns the error function used during training.
1858 
           The error function is described further in <FANN::error_function_enum>
1860 
1861            The default error function is FANN::ERRORFUNC_TANH
1862 
1863            See also:
1864    	        <set_train_error_function>, <fann_get_train_error_function>
1865 
1866            This function appears in FANN >= 1.2.0.
1867           */
        error_function_enum get_train_error_function()
1869         {
1870             fann_errorfunc_enum train_error_function = FANN_ERRORFUNC_LINEAR;
1871             if (ann != NULL)
1872             {
1873                 train_error_function = fann_get_train_error_function(ann);
1874             }
1875             return static_cast<error_function_enum>(train_error_function);
1876         }
1877 
1878         /* Method: set_train_error_function
1879 
1880            Set the error function used during training.
1881 
           The error function is described further in <FANN::error_function_enum>
1883 
1884            See also:
1885    	        <get_train_error_function>, <fann_set_train_error_function>
1886 
1887            This function appears in FANN >= 1.2.0.
1888          */
        void set_train_error_function(error_function_enum train_error_function)
1890         {
1891             if (ann != NULL)
1892             {
1893                 fann_set_train_error_function(ann,
1894 					static_cast<fann_errorfunc_enum>(train_error_function));
1895             }
1896         }
1897 
1898         /* Method: get_quickprop_decay
1899 
           The decay is a small negative number which is the factor by which the weights
           should be decreased in each iteration during quickprop training. This is used
           to make sure that the weights do not grow too large during training.
1903 
1904            The default decay is -0.0001.
1905 
1906            See also:
1907    	        <set_quickprop_decay>, <fann_get_quickprop_decay>
1908 
1909            This function appears in FANN >= 1.2.0.
1910          */
        float get_quickprop_decay()
1912         {
1913             float quickprop_decay = 0.0f;
1914             if (ann != NULL)
1915             {
1916                 quickprop_decay = fann_get_quickprop_decay(ann);
1917             }
1918             return quickprop_decay;
1919         }
1920 
1921         /* Method: set_quickprop_decay
1922 
1923            Sets the quickprop decay factor.
1924 
1925            See also:
1926    	        <get_quickprop_decay>, <fann_set_quickprop_decay>
1927 
1928            This function appears in FANN >= 1.2.0.
1929         */
        void set_quickprop_decay(float quickprop_decay)
1931         {
1932             if (ann != NULL)
1933             {
1934                 fann_set_quickprop_decay(ann, quickprop_decay);
1935             }
1936         }
1937 
1938         /* Method: get_quickprop_mu
1939 
1940            The mu factor is used to increase and decrease the step-size during quickprop training.
1941            The mu factor should always be above 1, since it would otherwise decrease the step-size
           when it was supposed to increase it.
1943 
1944            The default mu factor is 1.75.
1945 
1946            See also:
1947    	        <set_quickprop_mu>, <fann_get_quickprop_mu>
1948 
1949            This function appears in FANN >= 1.2.0.
1950         */
        float get_quickprop_mu()
1952         {
1953             float quickprop_mu = 0.0f;
1954             if (ann != NULL)
1955             {
1956                 quickprop_mu = fann_get_quickprop_mu(ann);
1957             }
1958             return quickprop_mu;
1959         }
1960 
1961         /* Method: set_quickprop_mu
1962 
1963             Sets the quickprop mu factor.
1964 
1965            See also:
1966    	        <get_quickprop_mu>, <fann_set_quickprop_mu>
1967 
1968            This function appears in FANN >= 1.2.0.
1969         */
        void set_quickprop_mu(float quickprop_mu)
1971         {
1972             if (ann != NULL)
1973             {
1974                 fann_set_quickprop_mu(ann, quickprop_mu);
1975             }
1976         }
1977 
1978         /* Method: get_rprop_increase_factor
1979 
1980            The increase factor is a value larger than 1, which is used to
1981            increase the step-size during RPROP training.
1982 
1983            The default increase factor is 1.2.
1984 
1985            See also:
1986    	        <set_rprop_increase_factor>, <fann_get_rprop_increase_factor>
1987 
1988            This function appears in FANN >= 1.2.0.
1989         */
        float get_rprop_increase_factor()
1991         {
1992             float factor = 0.0f;
1993             if (ann != NULL)
1994             {
1995                 factor = fann_get_rprop_increase_factor(ann);
1996             }
1997             return factor;
1998         }
1999 
2000         /* Method: set_rprop_increase_factor
2001 
2002            The increase factor used during RPROP training.
2003 
2004            See also:
2005    	        <get_rprop_increase_factor>, <fann_set_rprop_increase_factor>
2006 
2007            This function appears in FANN >= 1.2.0.
2008         */
        void set_rprop_increase_factor(float rprop_increase_factor)
2010         {
2011             if (ann != NULL)
2012             {
2013                 fann_set_rprop_increase_factor(ann, rprop_increase_factor);
2014             }
2015         }
2016 
2017         /* Method: get_rprop_decrease_factor
2018 
2019            The decrease factor is a value smaller than 1, which is used to decrease the step-size during RPROP training.
2020 
2021            The default decrease factor is 0.5.
2022 
2023            See also:
2024             <set_rprop_decrease_factor>, <fann_get_rprop_decrease_factor>
2025 
2026            This function appears in FANN >= 1.2.0.
2027         */
        float get_rprop_decrease_factor()
2029         {
2030             float factor = 0.0f;
2031             if (ann != NULL)
2032             {
2033                 factor = fann_get_rprop_decrease_factor(ann);
2034             }
2035             return factor;
2036         }
2037 
2038         /* Method: set_rprop_decrease_factor
2039 
2040            The decrease factor is a value smaller than 1, which is used to decrease the step-size during RPROP training.
2041 
2042            See also:
2043             <get_rprop_decrease_factor>, <fann_set_rprop_decrease_factor>
2044 
2045            This function appears in FANN >= 1.2.0.
2046         */
        void set_rprop_decrease_factor(float rprop_decrease_factor)
2048         {
2049             if (ann != NULL)
2050             {
2051                 fann_set_rprop_decrease_factor(ann, rprop_decrease_factor);
2052             }
2053         }
2054 
2055         /* Method: get_rprop_delta_zero
2056 
           The initial step-size is a positive number determining the size of the initial step taken during RPROP training.
2058 
2059            The default value delta zero is 0.1.
2060 
2061            See also:
2062    	        <set_rprop_delta_zero>, <fann_get_rprop_delta_zero>
2063 
2064            This function appears in FANN >= 2.1.0.
2065         */
        float get_rprop_delta_zero()
2067         {
2068             float delta = 0.0f;
2069             if (ann != NULL)
2070             {
2071                 delta = fann_get_rprop_delta_zero(ann);
2072             }
2073             return delta;
2074         }
2075 
2076         /* Method: set_rprop_delta_zero
2077 
           The initial step-size is a positive number determining the size of the initial step taken during RPROP training.
2079 
2080            See also:
2081    	        <get_rprop_delta_zero>, <fann_set_rprop_delta_zero>
2082 
2083            This function appears in FANN >= 2.1.0.
2084         */
        void set_rprop_delta_zero(float rprop_delta_zero)
2086         {
2087             if (ann != NULL)
2088             {
2089                 fann_set_rprop_delta_zero(ann, rprop_delta_zero);
2090             }
2091         }
2092         /* Method: get_rprop_delta_min
2093 
2094            The minimum step-size is a small positive number determining how small the minimum step-size may be.
2095 
2096            The default value delta min is 0.0.
2097 
2098            See also:
2099    	        <set_rprop_delta_min>, <fann_get_rprop_delta_min>
2100 
2101            This function appears in FANN >= 1.2.0.
2102         */
        float get_rprop_delta_min()
2104         {
2105             float delta = 0.0f;
2106             if (ann != NULL)
2107             {
2108                 delta = fann_get_rprop_delta_min(ann);
2109             }
2110             return delta;
2111         }
2112 
2113         /* Method: set_rprop_delta_min
2114 
2115            The minimum step-size is a small positive number determining how small the minimum step-size may be.
2116 
2117            See also:
2118    	        <get_rprop_delta_min>, <fann_set_rprop_delta_min>
2119 
2120            This function appears in FANN >= 1.2.0.
2121         */
        void set_rprop_delta_min(float rprop_delta_min)
2123         {
2124             if (ann != NULL)
2125             {
2126                 fann_set_rprop_delta_min(ann, rprop_delta_min);
2127             }
2128         }
2129 
2130         /* Method: get_rprop_delta_max
2131 
2132            The maximum step-size is a positive number determining how large the maximum step-size may be.
2133 
2134            The default delta max is 50.0.
2135 
2136            See also:
2137    	        <set_rprop_delta_max>, <get_rprop_delta_min>, <fann_get_rprop_delta_max>
2138 
2139            This function appears in FANN >= 1.2.0.
2140         */
        float get_rprop_delta_max()
2142         {
2143             float delta = 0.0f;
2144             if (ann != NULL)
2145             {
2146                 delta = fann_get_rprop_delta_max(ann);
2147             }
2148             return delta;
2149         }
2150 
2151         /* Method: set_rprop_delta_max
2152 
2153            The maximum step-size is a positive number determining how large the maximum step-size may be.
2154 
2155            See also:
2156    	        <get_rprop_delta_max>, <get_rprop_delta_min>, <fann_set_rprop_delta_max>
2157 
2158            This function appears in FANN >= 1.2.0.
2159         */
        void set_rprop_delta_max(float rprop_delta_max)
2161         {
2162             if (ann != NULL)
2163             {
2164                 fann_set_rprop_delta_max(ann, rprop_delta_max);
2165             }
2166         }
2167 
        /* Method: get_sarprop_weight_decay_shift

           The sarprop weight decay shift.

           The default weight decay shift is -6.644.

           See also:
   	        <set_sarprop_weight_decay_shift>, <fann_get_sarprop_weight_decay_shift>

           This function appears in FANN >= 2.1.0.
        */
        float get_sarprop_weight_decay_shift()
        {
            float res = 0.0f;
            if (ann != NULL)
            {
                res = fann_get_sarprop_weight_decay_shift(ann);
            }
            return res;
        }
2188 
2189         /* Method: set_sarprop_weight_decay_shift
2190 
2191            Set the sarprop weight decay shift.
2192 
2193 	        This function appears in FANN >= 2.1.0.
2194 
2195 	    See also:
2196    	        <get_sarprop_weight_decay_shift>, <fann_set_sarprop_weight_decay_shift>
2197         */
        void set_sarprop_weight_decay_shift(float sarprop_weight_decay_shift)
2199         {
2200             if (ann != NULL)
2201             {
2202                 fann_set_sarprop_weight_decay_shift(ann, sarprop_weight_decay_shift);
2203             }
2204         }
2205 
        /* Method: get_sarprop_step_error_threshold_factor

           The sarprop step error threshold factor.

           The default step error threshold factor is 0.1.

           See also:
   	        <set_sarprop_step_error_threshold_factor>, <fann_get_sarprop_step_error_threshold_factor>

           This function appears in FANN >= 2.1.0.
        */
        float get_sarprop_step_error_threshold_factor()
        {
            float res = 0.0f;
            if (ann != NULL)
            {
                res = fann_get_sarprop_step_error_threshold_factor(ann);
            }
            return res;
        }
2226 
2227         /* Method: set_sarprop_step_error_threshold_factor
2228 
2229            Set the sarprop step error threshold factor.
2230 
2231 	        This function appears in FANN >= 2.1.0.
2232 
2233 	    See also:
2234    	        <get_sarprop_step_error_threshold_factor>, <fann_set_sarprop_step_error_threshold_factor>
2235         */
        void set_sarprop_step_error_threshold_factor(float sarprop_step_error_threshold_factor)
2237         {
2238             if (ann != NULL)
2239             {
2240                 fann_set_sarprop_step_error_threshold_factor(ann, sarprop_step_error_threshold_factor);
2241             }
2242         }
2243 
        /* Method: get_sarprop_step_error_shift

           The sarprop step error shift.

           The default step error shift is 1.385.

           See also:
   	        <set_sarprop_step_error_shift>, <fann_get_sarprop_step_error_shift>

           This function appears in FANN >= 2.1.0.
        */
        float get_sarprop_step_error_shift()
        {
            float res = 0.0f;
            if (ann != NULL)
            {
                res = fann_get_sarprop_step_error_shift(ann);
            }
            return res;
        }
2264 
2265         /* Method: set_sarprop_step_error_shift
2266 
2267            Set the sarprop step error shift.
2268 
2269 	        This function appears in FANN >= 2.1.0.
2270 
2271 	    See also:
2272    	        <get_sarprop_step_error_shift>, <fann_set_sarprop_step_error_shift>
2273         */
        void set_sarprop_step_error_shift(float sarprop_step_error_shift)
2275         {
2276             if (ann != NULL)
2277             {
2278                 fann_set_sarprop_step_error_shift(ann, sarprop_step_error_shift);
2279             }
2280         }
2281 
        /* Method: get_sarprop_temperature

           The sarprop temperature.

           The default temperature is 0.015.

           See also:
   	        <set_sarprop_temperature>, <fann_get_sarprop_temperature>

           This function appears in FANN >= 2.1.0.
        */
        float get_sarprop_temperature()
        {
            float res = 0.0f;
            if (ann != NULL)
            {
                res = fann_get_sarprop_temperature(ann);
            }
            return res;
        }
2302 
2303         /* Method: set_sarprop_temperature
2304 
2305            Set the sarprop_temperature.
2306 
2307 	        This function appears in FANN >= 2.1.0.
2308 
2309 	    See also:
2310    	        <get_sarprop_temperature>, <fann_set_sarprop_temperature>
2311         */
        void set_sarprop_temperature(float sarprop_temperature)
2313         {
2314             if (ann != NULL)
2315             {
2316                 fann_set_sarprop_temperature(ann, sarprop_temperature);
2317             }
2318         }
2319 
2320 
2321         /* Method: get_num_input
2322 
2323            Get the number of input neurons.
2324 
2325 	        This function appears in FANN >= 1.0.0.
2326         */
        unsigned int get_num_input()
2328         {
2329             unsigned int num_input = 0;
2330             if (ann != NULL)
2331             {
2332                 num_input = fann_get_num_input(ann);
2333             }
2334             return num_input;
2335         }
2336 
2337         /* Method: get_num_output
2338 
2339            Get the number of output neurons.
2340 
2341 	        This function appears in FANN >= 1.0.0.
2342         */
        unsigned int get_num_output()
2344         {
2345             unsigned int num_output = 0;
2346             if (ann != NULL)
2347             {
2348                 num_output = fann_get_num_output(ann);
2349             }
2350             return num_output;
2351         }
2352 
2353         /* Method: get_total_neurons
2354 
           Get the total number of neurons in the entire network. This number also includes the
	        bias neurons, so a 2-4-2 network has 2+4+2+2(bias) = 10 neurons.
2357 
2358 	        This function appears in FANN >= 1.0.0.
2359         */
        unsigned int get_total_neurons()
2361         {
2362             if (ann == NULL)
2363             {
2364                 return 0;
2365             }
2366             return fann_get_total_neurons(ann);
2367         }
2368 
2369         /* Method: get_total_connections
2370 
2371            Get the total number of connections in the entire network.
2372 
2373 	        This function appears in FANN >= 1.0.0.
2374         */
        unsigned int get_total_connections()
2376         {
2377             if (ann == NULL)
2378             {
2379                 return 0;
2380             }
2381             return fann_get_total_connections(ann);
2382         }
2383 
2384 #ifdef FIXEDFANN
2385         /* Method: get_decimal_point
2386 
2387 	        Returns the position of the decimal point in the ann.
2388 
2389 	        This function is only available when the ANN is in fixed point mode.
2390 
2391 	        The decimal point is described in greater detail in the tutorial <Fixed Point Usage>.
2392 
2393 	        See also:
2394 		        <Fixed Point Usage>, <get_multiplier>, <save_to_fixed>,
2395                 <training_data::save_train_to_fixed>, <fann_get_decimal_point>
2396 
2397 	        This function appears in FANN >= 1.0.0.
2398         */
        unsigned int get_decimal_point()
2400         {
2401             if (ann == NULL)
2402             {
2403                 return 0;
2404             }
2405             return fann_get_decimal_point(ann);
2406         }
2407 
2408         /* Method: get_multiplier
2409 
            Returns the multiplier that fixed point data is multiplied with.
2411 
2412 	        This function is only available when the ANN is in fixed point mode.
2413 
	        The multiplier is used to convert between floating point and fixed point notation.
	        A floating point number is multiplied by the multiplier in order to get the fixed point
	        number, and vice versa.
2417 
2418 	        The multiplier is described in greater detail in the tutorial <Fixed Point Usage>.
2419 
2420 	        See also:
2421 		        <Fixed Point Usage>, <get_decimal_point>, <save_to_fixed>,
2422                 <training_data::save_train_to_fixed>, <fann_get_multiplier>
2423 
2424 	        This function appears in FANN >= 1.0.0.
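
	        Example:
	        A small worked example (assuming a decimal point of 6, giving a multiplier of
	        2^6 = 64): the floating point value 0.5 is stored as 0.5 * 64 = 32, and the fixed
	        point value 96 corresponds to 96 / 64 = 1.5.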
2425         */
        unsigned int get_multiplier()
2427         {
2428             if (ann == NULL)
2429             {
2430                 return 0;
2431             }
2432             return fann_get_multiplier(ann);
2433         }
2434 #endif /* FIXEDFANN */
2435 
2436         /*********************************************************************/
2437 
2438         /* Method: get_network_type
2439 
2440             Get the type of neural network it was created as.
2441 
2442 	        Returns:
2443                 The neural network type from enum <FANN::network_type_enum>
2444 
2445             See Also:
2446                 <fann_get_network_type>
2447 
2448            This function appears in FANN >= 2.1.0
2449         */
        network_type_enum get_network_type()
2451         {
2452             fann_nettype_enum network_type = FANN_NETTYPE_LAYER;
2453             if (ann != NULL)
2454             {
2455                 network_type = fann_get_network_type(ann);
2456             }
2457             return static_cast<network_type_enum>(network_type);
2458         }
2459 
2460         /* Method: get_connection_rate
2461 
2462             Get the connection rate used when the network was created
2463 
2464 	        Returns:
2465                 The connection rate
2466 
2467             See also:
2468                 <fann_get_connection_rate>
2469 
2470            This function appears in FANN >= 2.1.0
2471         */
        float get_connection_rate()
2473         {
2474             if (ann == NULL)
2475             {
2476                 return 0;
2477             }
2478             return fann_get_connection_rate(ann);
2479         }
2480 
2481         /* Method: get_num_layers
2482 
2483             Get the number of layers in the network
2484 
2485 	        Returns:
2486 		        The number of layers in the neural network
2487 
2488             See also:
2489                 <fann_get_num_layers>
2490 
2491            This function appears in FANN >= 2.1.0
2492         */
        unsigned int get_num_layers()
2494         {
2495             if (ann == NULL)
2496             {
2497                 return 0;
2498             }
2499             return fann_get_num_layers(ann);
2500         }
2501 
2502         /* Method: get_layer_array
2503 
2504             Get the number of neurons in each layer in the network.
2505 
2506             Bias is not included so the layers match the create methods.
2507 
2508             The layers array must be preallocated to at least
2509             sizeof(unsigned int) * get_num_layers() long.
2510 
2511             See also:
2512                 <fann_get_layer_array>
2513 
2514            This function appears in FANN >= 2.1.0
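
            Example:
            A minimal sketch that prints the number of neurons in each layer (net is assumed
            to be an already created <neural_net>):
            >unsigned int *layers = new unsigned int[net.get_num_layers()];
            >net.get_layer_array(layers);
            >for (unsigned int i = 0; i < net.get_num_layers(); i++)
            >    printf("Layer %u: %u neurons\n", i, layers[i]);
            >delete[] layers;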
2515         */
        void get_layer_array(unsigned int *layers)
2517         {
2518             if (ann != NULL)
2519             {
2520                 fann_get_layer_array(ann, layers);
2521             }
2522         }
2523 
2524         /* Method: get_bias_array
2525 
            Get the number of bias neurons in each layer in the network.
2527 
2528             The bias array must be preallocated to at least
2529             sizeof(unsigned int) * get_num_layers() long.
2530 
2531             See also:
2532                 <fann_get_bias_array>
2533 
2534             This function appears in FANN >= 2.1.0
2535         */
        void get_bias_array(unsigned int *bias)
2537         {
2538             if (ann != NULL)
2539             {
2540                 fann_get_bias_array(ann, bias);
2541             }
2542         }
2543 
2544         /* Method: get_connection_array
2545 
2546             Get the connections in the network.
2547 
2548             The connections array must be preallocated to at least
2549             sizeof(struct fann_connection) * get_total_connections() long.
2550 
2551             See also:
2552                 <fann_get_connection_array>
2553 
2554            This function appears in FANN >= 2.1.0
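
            Example:
            A minimal sketch that dumps every connection and its weight (FANN::connection is
            the wrapped struct fann_connection with from_neuron, to_neuron and weight members):
            >unsigned int total = net.get_total_connections();
            >FANN::connection *connections = new FANN::connection[total];
            >net.get_connection_array(connections);
            >for (unsigned int i = 0; i < total; i++)
            >    printf("%u -> %u: %f\n", connections[i].from_neuron,
            >        connections[i].to_neuron, (float)connections[i].weight);
            >delete[] connections;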
2555         */
        void get_connection_array(connection *connections)
2557         {
2558             if (ann != NULL)
2559             {
2560                 fann_get_connection_array(ann, connections);
2561             }
2562         }
2563 
2564         /* Method: set_weight_array
2565 
2566             Set connections in the network.
2567 
            Only the weights can be changed; connections and weights are ignored
            if they do not already exist in the network.
2570 
2571             The array must have sizeof(struct fann_connection) * num_connections size.
2572 
2573             See also:
2574                 <fann_set_weight_array>
2575 
2576            This function appears in FANN >= 2.1.0
2577         */
        void set_weight_array(connection *connections, unsigned int num_connections)
2579         {
2580             if (ann != NULL)
2581             {
2582                 fann_set_weight_array(ann, connections, num_connections);
2583             }
2584         }
2585 
2586         /* Method: set_weight
2587 
2588             Set a connection in the network.
2589 
2590             Only the weights can be changed. The connection/weight is
2591             ignored if it does not already exist in the network.
2592 
2593             See also:
2594                 <fann_set_weight>
2595 
2596            This function appears in FANN >= 2.1.0
2597         */
        void set_weight(unsigned int from_neuron, unsigned int to_neuron, fann_type weight)
2599         {
2600             if (ann != NULL)
2601             {
2602                 fann_set_weight(ann, from_neuron, to_neuron, weight);
2603             }
2604         }
2605 
2606         /*********************************************************************/
2607 
2608         /* Method: get_learning_momentum
2609 
2610            Get the learning momentum.
2611 
           The learning momentum can be used to speed up FANN::TRAIN_INCREMENTAL training.
           Too high a momentum will, however, not benefit training. Setting momentum to 0 is
           the same as not using the momentum parameter. The recommended value for this parameter
           is between 0.0 and 1.0.
2616 
2617            The default momentum is 0.
2618 
2619            See also:
2620            <set_learning_momentum>, <set_training_algorithm>
2621 
2622            This function appears in FANN >= 2.0.0.
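
           Example:
           A minimal sketch for momentum-assisted incremental training (the value 0.4 is only
           an illustration within the recommended 0.0 - 1.0 range):
           >net.set_training_algorithm(FANN::TRAIN_INCREMENTAL);
           >net.set_learning_momentum(0.4f);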
2623          */
        float get_learning_momentum()
2625         {
2626             float learning_momentum = 0.0f;
2627             if (ann != NULL)
2628             {
2629                 learning_momentum = fann_get_learning_momentum(ann);
2630             }
2631             return learning_momentum;
2632         }
2633 
2634         /* Method: set_learning_momentum
2635 
2636            Set the learning momentum.
2637 
2638            More info available in <get_learning_momentum>
2639 
2640            This function appears in FANN >= 2.0.0.
2641          */
        void set_learning_momentum(float learning_momentum)
2643         {
2644             if (ann != NULL)
2645             {
2646                 fann_set_learning_momentum(ann, learning_momentum);
2647             }
2648         }
2649 
2650         /* Method: get_train_stop_function
2651 
           Returns the stop function used during training.

           The stop function is described further in <FANN::stop_function_enum>

           The default stop function is FANN::STOPFUNC_MSE

           See also:
   	        <set_train_stop_function>, <get_bit_fail_limit>
2660 
2661            This function appears in FANN >= 2.0.0.
2662          */
        stop_function_enum get_train_stop_function()
2664         {
2665             enum fann_stopfunc_enum stopfunc = FANN_STOPFUNC_MSE;
2666             if (ann != NULL)
2667             {
2668                 stopfunc = fann_get_train_stop_function(ann);
2669             }
2670             return static_cast<stop_function_enum>(stopfunc);
2671         }
2672 
2673         /* Method: set_train_stop_function
2674 
2675            Set the stop function used during training.
2676 
2677            The stop function is described further in <FANN::stop_function_enum>
2678 
2679            See also:
2680    	        <get_train_stop_function>
2681 
2682            This function appears in FANN >= 2.0.0.
2683          */
        void set_train_stop_function(stop_function_enum train_stop_function)
2685         {
2686             if (ann != NULL)
2687             {
2688                 fann_set_train_stop_function(ann,
2689                     static_cast<enum fann_stopfunc_enum>(train_stop_function));
2690             }
2691         }
2692 
2693         /* Method: get_bit_fail_limit
2694 
2695            Returns the bit fail limit used during training.
2696 
2697            The bit fail limit is used during training when the <FANN::stop_function_enum> is set to FANN_STOPFUNC_BIT.
2698 
2699            The limit is the maximum accepted difference between the desired output and the actual output during
2700            training. Each output that diverges more than this limit is counted as an error bit.
           This difference is divided by two when dealing with symmetric activation functions,
           so that symmetric and non-symmetric activation functions can use the same limit.
2703 
2704            The default bit fail limit is 0.35.
2705 
2706            See also:
2707    	        <set_bit_fail_limit>
2708 
2709            This function appears in FANN >= 2.0.0.
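
           Example:
           A minimal sketch that stops training on the number of failing bits instead of the
           MSE (the limit and training parameters are made up for illustration):
           >net.set_train_stop_function(FANN::STOPFUNC_BIT);
           >net.set_bit_fail_limit(0.1f);
           >net.train_on_data(data, 1000, 100, 0); // desired_error is now a bit fail count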
2710          */
        fann_type get_bit_fail_limit()
2712         {
2713             fann_type bit_fail_limit = 0.0f;
2714 
2715             if (ann != NULL)
2716             {
2717                 bit_fail_limit = fann_get_bit_fail_limit(ann);
2718             }
2719             return bit_fail_limit;
2720         }
2721 
2722         /* Method: set_bit_fail_limit
2723 
2724            Set the bit fail limit used during training.
2725 
2726            See also:
2727    	        <get_bit_fail_limit>
2728 
2729            This function appears in FANN >= 2.0.0.
2730          */
        void set_bit_fail_limit(fann_type bit_fail_limit)
2732         {
2733             if (ann != NULL)
2734             {
2735                 fann_set_bit_fail_limit(ann, bit_fail_limit);
2736             }
2737         }
2738 
2739         /* Method: get_bit_fail
2740 
	        The number of fail bits, i.e. the number of output neurons which differ by more
	        than the bit fail limit (see <get_bit_fail_limit>, <set_bit_fail_limit>).
	        The bits are counted over all of the training data, so this number can be higher than
	        the number of training patterns.

	        This value is reset by <reset_MSE> and updated by the same functions which also
	        update the MSE value (e.g. <test_data>, <train_epoch>).
2748 
2749 	        See also:
2750 		        <FANN::stop_function_enum>, <get_MSE>
2751 
2752 	        This function appears in FANN >= 2.0.0
2753         */
        unsigned int get_bit_fail()
2755         {
2756             unsigned int bit_fail = 0;
2757             if (ann != NULL)
2758             {
2759                 bit_fail = fann_get_bit_fail(ann);
2760             }
2761             return bit_fail;
2762         }
2763 
2764         /*********************************************************************/
2765 
2766         /* Method: cascadetrain_on_data
2767 
2768            Trains on an entire dataset, for a period of time using the Cascade2 training algorithm.
2769            This algorithm adds neurons to the neural network while training, which means that it
2770            needs to start with an ANN without any hidden layers. The neural network should also use
2771            shortcut connections, so <create_shortcut> should be used to create the ANN like this:
2772            >net.create_shortcut(2, train_data.num_input_train_data(), train_data.num_output_train_data());
2773 
           This training uses the parameters set with the set_cascade_... methods, but it also uses another
           training algorithm as its internal training algorithm. This algorithm can be set to either
           FANN::TRAIN_RPROP or FANN::TRAIN_QUICKPROP by <set_training_algorithm>, and the parameters
           set for these training algorithms will also affect the cascade training.
2778 
2779            Parameters:
2780    		        data - The data, which should be used during training
2781    		        max_neuron - The maximum number of neurons to be added to neural network
2782    		        neurons_between_reports - The number of neurons between printing a status report to stdout.
2783    			        A value of zero means no reports should be printed.
2784    		        desired_error - The desired <fann_get_MSE> or <fann_get_bit_fail>, depending on which stop function
2785    			        is chosen by <fann_set_train_stop_function>.
2786 
2787 	        Instead of printing out reports every neurons_between_reports, a callback function can be called
2788 	        (see <set_callback>).
2789 
2790 	        See also:
2791 		        <train_on_data>, <cascadetrain_on_file>, <fann_cascadetrain_on_data>
2792 
2793 	        This function appears in FANN >= 2.0.0.
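
	        Example:
	        A minimal sketch (the file name and stop criteria are made up for illustration):
	        >FANN::training_data data;
	        >if (data.read_train_from_file("train.data"))
	        >{
	        >    FANN::neural_net net;
	        >    net.create_shortcut(2, data.num_input_train_data(), data.num_output_train_data());
	        >    net.cascadetrain_on_data(data, 30, 1, 0.001f);
	        >}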
2794         */
        void cascadetrain_on_data(const training_data &data, unsigned int max_neurons,
            unsigned int neurons_between_reports, float desired_error)
2797         {
2798             if ((ann != NULL) && (data.train_data != NULL))
2799             {
2800                 fann_cascadetrain_on_data(ann, data.train_data, max_neurons,
2801                     neurons_between_reports, desired_error);
2802             }
2803         }
2804 
2805         /* Method: cascadetrain_on_file
2806 
2807            Does the same as <cascadetrain_on_data>, but reads the training data directly from a file.
2808 
2809            See also:
2810    		        <fann_cascadetrain_on_data>, <fann_cascadetrain_on_file>
2811 
2812 	        This function appears in FANN >= 2.0.0.
2813         */
        void cascadetrain_on_file(const std::string &filename, unsigned int max_neurons,
            unsigned int neurons_between_reports, float desired_error)
2816         {
2817             if (ann != NULL)
2818             {
2819                 fann_cascadetrain_on_file(ann, filename.c_str(),
2820                     max_neurons, neurons_between_reports, desired_error);
2821             }
2822         }
2823 
2824         /* Method: get_cascade_output_change_fraction
2825 
2826            The cascade output change fraction is a number between 0 and 1 determining how large a fraction
2827            the <get_MSE> value should change within <get_cascade_output_stagnation_epochs> during
2828            training of the output connections, in order for the training not to stagnate. If the training
2829            stagnates, the training of the output connections will be ended and new candidates will be prepared.
2830 
2831            This means:
2832            If the MSE does not change by a fraction of <get_cascade_output_change_fraction> during a
2833            period of <get_cascade_output_stagnation_epochs>, the training of the output connections
2834            is stopped because the training has stagnated.
2835 
2836            If the cascade output change fraction is low, the output connections will be trained more and if the
2837            fraction is high they will be trained less.
2838 
2839            The default cascade output change fraction is 0.01, which is equivalent to a 1% change in MSE.
2840 
2841            See also:
2842    		        <set_cascade_output_change_fraction>, <get_MSE>,
2843                 <get_cascade_output_stagnation_epochs>, <fann_get_cascade_output_change_fraction>
2844 
2845 	        This function appears in FANN >= 2.0.0.
2846          */
2847         float get_cascade_output_change_fraction()
2848         {
2849             float change_fraction = 0.0f;
2850             if (ann != NULL)
2851             {
2852                 change_fraction = fann_get_cascade_output_change_fraction(ann);
2853             }
2854             return change_fraction;
2855         }
2856 
2857         /* Method: set_cascade_output_change_fraction
2858 
2859            Sets the cascade output change fraction.
2860 
2861            See also:
2862    		        <get_cascade_output_change_fraction>, <fann_set_cascade_output_change_fraction>
2863 
2864 	        This function appears in FANN >= 2.0.0.
2865          */
2866         void set_cascade_output_change_fraction(float cascade_output_change_fraction)
2867         {
2868             if (ann != NULL)
2869             {
2870                 fann_set_cascade_output_change_fraction(ann, cascade_output_change_fraction);
2871             }
2872         }
2873 
2874         /* Method: get_cascade_output_stagnation_epochs
2875 
2876            The number of cascade output stagnation epochs determines the number of epochs training is allowed to
2877            continue without changing the MSE by a fraction of <get_cascade_output_change_fraction>.
2878 
2879            See more info about this parameter in <get_cascade_output_change_fraction>.
2880 
2881            The default number of cascade output stagnation epochs is 12.
2882 
2883            See also:
2884    		        <set_cascade_output_stagnation_epochs>, <get_cascade_output_change_fraction>,
2885                 <fann_get_cascade_output_stagnation_epochs>
2886 
2887 	        This function appears in FANN >= 2.0.0.
2888          */
2889         unsigned int get_cascade_output_stagnation_epochs()
2890         {
2891             unsigned int stagnation_epochs = 0;
2892             if (ann != NULL)
2893             {
2894                 stagnation_epochs = fann_get_cascade_output_stagnation_epochs(ann);
2895             }
2896             return stagnation_epochs;
2897         }
2898 
2899         /* Method: set_cascade_output_stagnation_epochs
2900 
2901            Sets the number of cascade output stagnation epochs.
2902 
2903            See also:
2904    		        <get_cascade_output_stagnation_epochs>, <fann_set_cascade_output_stagnation_epochs>
2905 
2906 	        This function appears in FANN >= 2.0.0.
2907          */
2908         void set_cascade_output_stagnation_epochs(unsigned int cascade_output_stagnation_epochs)
2909         {
2910             if (ann != NULL)
2911             {
2912                 fann_set_cascade_output_stagnation_epochs(ann, cascade_output_stagnation_epochs);
2913             }
2914         }
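        /* Example:
           An illustrative combination of the two output stagnation parameters, assuming
           net is an already constructed network; the values 0.001 and 20 are assumptions
           for the sketch, not tuned recommendations:
           >// Require a smaller MSE change over a longer window before output training stops
           >net.set_cascade_output_change_fraction(0.001f);
           >net.set_cascade_output_stagnation_epochs(20);
        */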
2915 
2916         /* Method: get_cascade_candidate_change_fraction
2917 
2918            The cascade candidate change fraction is a number between 0 and 1 determining how large a fraction
2919            the <get_MSE> value should change within <get_cascade_candidate_stagnation_epochs> during
2920            training of the candidate neurons, in order for the training not to stagnate. If the training
2921            stagnates, the training of the candidate neurons will be ended and the best candidate will be selected.
2922 
2923            This means:
2924            If the MSE does not change by a fraction of <get_cascade_candidate_change_fraction> during a
2925            period of <get_cascade_candidate_stagnation_epochs>, the training of the candidate neurons
2926            is stopped because the training has stagnated.
2927 
2928            If the cascade candidate change fraction is low, the candidate neurons will be trained more and if the
2929            fraction is high they will be trained less.
2930 
2931            The default cascade candidate change fraction is 0.01, which is equivalent to a 1% change in MSE.
2932 
2933            See also:
2934    		        <set_cascade_candidate_change_fraction>, <get_MSE>,
2935                 <get_cascade_candidate_stagnation_epochs>, <fann_get_cascade_candidate_change_fraction>
2936 
2937 	        This function appears in FANN >= 2.0.0.
2938          */
2939         float get_cascade_candidate_change_fraction()
2940         {
2941             float change_fraction = 0.0f;
2942             if (ann != NULL)
2943             {
2944                 change_fraction = fann_get_cascade_candidate_change_fraction(ann);
2945             }
2946             return change_fraction;
2947         }
2948 
2949         /* Method: set_cascade_candidate_change_fraction
2950 
2951            Sets the cascade candidate change fraction.
2952 
2953            See also:
2954    		        <get_cascade_candidate_change_fraction>,
2955                 <fann_set_cascade_candidate_change_fraction>
2956 
2957 	        This function appears in FANN >= 2.0.0.
2958          */
2959         void set_cascade_candidate_change_fraction(float cascade_candidate_change_fraction)
2960         {
2961             if (ann != NULL)
2962             {
2963                 fann_set_cascade_candidate_change_fraction(ann, cascade_candidate_change_fraction);
2964             }
2965         }
2966 
2967         /* Method: get_cascade_candidate_stagnation_epochs
2968 
2969            The number of cascade candidate stagnation epochs determines the number of epochs training is allowed to
2970            continue without changing the MSE by a fraction of <get_cascade_candidate_change_fraction>.
2971 
2972            See more info about this parameter in <get_cascade_candidate_change_fraction>.
2973 
2974            The default number of cascade candidate stagnation epochs is 12.
2975 
2976            See also:
2977    		        <set_cascade_candidate_stagnation_epochs>, <get_cascade_candidate_change_fraction>,
2978                 <fann_get_cascade_candidate_stagnation_epochs>
2979 
2980 	        This function appears in FANN >= 2.0.0.
2981          */
2982         unsigned int get_cascade_candidate_stagnation_epochs()
2983         {
2984             unsigned int stagnation_epochs = 0;
2985             if (ann != NULL)
2986             {
2987                 stagnation_epochs = fann_get_cascade_candidate_stagnation_epochs(ann);
2988             }
2989             return stagnation_epochs;
2990         }
2991 
2992         /* Method: set_cascade_candidate_stagnation_epochs
2993 
2994            Sets the number of cascade candidate stagnation epochs.
2995 
2996            See also:
2997    		        <get_cascade_candidate_stagnation_epochs>,
2998                 <fann_set_cascade_candidate_stagnation_epochs>
2999 
3000 	        This function appears in FANN >= 2.0.0.
3001          */
3002         void set_cascade_candidate_stagnation_epochs(unsigned int cascade_candidate_stagnation_epochs)
3003         {
3004             if (ann != NULL)
3005             {
3006                 fann_set_cascade_candidate_stagnation_epochs(ann, cascade_candidate_stagnation_epochs);
3007             }
3008         }
3009 
3010         /* Method: get_cascade_weight_multiplier
3011 
3012            The weight multiplier is a parameter which is used to multiply the weights from the candidate neuron
3013            before adding the neuron to the neural network. This parameter is usually between 0 and 1, and is used
3014            to make the training a bit less aggressive.
3015 
3016            The default weight multiplier is 0.4.
3017 
3018            See also:
3019    		        <set_cascade_weight_multiplier>, <fann_get_cascade_weight_multiplier>
3020 
3021 	        This function appears in FANN >= 2.0.0.
3022          */
3023         fann_type get_cascade_weight_multiplier()
3024         {
3025             fann_type weight_multiplier = 0;
3026             if (ann != NULL)
3027             {
3028                 weight_multiplier = fann_get_cascade_weight_multiplier(ann);
3029             }
3030             return weight_multiplier;
3031         }
3032 
3033         /* Method: set_cascade_weight_multiplier
3034 
3035            Sets the weight multiplier.
3036 
3037            See also:
3038    		        <get_cascade_weight_multiplier>, <fann_set_cascade_weight_multiplier>
3039 
3040 	        This function appears in FANN >= 2.0.0.
3041          */
3042         void set_cascade_weight_multiplier(fann_type cascade_weight_multiplier)
3043         {
3044             if (ann != NULL)
3045             {
3046                 fann_set_cascade_weight_multiplier(ann, cascade_weight_multiplier);
3047             }
3048         }
3049 
3050         /* Method: get_cascade_candidate_limit
3051 
3052            The candidate limit is a limit for how much the candidate neuron may be trained.
3053            It limits the ratio between the MSE and the candidate score.
3054 
3055            Set this to a lower value to avoid overfitting and to a higher value if overfitting is
3056            not a problem.
3057 
3058            The default candidate limit is 1000.0.
3059 
3060            See also:
3061    		        <set_cascade_candidate_limit>, <fann_get_cascade_candidate_limit>
3062 
3063 	        This function appears in FANN >= 2.0.0.
3064          */
3065         fann_type get_cascade_candidate_limit()
3066         {
3067             fann_type candidate_limit = 0;
3068             if (ann != NULL)
3069             {
3070                 candidate_limit = fann_get_cascade_candidate_limit(ann);
3071             }
3072             return candidate_limit;
3073         }
3074 
3075         /* Method: set_cascade_candidate_limit
3076 
3077            Sets the candidate limit.
3078 
3079            See also:
3080    		        <get_cascade_candidate_limit>, <fann_set_cascade_candidate_limit>
3081 
3082 	        This function appears in FANN >= 2.0.0.
3083          */
3084         void set_cascade_candidate_limit(fann_type cascade_candidate_limit)
3085         {
3086             if (ann != NULL)
3087             {
3088                 fann_set_cascade_candidate_limit(ann, cascade_candidate_limit);
3089             }
3090         }
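        /* Example:
           An illustrative sketch (floatfann/doublefann) of making candidate training less
           aggressive when overfitting is a concern, assuming net is an already constructed
           network; the values are assumptions, not tuned recommendations:
           >net.set_cascade_weight_multiplier(0.2);   // scale new candidate weights down further
           >net.set_cascade_candidate_limit(500.0);   // stop candidate training earlier
        */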
3091 
3092         /* Method: get_cascade_max_out_epochs
3093 
3094            The maximum out epochs determines the maximum number of epochs the output connections
3095            may be trained after adding a new candidate neuron.
3096 
3097            The default max out epochs is 150.
3098 
3099            See also:
3100    		        <set_cascade_max_out_epochs>, <fann_get_cascade_max_out_epochs>
3101 
3102 	        This function appears in FANN >= 2.0.0.
3103          */
3104         unsigned int get_cascade_max_out_epochs()
3105         {
3106             unsigned int max_out_epochs = 0;
3107             if (ann != NULL)
3108             {
3109                 max_out_epochs = fann_get_cascade_max_out_epochs(ann);
3110             }
3111             return max_out_epochs;
3112         }
3113 
3114         /* Method: set_cascade_max_out_epochs
3115 
3116            Sets the maximum out epochs.
3117 
3118            See also:
3119    		        <get_cascade_max_out_epochs>, <fann_set_cascade_max_out_epochs>
3120 
3121 	        This function appears in FANN >= 2.0.0.
3122          */
3123         void set_cascade_max_out_epochs(unsigned int cascade_max_out_epochs)
3124         {
3125             if (ann != NULL)
3126             {
3127                 fann_set_cascade_max_out_epochs(ann, cascade_max_out_epochs);
3128             }
3129         }
3130 
3131         /* Method: get_cascade_max_cand_epochs
3132 
3133            The maximum candidate epochs determines the maximum number of epochs the input
3134            connections to the candidates may be trained before adding a new candidate neuron.
3135 
3136            The default max candidate epochs is 150.
3137 
3138            See also:
3139    		        <set_cascade_max_cand_epochs>, <fann_get_cascade_max_cand_epochs>
3140 
3141 	        This function appears in FANN >= 2.0.0.
3142          */
3143         unsigned int get_cascade_max_cand_epochs()
3144         {
3145             unsigned int max_cand_epochs = 0;
3146             if (ann != NULL)
3147             {
3148                 max_cand_epochs = fann_get_cascade_max_cand_epochs(ann);
3149             }
3150             return max_cand_epochs;
3151         }
3152 
3153         /* Method: set_cascade_max_cand_epochs
3154 
3155            Sets the max candidate epochs.
3156 
3157            See also:
3158    		        <get_cascade_max_cand_epochs>, <fann_set_cascade_max_cand_epochs>
3159 
3160 	        This function appears in FANN >= 2.0.0.
3161          */
3162         void set_cascade_max_cand_epochs(unsigned int cascade_max_cand_epochs)
3163         {
3164             if (ann != NULL)
3165             {
3166                 fann_set_cascade_max_cand_epochs(ann, cascade_max_cand_epochs);
3167             }
3168         }
3169 
3170         /* Method: get_cascade_num_candidates
3171 
3172            The number of candidates used during training (calculated by multiplying <get_cascade_activation_functions_count>,
3173            <get_cascade_activation_steepnesses_count> and <get_cascade_num_candidate_groups>).
3174 
3175            The actual candidates are defined by the <get_cascade_activation_functions> and
3176            <get_cascade_activation_steepnesses> arrays. These arrays define the activation functions
3177            and activation steepnesses used for the candidate neurons. If there are 2 activation functions
3178            in the activation function array and 3 steepnesses in the steepness array, then there will be
3179            2x3=6 different candidates which will be trained. These 6 different candidates can be copied into
3180            several candidate groups, where the only difference between these groups is the initial weights.
3181            If the number of groups is set to 2, then the number of candidate neurons will be 2x3x2=12. The
3182            number of candidate groups is defined by <set_cascade_num_candidate_groups>.
3183 
3184            The default number of candidates is 6x4x2 = 48.
3185 
3186            See also:
3187    		        <get_cascade_activation_functions>, <get_cascade_activation_functions_count>,
3188    		        <get_cascade_activation_steepnesses>, <get_cascade_activation_steepnesses_count>,
3189    		        <get_cascade_num_candidate_groups>, <fann_get_cascade_num_candidates>
3190 
3191 	        This function appears in FANN >= 2.0.0.
3192          */
3193         unsigned int get_cascade_num_candidates()
3194         {
3195             unsigned int num_candidates = 0;
3196             if (ann != NULL)
3197             {
3198                 num_candidates = fann_get_cascade_num_candidates(ann);
3199             }
3200             return num_candidates;
3201         }
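        /* Example:
           A sketch of how the candidate count is composed, assuming net is an already
           constructed network (floatfann/doublefann); the chosen activation functions,
           steepnesses and group count are assumptions for illustration:
           >FANN::activation_function_enum funcs[2] =
           >    { FANN::SIGMOID_SYMMETRIC, FANN::GAUSSIAN_SYMMETRIC };
           >fann_type steeps[3] = { 0.25, 0.5, 1.0 };
           >net.set_cascade_activation_functions(funcs, 2);
           >net.set_cascade_activation_steepnesses(steeps, 3);
           >net.set_cascade_num_candidate_groups(2);
           >// get_cascade_num_candidates() now returns 2 * 3 * 2 = 12
        */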
3202 
3203         /* Method: get_cascade_activation_functions_count
3204 
3205            The number of activation functions in the <get_cascade_activation_functions> array.
3206 
3207            The default number of activation functions is 6.
3208 
3209            See also:
3210    		        <get_cascade_activation_functions>, <set_cascade_activation_functions>,
3211                 <fann_get_cascade_activation_functions_count>
3212 
3213 	        This function appears in FANN >= 2.0.0.
3214          */
3215         unsigned int get_cascade_activation_functions_count()
3216         {
3217             unsigned int activation_functions_count = 0;
3218             if (ann != NULL)
3219             {
3220                 activation_functions_count = fann_get_cascade_activation_functions_count(ann);
3221             }
3222             return activation_functions_count;
3223         }
3224 
3225         /* Method: get_cascade_activation_functions
3226 
3227            The cascade activation functions array is an array of the different activation functions used by
3228            the candidates.
3229 
3230            See <get_cascade_num_candidates> for a description of which candidate neurons will be
3231            generated by this array.
3232 
3233            See also:
3234    		        <get_cascade_activation_functions_count>, <set_cascade_activation_functions>,
3235    		        <FANN::activation_function_enum>
3236 
3237 	        This function appears in FANN >= 2.0.0.
3238          */
3239         activation_function_enum * get_cascade_activation_functions()
3240         {
3241             enum fann_activationfunc_enum *activation_functions = NULL;
3242             if (ann != NULL)
3243             {
3244                 activation_functions = fann_get_cascade_activation_functions(ann);
3245             }
3246             return reinterpret_cast<activation_function_enum *>(activation_functions);
3247         }
3248 
3249         /* Method: set_cascade_activation_functions
3250 
3251            Sets the array of cascade candidate activation functions. The array must be just as long
3252            as defined by the count.
3253 
3254            See <get_cascade_num_candidates> for a description of which candidate neurons will be
3255            generated by this array.
3256 
3257            See also:
3258    		        <get_cascade_activation_steepnesses_count>, <get_cascade_activation_steepnesses>,
3259                 <fann_set_cascade_activation_functions>
3260 
3261 	        This function appears in FANN >= 2.0.0.
3262          */
3263         void set_cascade_activation_functions(activation_function_enum *cascade_activation_functions,
3264             unsigned int cascade_activation_functions_count)
3265         {
3266             if (ann != NULL)
3267             {
3268                 fann_set_cascade_activation_functions(ann,
3269                     reinterpret_cast<enum fann_activationfunc_enum *>(cascade_activation_functions),
3270                     cascade_activation_functions_count);
3271             }
3272         }
3273 
3274         /* Method: get_cascade_activation_steepnesses_count
3275 
3276            The number of activation steepnesses in the <get_cascade_activation_steepnesses> array.
3277 
3278            The default number of activation steepnesses is 4.
3279 
3280            See also:
3281    		        <get_cascade_activation_steepnesses>, <set_cascade_activation_functions>,
3282                 <fann_get_cascade_activation_steepnesses_count>
3283 
3284 	        This function appears in FANN >= 2.0.0.
3285          */
3286         unsigned int get_cascade_activation_steepnesses_count()
3287         {
3288             unsigned int activation_steepness_count = 0;
3289             if (ann != NULL)
3290             {
3291                 activation_steepness_count = fann_get_cascade_activation_steepnesses_count(ann);
3292             }
3293             return activation_steepness_count;
3294         }
3295 
3296         /* Method: get_cascade_activation_steepnesses
3297 
3298            The cascade activation steepnesses array is an array of the different activation steepnesses used by
3299            the candidates.
3300 
3301            See <get_cascade_num_candidates> for a description of which candidate neurons will be
3302            generated by this array.
3303 
3304            The default activation steepnesses are {0.25, 0.50, 0.75, 1.00}.
3305 
3306            See also:
3307    		        <set_cascade_activation_steepnesses>, <get_cascade_activation_steepnesses_count>,
3308                 <fann_get_cascade_activation_steepnesses>
3309 
3310 	        This function appears in FANN >= 2.0.0.
3311          */
3312         fann_type *get_cascade_activation_steepnesses()
3313         {
3314             fann_type *activation_steepnesses = NULL;
3315             if (ann != NULL)
3316             {
3317                 activation_steepnesses = fann_get_cascade_activation_steepnesses(ann);
3318             }
3319             return activation_steepnesses;
3320         }
3321 
3322         /* Method: set_cascade_activation_steepnesses
3323 
3324            Sets the array of cascade candidate activation steepnesses. The array must be just as long
3325            as defined by the count.
3326 
3327            See <get_cascade_num_candidates> for a description of which candidate neurons will be
3328            generated by this array.
3329 
3330            See also:
3331    		        <get_cascade_activation_steepnesses>, <get_cascade_activation_steepnesses_count>,
3332                 <fann_set_cascade_activation_steepnesses>
3333 
3334 	        This function appears in FANN >= 2.0.0.
3335          */
3336         void set_cascade_activation_steepnesses(fann_type *cascade_activation_steepnesses,
3337             unsigned int cascade_activation_steepnesses_count)
3338         {
3339             if (ann != NULL)
3340             {
3341                 fann_set_cascade_activation_steepnesses(ann,
3342                     cascade_activation_steepnesses, cascade_activation_steepnesses_count);
3343             }
3344         }
3345 
3346         /* Method: get_cascade_num_candidate_groups
3347 
3348            The number of candidate groups is the number of groups of identical candidates which will be used
3349            during training.
3350 
3351            This number can be used to have more candidates without having to define new parameters for the candidates.
3352 
3353            See <get_cascade_num_candidates> for a description of which candidate neurons will be
3354            generated by this parameter.
3355 
3356            The default number of candidate groups is 2.
3357 
3358            See also:
3359    		        <set_cascade_num_candidate_groups>, <fann_get_cascade_num_candidate_groups>
3360 
3361 	        This function appears in FANN >= 2.0.0.
3362          */
3363         unsigned int get_cascade_num_candidate_groups()
3364         {
3365             unsigned int num_candidate_groups = 0;
3366             if (ann != NULL)
3367             {
3368                 num_candidate_groups = fann_get_cascade_num_candidate_groups(ann);
3369             }
3370             return num_candidate_groups;
3371         }
3372 
3373         /* Method: set_cascade_num_candidate_groups
3374 
3375            Sets the number of candidate groups.
3376 
3377            See also:
3378    		        <get_cascade_num_candidate_groups>, <fann_set_cascade_num_candidate_groups>
3379 
3380 	        This function appears in FANN >= 2.0.0.
3381          */
3382         void set_cascade_num_candidate_groups(unsigned int cascade_num_candidate_groups)
3383         {
3384             if (ann != NULL)
3385             {
3386                 fann_set_cascade_num_candidate_groups(ann, cascade_num_candidate_groups);
3387             }
3388         }
3389 
3390         /*********************************************************************/
3391 
3392 #ifndef FIXEDFANN
3393         /* Method: scale_train
3394 
3395            Scale input and output data based on previously calculated parameters.
3396 
3397            See also:
3398    		        <descale_train>, <fann_scale_train>
3399 
3400 	        This function appears in FANN >= 2.1.0.
3401          */
3402         void scale_train(training_data &data)
3403         {
3404             if (ann != NULL)
3405             {
3406                 fann_scale_train(ann, data.train_data);
3407             }
3408         }
3409 
3410         /* Method: descale_train
3411 
3412            Descale input and output data based on previously calculated parameters.
3413 
3414            See also:
3415    		        <scale_train>, <fann_descale_train>
3416 
3417 	        This function appears in FANN >= 2.1.0.
3418          */
3419         void descale_train(training_data &data)
3420         {
3421             if (ann != NULL)
3422             {
3423                 fann_descale_train(ann, data.train_data);
3424             }
3425         }
3426 
3427         /* Method: set_input_scaling_params
3428 
3429            Calculate scaling parameters for future use based on training data.
3430 
3431            See also:
3432    		        <set_output_scaling_params>, <fann_set_input_scaling_params>
3433 
3434 	        This function appears in FANN >= 2.1.0.
3435          */
3436         bool set_input_scaling_params(const training_data &data, float new_input_min, float new_input_max)
3437         {
3438             bool status = false;
3439             if (ann != NULL)
3440             {
3441                 status = (fann_set_input_scaling_params(ann, data.train_data, new_input_min, new_input_max) != -1);
3442             }
3443             return status;
3444         }
3445 
3446         /* Method: set_output_scaling_params
3447 
3448            Calculate scaling parameters for future use based on training data.
3449 
3450            See also:
3451    		        <set_input_scaling_params>, <fann_set_output_scaling_params>
3452 
3453 	        This function appears in FANN >= 2.1.0.
3454          */
3455         bool set_output_scaling_params(const training_data &data, float new_output_min, float new_output_max)
3456         {
3457             bool status = false;
3458             if (ann != NULL)
3459             {
3460                 status = (fann_set_output_scaling_params(ann, data.train_data, new_output_min, new_output_max) != -1);
3461             }
3462             return status;
3463         }
3464 
3465         /* Method: set_scaling_params
3466 
3467            Calculate scaling parameters for future use based on training data.
3468 
3469            See also:
3470    		        <clear_scaling_params>, <fann_set_scaling_params>
3471 
3472 	        This function appears in FANN >= 2.1.0.
3473          */
3474         bool set_scaling_params(const training_data &data,
3475 	        float new_input_min, float new_input_max, float new_output_min, float new_output_max)
3476         {
3477             bool status = false;
3478             if (ann != NULL)
3479             {
3480                 status = (fann_set_scaling_params(ann, data.train_data,
3481                     new_input_min, new_input_max, new_output_min, new_output_max) != -1);
3482             }
3483             return status;
3484         }
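        /* Example:
           An illustrative scaling workflow, assuming net has been created and data has
           been loaded; the target ranges and training parameters are assumptions for
           the sketch:
           >net.set_scaling_params(data, -1.0f, 1.0f, 0.0f, 1.0f); // inputs to -1..1, outputs to 0..1
           >net.scale_train(data);                                  // scale the training data in place
           >net.train_on_data(data, 1000, 0, 0.001f);               // train on the scaled data
        */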
3485 
3486         /* Method: clear_scaling_params
3487 
3488            Clears scaling parameters.
3489 
3490            See also:
3491    		        <set_scaling_params>, <fann_clear_scaling_params>
3492 
3493 	        This function appears in FANN >= 2.1.0.
3494          */
3495         bool clear_scaling_params()
3496         {
3497             bool status = false;
3498             if (ann != NULL)
3499             {
3500                 status = (fann_clear_scaling_params(ann) != -1);
3501             }
3502             return status;
3503         }
3504 
3505         /* Method: scale_input
3506 
3507            Scale data in the input vector before feeding it to the ANN, based on previously calculated parameters.
3508 
3509            See also:
3510    		        <descale_input>, <scale_output>, <fann_scale_input>
3511 
3512 	        This function appears in FANN >= 2.1.0.
3513          */
3514         void scale_input(fann_type *input_vector)
3515         {
3516             if (ann != NULL)
3517             {
3518                 fann_scale_input(ann, input_vector );
3519             }
3520         }
3521 
3522         /* Method: scale_output
3523 
3524            Scale data in the output vector before feeding it to the ANN, based on previously calculated parameters.
3525 
3526            See also:
3527    		        <descale_output>, <scale_input>, <fann_scale_output>
3528 
3529 	        This function appears in FANN >= 2.1.0.
3530          */
3531         void scale_output(fann_type *output_vector)
3532         {
3533             if (ann != NULL)
3534             {
3535                 fann_scale_output(ann, output_vector );
3536             }
3537         }
3538 
3539         /* Method: descale_input
3540 
3541            Descale data in the input vector after getting it from the ANN, based on previously calculated parameters.
3542 
3543            See also:
3544    		        <scale_input>, <descale_output>, <fann_descale_input>
3545 
3546 	        This function appears in FANN >= 2.1.0.
3547          */
3548         void descale_input(fann_type *input_vector)
3549         {
3550             if (ann != NULL)
3551             {
3552                 fann_descale_input(ann, input_vector );
3553             }
3554         }
3555 
3556         /* Method: descale_output
3557 
3558            Descale data in the output vector after getting it from the ANN, based on previously calculated parameters.
3559 
3560            See also:
3561    		        <scale_output>, <descale_input>, <fann_descale_output>
3562 
3563 	        This function appears in FANN >= 2.1.0.
3564          */
3565         void descale_output(fann_type *output_vector)
3566         {
3567             if (ann != NULL)
3568             {
3569                 fann_descale_output(ann, output_vector );
3570             }
3571         }
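        /* Example:
           An illustrative run-time sketch combining <scale_input>, <run> and <descale_output>,
           assuming net has been trained with scaling parameters set and input points to one
           raw (unscaled) input pattern:
           >net.scale_input(input);                 // scale the raw input in place
           >fann_type *output = net.run(input);     // run the network on the scaled input
           >net.descale_output(output);             // descale the result back to the original range
        */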
3572 
3573 #endif /* FIXEDFANN */
3574 
3575         /*********************************************************************/
3576 
3577         /* Method: set_error_log
3578 
3579            Change where errors are logged to.
3580 
3581            If log_file is NULL, no errors will be printed.
3582 
3583            If the neural_net is empty, i.e. ann is NULL, the default log will be set.
3584            The default log is the log used when creating a neural_net.
3585            This default log will also be the default for all new structs
3586            that are created.
3587 
3588            The default behavior is to log them to stderr.
3589 
3590            See also:
3591                 <struct fann_error>, <fann_set_error_log>
3592 
3593            This function appears in FANN >= 1.1.0.
3594          */
3595         void set_error_log(FILE *log_file)
3596         {
3597             fann_set_error_log(reinterpret_cast<struct fann_error *>(ann), log_file);
3598         }
3599 
3600         /* Method: get_errno
3601 
3602            Returns the last error number.
3603 
3604            See also:
3605             <fann_errno_enum>, <fann_reset_errno>, <fann_get_errno>
3606 
3607            This function appears in FANN >= 1.1.0.
3608          */
3609         unsigned int get_errno()
3610         {
3611             return fann_get_errno(reinterpret_cast<struct fann_error *>(ann));
3612         }
3613 
3614         /* Method: reset_errno
3615 
3616            Resets the last error number.
3617 
3618            This function appears in FANN >= 1.1.0.
3619          */
3620         void reset_errno()
3621         {
3622             fann_reset_errno(reinterpret_cast<struct fann_error *>(ann));
3623         }
3624 
3625         /* Method: reset_errstr
3626 
3627            Resets the last error string.
3628 
3629            This function appears in FANN >= 1.1.0.
3630          */
3631         void reset_errstr()
3632         {
3633             fann_reset_errstr(reinterpret_cast<struct fann_error *>(ann));
3634         }
3635 
3636         /* Method: get_errstr
3637 
3638            Returns the last error string.
3639 
3640            This function calls <fann_reset_errno> and <fann_reset_errstr>.
3641 
3642            This function appears in FANN >= 1.1.0.
3643          */
3644         std::string get_errstr()
3645         {
3646             return std::string(fann_get_errstr(reinterpret_cast<struct fann_error *>(ann)));
3647         }
3648 
3649         /* Method: print_error
3650 
3651            Prints the last error to stderr.
3652 
3653            This function appears in FANN >= 1.1.0.
3654          */
3655         void print_error()
3656         {
3657             fann_print_error(reinterpret_cast<struct fann_error *>(ann));
3658         }
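        /* Example:
           An illustrative error-handling sketch, assuming the network has already been
           created; the log file name is a placeholder:
           >FILE *error_log = fopen("fann_errors.log", "w");
           >net.set_error_log(error_log);   // or set_error_log(NULL) to silence error output
           >// ... operations on net ...
           >if (net.get_errno() != 0)
           >{
           >    net.print_error();
           >    net.reset_errno();
           >}
        */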
3659 
3660         /*********************************************************************/
3661 
3662     private:
3663         // Structure used by set_callback to hold information about a user callback
3664         typedef struct user_context_type
3665         {
3666             callback_type user_callback; // Pointer to user callback function
3667             void *user_data; // Arbitrary data pointer passed to the callback
3668             neural_net *net; // This pointer for the neural network
3669         } user_context;
3670 
3671         // Internal callback used to convert from pointers to class references
3672         static int FANN_API internal_callback(struct fann *ann, struct fann_train_data *train,
3673             unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, unsigned int epochs)
3674         {
3675             user_context *user_data = static_cast<user_context *>(fann_get_user_data(ann));
3676             if (user_data != NULL)
3677             {
3678                 FANN::training_data data;
3679                 data.train_data = train;
3680 
3681                 int result = (*user_data->user_callback)(*user_data->net,
3682                     data, max_epochs, epochs_between_reports, desired_error, epochs, user_data->user_data);
3683 
3684                 data.train_data = NULL; // Prevent automatic cleanup
3685                 return result;
3686             }
3687             else
3688             {
3689                 return -1; // This should not occur except if out of memory
3690             }
3691         }
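        /* Example:
           For reference, a user callback compatible with the invocation above would look
           roughly like this; this is a sketch only, see <set_callback> and <callback_type>
           for the authoritative signature:
           >int training_callback(FANN::neural_net &net, FANN::training_data &train,
           >    unsigned int max_epochs, unsigned int epochs_between_reports,
           >    float desired_error, unsigned int epochs, void *user_data)
           >{
           >    std::cout << "Epochs: " << epochs << ", MSE: " << net.get_MSE() << std::endl;
           >    return 0; // a negative return value stops the training
           >}
        */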
3692     protected:
3693         // Pointer to the encapsulated fann neural net structure
3694         struct fann *ann;
3695     };
3696 
3697     /*************************************************************************/
3698 }
3699 
3700 #endif /* FANN_CPP_H_INCLUDED */
3701