//   OpenNN: Open Neural Networks Library
//   www.opennn.net
//
//   P E R C E P T R O N   L A Y E R   C L A S S
//
//   Artificial Intelligence Techniques SL
//   artelnics@artelnics.com

#include "perceptron_layer.h"

namespace OpenNN
{

/// Default constructor.
/// It creates an empty layer object, with no perceptrons.
/// This constructor also initializes the rest of the class members to their default values.

PerceptronLayer::PerceptronLayer() : Layer()
{
    set();

    layer_type = Perceptron;
}


/// Layer architecture constructor.
/// It creates a layer object with given numbers of inputs and perceptrons.
/// The parameters are initialized at random.
/// This constructor also initializes the rest of the class members to their default values.
/// @param new_inputs_number Number of inputs in the layer.
/// @param new_neurons_number Number of perceptrons in the layer.
/// @param layer_number Number used to build the layer name.
/// @param new_activation_function Activation function of the layer.

PerceptronLayer::PerceptronLayer(const Index& new_inputs_number, const Index& new_neurons_number,
                                 const Index& layer_number, const PerceptronLayer::ActivationFunction& new_activation_function) : Layer()
{
    set(new_inputs_number, new_neurons_number, new_activation_function);

    layer_type = Perceptron;

    layer_name = "perceptron_layer_" + to_string(layer_number);
}


/// Destructor.
/// This destructor does not delete any pointer.

PerceptronLayer::~PerceptronLayer()
{
}


/// Returns the number of inputs to the layer.

Index PerceptronLayer::get_inputs_number() const
{
    return synaptic_weights.dimension(0);
}


/// Returns the number of neurons in the layer.

Index PerceptronLayer::get_neurons_number() const
{
    return biases.size();
}


/// Returns the number of biases in the layer.

Index PerceptronLayer::get_biases_number() const
{
    return biases.size();
}


/// Returns the number of synaptic weights in the layer.

Index PerceptronLayer::get_synaptic_weights_number() const
{
    return synaptic_weights.size();
}


/// Returns the number of parameters (biases and synaptic weights) of the layer.

Index PerceptronLayer::get_parameters_number() const
{
    return biases.size() + synaptic_weights.size();
}


/// Returns the biases from all the perceptrons in the layer.
/// The format is a matrix of real values with a single row.
/// The number of columns is the number of neurons in the layer.

const Tensor<type, 2>& PerceptronLayer::get_biases() const
{
    return biases;
}


/// Returns the synaptic weights from the perceptrons.
/// The format is a matrix of real values.
/// The number of rows is the number of inputs to the layer.
/// The number of columns is the number of neurons in the layer.

const Tensor<type, 2>& PerceptronLayer::get_synaptic_weights() const
{
    return synaptic_weights;
}


/// Returns the synaptic weights contained in a single parameters vector,
/// in which the biases come first, followed by the synaptic weights.
/// @param parameters Vector with all the parameters of the layer.

Tensor<type, 2> PerceptronLayer::get_synaptic_weights(const Tensor<type, 1>& parameters) const
{
    const Index inputs_number = get_inputs_number();

    const Index neurons_number = get_neurons_number();

    const Index synaptic_weights_number = get_synaptic_weights_number();

    const Index parameters_size = parameters.size();

    const Index start_synaptic_weights_number = (parameters_size - synaptic_weights_number);

    Tensor<type, 1> new_synaptic_weights = parameters.slice(Eigen::array<Eigen::Index, 1>({start_synaptic_weights_number}), Eigen::array<Eigen::Index, 1>({synaptic_weights_number}));

    Eigen::array<Index, 2> two_dim{{inputs_number, neurons_number}};

    return new_synaptic_weights.reshape(two_dim);
}


/// Returns the biases contained in a single parameters vector,
/// in which the biases occupy the first positions.
/// @param parameters Vector with all the parameters of the layer.

Tensor<type, 2> PerceptronLayer::get_biases(const Tensor<type, 1>& parameters) const
{
    const Index biases_number = biases.size();

    Tensor<type, 1> new_biases(biases_number);

    new_biases = parameters.slice(Eigen::array<Eigen::Index, 1>({0}), Eigen::array<Eigen::Index, 1>({biases_number}));

    Eigen::array<Index, 2> two_dim{{1, biases.dimension(1)}};

    return new_biases.reshape(two_dim);
}


/// Returns a single vector with all the layer parameters.
/// The format is a vector of real values.
/// The size is the number of parameters in the layer.
/// The biases come first, followed by the synaptic weights.

Tensor<type, 1> PerceptronLayer::get_parameters() const
{
    Tensor<type, 1> parameters(synaptic_weights.size() + biases.size());

    for(Index i = 0; i < biases.size(); i++)
    {
        fill_n(parameters.data()+i, 1, biases(i));
    }

    for(Index i = 0; i < synaptic_weights.size(); i++)
    {
        fill_n(parameters.data() + biases.size() + i, 1, synaptic_weights(i));
    }

    return parameters;
}
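
// The flat parameter layout produced above is the one relied on by set_parameters()
// and insert_gradient() below. As a minimal illustration (hypothetical sizes, not part
// of the library): for a layer with 2 inputs and 3 neurons the vector holds the 3 biases
// followed by the 6 weights in column-major order,
//
//     parameters = [ b0, b1, b2,
//                    w(0,0), w(1,0),    // weights of neuron 0
//                    w(0,1), w(1,1),    // weights of neuron 1
//                    w(0,2), w(1,2) ]   // weights of neuron 2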


/// Returns the activation function of the layer.
/// The activation function of a layer is the activation function of all perceptrons in it.

const PerceptronLayer::ActivationFunction& PerceptronLayer::get_activation_function() const
{
    return activation_function;
}


/// Returns a string with the name of the layer activation function.
/// This can be Logistic, HyperbolicTangent, Threshold, SymmetricThreshold, Linear, RectifiedLinear,
/// ScaledExponentialLinear, SoftPlus, SoftSign, HardSigmoid or ExponentialLinear.

string PerceptronLayer::write_activation_function() const
{
    switch(activation_function)
    {
    case Logistic:
        return "Logistic";

    case HyperbolicTangent:
        return "HyperbolicTangent";

    case Threshold:
        return "Threshold";

    case SymmetricThreshold:
        return "SymmetricThreshold";

    case Linear:
        return "Linear";

    case RectifiedLinear:
        return "RectifiedLinear";

    case ScaledExponentialLinear:
        return "ScaledExponentialLinear";

    case SoftPlus:
        return "SoftPlus";

    case SoftSign:
        return "SoftSign";

    case HardSigmoid:
        return "HardSigmoid";

    case ExponentialLinear:
        return "ExponentialLinear";
    }

    return string();
}


/// Returns true if messages from this class are to be displayed on the screen,
/// or false if messages from this class are not to be displayed on the screen.

const bool& PerceptronLayer::get_display() const
{
    return display;
}


/// Sets an empty layer, without any perceptrons.
/// It also sets the rest of the members to their default values.

void PerceptronLayer::set()
{
    biases.resize(0, 0);

    synaptic_weights.resize(0, 0);

    set_default();
}


/// Sets new numbers of inputs and perceptrons in the layer.
/// It also sets the rest of the members to their default values.
/// @param new_inputs_number Number of inputs.
/// @param new_neurons_number Number of perceptron neurons.
/// @param new_activation_function Activation function of the layer.

void PerceptronLayer::set(const Index& new_inputs_number, const Index& new_neurons_number,
                          const PerceptronLayer::ActivationFunction& new_activation_function)
{
    biases.resize(1, new_neurons_number);

    synaptic_weights.resize(new_inputs_number, new_neurons_number);

    set_parameters_random();

    activation_function = new_activation_function;

    set_default();
}


/// Sets those members not related to the vector of perceptrons to their default value.
/// <ul>
/// <li> Display: True.
/// <li> layer_type: Perceptron.
/// <li> layer_name: "perceptron_layer".
/// </ul>

void PerceptronLayer::set_default()
{
    layer_name = "perceptron_layer";

    display = true;

    layer_type = Perceptron;
}


/// Sets a new name for the layer.
/// @param new_layer_name Name of the layer.

void PerceptronLayer::set_layer_name(const string& new_layer_name)
{
    layer_name = new_layer_name;
}


/// Sets a new number of inputs in the layer.
/// The biases and synaptic weights are resized accordingly; their values are left uninitialized.
/// @param new_inputs_number Number of layer inputs.

void PerceptronLayer::set_inputs_number(const Index& new_inputs_number)
{
    const Index neurons_number = get_neurons_number();

    biases.resize(1, neurons_number);

    synaptic_weights.resize(new_inputs_number, neurons_number);
}


/// Sets a new number of perceptrons in the layer.
/// The biases and synaptic weights are resized accordingly; their values are left uninitialized.
/// @param new_neurons_number New number of neurons in the layer.

void PerceptronLayer::set_neurons_number(const Index& new_neurons_number)
{
    const Index inputs_number = get_inputs_number();

    biases.resize(1, new_neurons_number);

    synaptic_weights.resize(inputs_number, new_neurons_number);
}


/// Sets the biases of all perceptrons in the layer from a single matrix.
/// The format is a matrix of real values with one row and as many columns as neurons in the layer.
/// @param new_biases New set of biases in the layer.

void PerceptronLayer::set_biases(const Tensor<type, 2>& new_biases)
{
    biases = new_biases;
}


/// Sets the synaptic weights of this perceptron layer from a single matrix.
/// The format is a matrix of real numbers.
/// The number of rows is the number of inputs to the corresponding layer.
/// The number of columns is the number of neurons in the corresponding layer.
/// @param new_synaptic_weights New set of synaptic weights in that layer.

void PerceptronLayer::set_synaptic_weights(const Tensor<type, 2>& new_synaptic_weights)
{
    synaptic_weights = new_synaptic_weights;
}


/// Sets the parameters of this layer.
/// @param new_parameters Parameters vector containing, from the given position, the biases followed by the synaptic weights.
/// @param index Position in the parameters vector where the parameters of this layer start.

void PerceptronLayer::set_parameters(const Tensor<type, 1>& new_parameters, const Index& index)
{
    const Index biases_number = get_biases_number();
    const Index synaptic_weights_number = get_synaptic_weights_number();

    memcpy(biases.data(),
           new_parameters.data() + index,
           static_cast<size_t>(biases_number)*sizeof(type));

    memcpy(synaptic_weights.data(),
           new_parameters.data() + biases_number + index,
           static_cast<size_t>(synaptic_weights_number)*sizeof(type));
}


/// Sets a new activation (or transfer) function in the layer.
/// @param new_activation_function Activation function for the layer.

void PerceptronLayer::set_activation_function(const PerceptronLayer::ActivationFunction& new_activation_function)
{
    activation_function = new_activation_function;
}


/// Sets a new activation (or transfer) function in the layer.
/// The argument is a string containing the name of the function ("Logistic", "HyperbolicTangent", "Threshold", etc.).
/// @param new_activation_function_name Name of the activation function for the layer.

void PerceptronLayer::set_activation_function(const string& new_activation_function_name)
{
    if(new_activation_function_name == "Logistic")
    {
        activation_function = Logistic;
    }
    else if(new_activation_function_name == "HyperbolicTangent")
    {
        activation_function = HyperbolicTangent;
    }
    else if(new_activation_function_name == "Threshold")
    {
        activation_function = Threshold;
    }
    else if(new_activation_function_name == "SymmetricThreshold")
    {
        activation_function = SymmetricThreshold;
    }
    else if(new_activation_function_name == "Linear")
    {
        activation_function = Linear;
    }
    else if(new_activation_function_name == "RectifiedLinear")
    {
        activation_function = RectifiedLinear;
    }
    else if(new_activation_function_name == "ScaledExponentialLinear")
    {
        activation_function = ScaledExponentialLinear;
    }
    else if(new_activation_function_name == "SoftPlus")
    {
        activation_function = SoftPlus;
    }
    else if(new_activation_function_name == "SoftSign")
    {
        activation_function = SoftSign;
    }
    else if(new_activation_function_name == "HardSigmoid")
    {
        activation_function = HardSigmoid;
    }
    else if(new_activation_function_name == "ExponentialLinear")
    {
        activation_function = ExponentialLinear;
    }
    else
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "void set_activation_function(const string&) method.\n"
               << "Unknown activation function: " << new_activation_function_name << ".\n";

        throw logic_error(buffer.str());
    }
}


/// Sets a new display value.
/// If it is set to true messages from this class are to be displayed on the screen;
/// if it is set to false messages from this class are not to be displayed on the screen.
/// @param new_display Display value.

void PerceptronLayer::set_display(const bool& new_display)
{
    display = new_display;
}


/// Initializes the biases of all the perceptrons in the layer with a given value.
/// @param value Biases initialization value.

void PerceptronLayer::set_biases_constant(const type& value)
{
    biases.setConstant(value);
}


/// Initializes the synaptic weights of all the perceptrons in the layer with a given value.
/// @param value Synaptic weights initialization value.

void PerceptronLayer::set_synaptic_weights_constant(const type& value)
{
    synaptic_weights.setConstant(value);
}


/// Initializes the synaptic weights of all the perceptrons in the layer with the Glorot (Xavier) uniform distribution.
/// The biases are set to zero.

void PerceptronLayer::set_synaptic_weights_glorot()
{
    Index fan_in;
    Index fan_out;

    type scale = 1.0;
    type limit;

    fan_in = synaptic_weights.dimension(0);
    fan_out = synaptic_weights.dimension(1);

    scale /= ((fan_in + fan_out) / static_cast<type>(2.0));
    limit = sqrt(static_cast<type>(3.0) * scale);

    biases.setZero();

    synaptic_weights.setRandom<Eigen::internal::UniformRandomGenerator<type>>();

    Eigen::Tensor<type, 0> min_weight = synaptic_weights.minimum();
    Eigen::Tensor<type, 0> max_weight = synaptic_weights.maximum();

    synaptic_weights = (synaptic_weights - synaptic_weights.constant(min_weight(0))) / (synaptic_weights.constant(max_weight(0)) - synaptic_weights.constant(min_weight(0)));
    synaptic_weights = (synaptic_weights * synaptic_weights.constant(2. * limit)) - synaptic_weights.constant(limit);
}
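
// With scale = 2 / (fan_in + fan_out), the limit above reduces to the usual
// Glorot uniform bound, limit = sqrt(6 / (fan_in + fan_out)): the weights from
// setRandom() are first normalized to [0, 1] and then rescaled to [-limit, limit].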


/// Initializes all the biases and synaptic weights in the layer with a given value.
/// @param value Parameters initialization value.

void PerceptronLayer::set_parameters_constant(const type& value)
{
    biases.setConstant(value);

    synaptic_weights.setConstant(value);
}


/// Initializes all the biases and synaptic weights in the layer at random,
/// with values drawn uniformly between -1 and +1.

void PerceptronLayer::set_parameters_random()
{
    const type minimum = -1;
    const type maximum = 1;

    for(Index i = 0; i < biases.size(); i++)
    {
        const type random = static_cast<type>(rand()/(RAND_MAX+1.0));

        biases(i) = minimum + (maximum-minimum)*random;
    }

    for(Index i = 0; i < synaptic_weights.size(); i++)
    {
        const type random = static_cast<type>(rand()/(RAND_MAX+1.0));

        synaptic_weights(i) = minimum + (maximum-minimum)*random;
    }
}


/// Calculates the combinations (the linear part of the outputs) of the layer for a batch of inputs,
/// using the given biases and synaptic weights.

void PerceptronLayer::calculate_combinations(const Tensor<type, 2>& inputs,
                            const Tensor<type, 2>& biases,
                            const Tensor<type, 2>& synaptic_weights,
                            Tensor<type, 2>& combinations_2d) const
{
    const Index batch_samples_number = inputs.dimension(0);
    const Index biases_number = get_biases_number();

    for(Index i = 0; i < biases_number; i++)
    {
        fill_n(combinations_2d.data() + i*batch_samples_number, batch_samples_number, biases(i));
    }

    combinations_2d.device(*thread_pool_device) += inputs.contract(synaptic_weights, A_B);
}
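
// In matrix form the computation above is C = X W + 1 b, with X the
// batch_samples_number x inputs_number input matrix, W the
// inputs_number x neurons_number weight matrix, and b the 1 x neurons_number
// bias row broadcast over the batch. The fill_n loop fills one column per bias,
// which relies on the tensors being stored column-major (Eigen's default for Tensor).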


void PerceptronLayer::calculate_activations(const Tensor<type, 2>& combinations_2d, Tensor<type, 2>& activations_2d) const
{
#ifdef __OPENNN_DEBUG__

    const Index neurons_number = get_neurons_number();

    const Index combinations_columns_number = combinations_2d.dimension(1);

    if(combinations_columns_number != neurons_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "void calculate_activations(const Tensor<type, 2>&, Tensor<type, 2>&) const method.\n"
               << "Number of combinations_2d columns (" << combinations_columns_number
               << ") must be equal to number of neurons (" << neurons_number << ").\n";

        throw logic_error(buffer.str());
    }

#endif

    switch(activation_function)
    {
        case Linear: linear(combinations_2d, activations_2d); return;

        case Logistic: logistic(combinations_2d, activations_2d); return;

        case HyperbolicTangent: hyperbolic_tangent(combinations_2d, activations_2d); return;

        case Threshold: threshold(combinations_2d, activations_2d); return;

        case SymmetricThreshold: symmetric_threshold(combinations_2d, activations_2d); return;

        case RectifiedLinear: rectified_linear(combinations_2d, activations_2d); return;

        case ScaledExponentialLinear: scaled_exponential_linear(combinations_2d, activations_2d); return;

        case SoftPlus: soft_plus(combinations_2d, activations_2d); return;

        case SoftSign: soft_sign(combinations_2d, activations_2d); return;

        case HardSigmoid: hard_sigmoid(combinations_2d, activations_2d); return;

        case ExponentialLinear: exponential_linear(combinations_2d, activations_2d); return;
    }
}


void PerceptronLayer::calculate_activations_derivatives(const Tensor<type, 2>& combinations_2d,
                                                        Tensor<type, 2>& activations,
                                                        Tensor<type, 2>& activations_derivatives) const
{
#ifdef __OPENNN_DEBUG__

    const Index neurons_number = get_neurons_number();

    const Index combinations_columns_number = combinations_2d.dimension(1);

    if(combinations_columns_number != neurons_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "void calculate_activations_derivatives(const Tensor<type, 2>&, Tensor<type, 2>&) const method.\n"
               << "Number of combinations_2d columns (" << combinations_columns_number
               << ") must be equal to number of neurons (" << neurons_number << ").\n";

        throw logic_error(buffer.str());
    }

#endif

    switch(activation_function)
    {
        case Linear: linear_derivatives(combinations_2d, activations, activations_derivatives); return;

        case Logistic: logistic_derivatives(combinations_2d, activations, activations_derivatives); return;

        case HyperbolicTangent: hyperbolic_tangent_derivatives(combinations_2d, activations, activations_derivatives); return;

        case Threshold: threshold_derivatives(combinations_2d, activations, activations_derivatives); return;

        case SymmetricThreshold: symmetric_threshold_derivatives(combinations_2d, activations, activations_derivatives); return;

        case RectifiedLinear: rectified_linear_derivatives(combinations_2d, activations, activations_derivatives); return;

        case ScaledExponentialLinear: scaled_exponential_linear_derivatives(combinations_2d, activations, activations_derivatives); return;

        case SoftPlus: soft_plus_derivatives(combinations_2d, activations, activations_derivatives); return;

        case SoftSign: soft_sign_derivatives(combinations_2d, activations, activations_derivatives); return;

        case HardSigmoid: hard_sigmoid_derivatives(combinations_2d, activations, activations_derivatives); return;

        case ExponentialLinear: exponential_linear_derivatives(combinations_2d, activations, activations_derivatives); return;
    }
}


/// Returns the outputs of the layer for a batch of inputs.
/// @param inputs Matrix of inputs, with one row per sample and one column per input variable.

Tensor<type, 2> PerceptronLayer::calculate_outputs(const Tensor<type, 2>& inputs)
{
#ifdef __OPENNN_DEBUG__

    const Index inputs_dimensions_number = inputs.rank();

    if(inputs_dimensions_number != 2)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "Tensor<type, 2> calculate_outputs(const Tensor<type, 2>&) const method.\n"
               << "Number of dimensions (" << inputs_dimensions_number << ") must be equal to 2.\n";

        throw logic_error(buffer.str());
    }

    const Index inputs_number = get_inputs_number();

    const Index inputs_columns_number = inputs.dimension(1);

    if(inputs_columns_number != inputs_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "Tensor<type, 2> calculate_outputs(const Tensor<type, 2>&) const method.\n"
               << "Number of columns (" << inputs_columns_number << ") must be equal to number of inputs (" << inputs_number << ").\n";

        throw logic_error(buffer.str());
    }

#endif

    const Index batch_size = inputs.dimension(0);
    const Index outputs_number = get_neurons_number();

    Tensor<type, 2> outputs(batch_size, outputs_number);

    calculate_combinations(inputs, biases, synaptic_weights, outputs);

    calculate_activations(outputs, outputs);

    return outputs;
}
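
// A minimal usage sketch (not part of the library; sizes and values are hypothetical,
// and a thread pool device is assumed to have been set on the layer):
//
//     PerceptronLayer layer(2, 3, 0, PerceptronLayer::HyperbolicTangent);
//
//     Tensor<type, 2> inputs(1, 2);    // one sample with two input variables
//     inputs.setValues({{type(0.5), type(-0.2)}});
//
//     const Tensor<type, 2> outputs = layer.calculate_outputs(inputs);    // 1 x 3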


void PerceptronLayer::forward_propagate(const Tensor<type, 2>& inputs,
                                        ForwardPropagation& forward_propagation) const
{
#ifdef __OPENNN_DEBUG__

    const Index inputs_number = get_inputs_number();

    if(inputs_number != inputs.dimension(1))
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "void forward_propagate(const Tensor<type, 2>&, ForwardPropagation&) method.\n"
               << "Number of inputs columns (" << inputs.dimension(1) << ") must be equal to number of inputs ("
               << inputs_number << ").\n";

        throw logic_error(buffer.str());
    }

#endif

    calculate_combinations(inputs,
                           biases,
                           synaptic_weights,
                           forward_propagation.combinations_2d);

    calculate_activations_derivatives(forward_propagation.combinations_2d,
                                      forward_propagation.activations_2d,
                                      forward_propagation.activations_derivatives_2d);
}


void PerceptronLayer::forward_propagate(const Tensor<type, 2>& inputs,
                                        Tensor<type, 1> potential_parameters,
                                        ForwardPropagation& forward_propagation) const
{
    const Index neurons_number = get_neurons_number();
    const Index inputs_number = get_inputs_number();

#ifdef __OPENNN_DEBUG__

    if(inputs_number != inputs.dimension(1))
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "void forward_propagate(const Tensor<type, 2>&, Tensor<type, 1>&, ForwardPropagation&) method.\n"
               << "Number of inputs columns (" << inputs.dimension(1) << ") must be equal to number of inputs ("
               << inputs_number << ").\n";

        throw logic_error(buffer.str());
    }

#endif

    const TensorMap<Tensor<type, 2>> potential_biases(potential_parameters.data(), neurons_number, 1);

    const TensorMap<Tensor<type, 2>> potential_synaptic_weights(potential_parameters.data()+neurons_number,
                                                                inputs_number, neurons_number);

    calculate_combinations(inputs,
                           potential_biases,
                           potential_synaptic_weights,
                           forward_propagation.combinations_2d);

    calculate_activations_derivatives(forward_propagation.combinations_2d,
                                      forward_propagation.activations_2d,
                                      forward_propagation.activations_derivatives_2d);
}


void PerceptronLayer::calculate_output_delta(ForwardPropagation& forward_propagation,
                                             const Tensor<type, 2>& output_gradient,
                                             Tensor<type, 2>& output_delta) const
{
    output_delta.device(*thread_pool_device) = forward_propagation.activations_derivatives_2d*output_gradient;
}
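
// This is the usual backpropagation starting point for an output layer:
// delta = sigma'(c) (elementwise product) dE/da, where sigma'(c) are the
// activation derivatives stored during the forward pass and dE/da is the
// gradient of the error with respect to the layer activations.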


void PerceptronLayer::calculate_hidden_delta(Layer* next_layer_pointer,
                                             const Tensor<type, 2>&,
                                             ForwardPropagation& forward_propagation,
                                             const Tensor<type, 2>& next_layer_delta,
                                             Tensor<type, 2>& hidden_delta) const
{
    const Type next_layer_type = next_layer_pointer->get_type();

    switch(next_layer_type)
    {
        case Perceptron:

        calculate_hidden_delta_perceptron(next_layer_pointer, forward_propagation.activations_derivatives_2d, next_layer_delta, hidden_delta);

        return;

        case Probabilistic:

        calculate_hidden_delta_probabilistic(next_layer_pointer, forward_propagation.activations_derivatives_2d, next_layer_delta, hidden_delta);

        return;

        default:

        return;
    }
}


void PerceptronLayer::calculate_hidden_delta_perceptron(Layer* next_layer_pointer,
                                                        const Tensor<type, 2>& activations_derivatives,
                                                        const Tensor<type, 2>& next_layer_delta,
                                                        Tensor<type, 2>& hidden_delta) const
{
    const PerceptronLayer* next_perceptron_layer = dynamic_cast<PerceptronLayer*>(next_layer_pointer);

    const Tensor<type, 2>& next_synaptic_weights = next_perceptron_layer->get_synaptic_weights();

    hidden_delta.device(*thread_pool_device) = next_layer_delta.contract(next_synaptic_weights, A_BT);

    hidden_delta.device(*thread_pool_device) = hidden_delta*activations_derivatives;
}
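
// This implements the standard hidden-layer backpropagation recurrence
// delta = (delta_next W_next^T) (elementwise product) sigma'(c).
// W_next has dimensions neurons_number x next_neurons_number, so contracting
// with A_BT maps the next layer's deltas back onto this layer's neurons.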


void PerceptronLayer::calculate_hidden_delta_probabilistic(Layer* next_layer_pointer,
                                                           const Tensor<type, 2>& activations_derivatives,
                                                           const Tensor<type, 2>& next_layer_delta,
                                                           Tensor<type, 2>& hidden_delta) const
{
    const ProbabilisticLayer* next_probabilistic_layer = dynamic_cast<ProbabilisticLayer*>(next_layer_pointer);

    const Tensor<type, 2>& next_synaptic_weights = next_probabilistic_layer->get_synaptic_weights();

    hidden_delta.device(*thread_pool_device) = next_layer_delta.contract(next_synaptic_weights, A_BT);

    hidden_delta.device(*thread_pool_device) = hidden_delta*activations_derivatives;
}


// Gradient methods

void PerceptronLayer::calculate_error_gradient(const Tensor<type, 2>& inputs,
                                               const Layer::ForwardPropagation&,
                                               Layer::BackPropagation& back_propagation) const
{
    back_propagation.biases_derivatives.device(*thread_pool_device)
            = back_propagation.delta.sum(Eigen::array<Index, 1>({0}));

    back_propagation.synaptic_weights_derivatives.device(*thread_pool_device)
            = inputs.contract(back_propagation.delta, AT_B);
}
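
// In matrix form: dE/db = sum of the rows of delta over the batch, and
// dE/dW = X^T delta, which has the same inputs_number x neurons_number
// layout as the synaptic weights themselves.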


void PerceptronLayer::insert_gradient(const BackPropagation& back_propagation, const Index& index, Tensor<type, 1>& gradient) const
{
    const Index biases_number = get_biases_number();
    const Index synaptic_weights_number = get_synaptic_weights_number();

    memcpy(gradient.data() + index,
           back_propagation.biases_derivatives.data(),
           static_cast<size_t>(biases_number)*sizeof(type));

    memcpy(gradient.data() + index + biases_number,
           back_propagation.synaptic_weights_derivatives.data(),
           static_cast<size_t>(synaptic_weights_number)*sizeof(type));
}


/// Returns a string with the expression of the inputs-outputs relationship of the layer.
/// @param inputs_names Vector of strings with the names of the layer inputs.
/// @param outputs_names Vector of strings with the names of the layer outputs.

string PerceptronLayer::write_expression(const Tensor<string, 1>& inputs_names, const Tensor<string, 1>& outputs_names) const
{
#ifdef __OPENNN_DEBUG__

    const Index neurons_number = get_neurons_number();

    const Index inputs_number = get_inputs_number();
    const Index inputs_name_size = inputs_names.size();

    if(inputs_name_size != inputs_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "string write_expression(const Tensor<string, 1>&, const Tensor<string, 1>&) const method.\n"
               << "Size of inputs name must be equal to number of layer inputs.\n";

        throw logic_error(buffer.str());
    }

    const Index outputs_name_size = outputs_names.size();

    if(outputs_name_size != neurons_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "string write_expression(const Tensor<string, 1>&, const Tensor<string, 1>&) const method.\n"
               << "Size of outputs name must be equal to number of perceptrons.\n";

        throw logic_error(buffer.str());
    }

#endif

    switch(perceptron_layer_type)
    {
        case HiddenLayer:
        {
            return write_hidden_layer_expression(inputs_names, outputs_names);
        }

        case OutputLayer:
        {
            return write_output_layer_expression(inputs_names, outputs_names);
        }
    }

    return string();
}


string PerceptronLayer::write_hidden_layer_expression(const Tensor<string, 1>& inputs_names, const Tensor<string, 1>& outputs_names) const
{
    ostringstream buffer;

    for(Index j = 0; j < outputs_names.size(); j++)
    {
        Tensor<type, 1> synaptic_weights_column = synaptic_weights.chip(j,1);

        buffer << outputs_names[j] << to_string(j) << " = " << write_activation_function_expression() << "[ " << biases(0,j) << " +";

        for(Index i = 0; i < inputs_names.size() - 1; i++)
        {
            buffer << " (" << inputs_names[i] << "*" << synaptic_weights_column(i) << ")+";
        }

        buffer << " (" << inputs_names[inputs_names.size() - 1] << "*" << synaptic_weights_column[inputs_names.size() - 1] << ") ];\n";
    }

    return buffer.str();
}


string PerceptronLayer::write_output_layer_expression(const Tensor<string, 1>& inputs_names, const Tensor<string, 1>& outputs_names) const
{
    ostringstream buffer;

    for(Index j = 0; j < outputs_names.size(); j++)
    {
        Tensor<type, 1> synaptic_weights_column = synaptic_weights.chip(j,1);

        buffer << outputs_names[j] << " = " << write_activation_function_expression() << "[ " << biases(0,j) << " +";

        for(Index i = 0; i < inputs_names.size() - 1; i++)
        {
            buffer << " (" << inputs_names[i] << "*" << synaptic_weights_column(i) << ")+";
        }

        buffer << " (" << inputs_names[inputs_names.size() - 1] << "*" << synaptic_weights_column[inputs_names.size() - 1] << ") ];\n";
    }

    return buffer.str();
}


/// Deserializes the perceptron layer object from an XML document.
/// @param document TinyXML document containing the layer members.

void PerceptronLayer::from_XML(const tinyxml2::XMLDocument& document)
{
    ostringstream buffer;

    // Perceptron layer

    const tinyxml2::XMLElement* perceptron_layer_element = document.FirstChildElement("PerceptronLayer");

    if(!perceptron_layer_element)
    {
        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "void from_XML(const tinyxml2::XMLDocument&) method.\n"
               << "PerceptronLayer element is nullptr.\n";

        throw logic_error(buffer.str());
    }

    // Layer name

    const tinyxml2::XMLElement* layer_name_element = perceptron_layer_element->FirstChildElement("LayerName");

    if(!layer_name_element)
    {
        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "void from_XML(const tinyxml2::XMLDocument&) method.\n"
               << "LayerName element is nullptr.\n";

        throw logic_error(buffer.str());
    }

    if(layer_name_element->GetText())
    {
        set_layer_name(layer_name_element->GetText());
    }

    // Inputs number

    const tinyxml2::XMLElement* inputs_number_element = perceptron_layer_element->FirstChildElement("InputsNumber");

    if(!inputs_number_element)
    {
        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "void from_XML(const tinyxml2::XMLDocument&) method.\n"
               << "InputsNumber element is nullptr.\n";

        throw logic_error(buffer.str());
    }

    if(inputs_number_element->GetText())
    {
        set_inputs_number(static_cast<Index>(stoi(inputs_number_element->GetText())));
    }

    // Neurons number

    const tinyxml2::XMLElement* neurons_number_element = perceptron_layer_element->FirstChildElement("NeuronsNumber");

    if(!neurons_number_element)
    {
        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "void from_XML(const tinyxml2::XMLDocument&) method.\n"
               << "NeuronsNumber element is nullptr.\n";

        throw logic_error(buffer.str());
    }

    if(neurons_number_element->GetText())
    {
        set_neurons_number(static_cast<Index>(stoi(neurons_number_element->GetText())));
    }

    // Activation function

    const tinyxml2::XMLElement* activation_function_element = perceptron_layer_element->FirstChildElement("ActivationFunction");

    if(!activation_function_element)
    {
        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "void from_XML(const tinyxml2::XMLDocument&) method.\n"
               << "ActivationFunction element is nullptr.\n";

        throw logic_error(buffer.str());
    }

    if(activation_function_element->GetText())
    {
        set_activation_function(activation_function_element->GetText());
    }

    // Parameters

    const tinyxml2::XMLElement* parameters_element = perceptron_layer_element->FirstChildElement("Parameters");

    if(!parameters_element)
    {
        buffer << "OpenNN Exception: PerceptronLayer class.\n"
               << "void from_XML(const tinyxml2::XMLDocument&) method.\n"
               << "Parameters element is nullptr.\n";

        throw logic_error(buffer.str());
    }

    if(parameters_element->GetText())
    {
        const string parameters_string = parameters_element->GetText();

        set_parameters(to_type_vector(parameters_string, ' '));
    }
}


/// Serializes the perceptron layer object into an XML document.
/// @param file_stream TinyXML printer where the layer members are written.

void PerceptronLayer::write_XML(tinyxml2::XMLPrinter& file_stream) const
{
    ostringstream buffer;

    // Perceptron layer

    file_stream.OpenElement("PerceptronLayer");

    // Layer name

    file_stream.OpenElement("LayerName");

    buffer.str("");
    buffer << layer_name;

    file_stream.PushText(buffer.str().c_str());

    file_stream.CloseElement();

    // Inputs number

    file_stream.OpenElement("InputsNumber");

    buffer.str("");
    buffer << get_inputs_number();

    file_stream.PushText(buffer.str().c_str());

    file_stream.CloseElement();

    // Neurons number

    file_stream.OpenElement("NeuronsNumber");

    buffer.str("");
    buffer << get_neurons_number();

    file_stream.PushText(buffer.str().c_str());

    file_stream.CloseElement();

    // Activation function

    file_stream.OpenElement("ActivationFunction");

    file_stream.PushText(write_activation_function().c_str());

    file_stream.CloseElement();

    // Parameters

    file_stream.OpenElement("Parameters");

    buffer.str("");

    const Tensor<type, 1> parameters = get_parameters();
    const Index parameters_size = parameters.size();

    for(Index i = 0; i < parameters_size; i++)
    {
        buffer << parameters(i);

        if(i != (parameters_size-1)) buffer << " ";
    }

    file_stream.PushText(buffer.str().c_str());

    file_stream.CloseElement();

    // Perceptron layer (end tag)

    file_stream.CloseElement();
}


/// Returns a string with the activation function name as used in the written mathematical expressions.

string PerceptronLayer::write_activation_function_expression() const
{
    switch(activation_function)
    {
    case Threshold:
        return "threshold";

    case SymmetricThreshold:
        return "symmetric_threshold";

    case Logistic:
        return "logistic";

    case HyperbolicTangent:
        return "tanh";

    case Linear:
        return "";

    case RectifiedLinear:
        return "ReLU";

    case ExponentialLinear:
        return "ELU";

    case ScaledExponentialLinear:
        return "SELU";

    case SoftPlus:
        return "soft_plus";

    case SoftSign:
        return "soft_sign";

    case HardSigmoid:
        return "hard_sigmoid";

    default:
        return write_activation_function();
    }
}


string PerceptronLayer::write_combinations_c() const
{
    ostringstream buffer;

    const Index inputs_number = get_inputs_number();
    const Index neurons_number = get_neurons_number();

    buffer << "\tvector<float> combinations(" << neurons_number << ");\n" << endl;

    for(Index i = 0; i < neurons_number; i++)
    {
        buffer << "\tcombinations[" << i << "] = " << biases(i);

        for(Index j = 0; j < inputs_number; j++)
        {
            buffer << " +" << synaptic_weights(j, i) << "*inputs[" << j << "]";
        }

        buffer << ";" << endl;
    }

    return buffer.str();
}


string PerceptronLayer::write_activations_c() const
{
    ostringstream buffer;

    const Index neurons_number = get_neurons_number();

    buffer << "\n\tvector<float> activations(" << neurons_number << ");\n" << endl;

    for(Index i = 0; i < neurons_number; i++)
    {
        buffer << "\tactivations[" << i << "] = ";

        switch(activation_function)
        {
        case HyperbolicTangent:
            buffer << "tanh(combinations[" << i << "]);\n";
            break;

        case RectifiedLinear:
            buffer << "combinations[" << i << "] < 0.0 ? 0.0 : combinations[" << i << "];\n";
            break;

        case Logistic:
            buffer << "1.0/(1.0 + exp(-combinations[" << i << "]));\n";
            break;

        case Threshold:
            buffer << "combinations[" << i << "] >= 0.0 ? 1.0 : 0.0;\n";
            break;

        case SymmetricThreshold:
            buffer << "combinations[" << i << "] >= 0.0 ? 1.0 : -1.0;\n";
            break;

        case Linear:
            buffer << "combinations[" << i << "];\n";
            break;

        case ScaledExponentialLinear:
            buffer << "combinations[" << i << "] < 0.0 ? 1.0507*1.67326*(exp(combinations[" << i << "]) - 1.0) : 1.0507*combinations[" << i << "];\n";
            break;

        case SoftPlus:
            buffer << "log(1.0 + exp(combinations[" << i << "]));\n";
            break;

        case SoftSign:
            buffer << "combinations[" << i << "] < 0.0 ? combinations[" << i << "]/(1.0 - combinations[" << i << "] ) : combinations[" << i << "]/(1.0 + combinations[" << i << "] );\n";
            break;

        case ExponentialLinear:
            buffer << "combinations[" << i << "] < 0.0 ? 1.0*(exp(combinations[" << i << "]) - 1.0) : combinations[" << i << "];\n";
            break;

        case HardSigmoid:
            // Assumes the piecewise-linear hard sigmoid: 0 below -2.5, 1 above 2.5, else 0.2*x + 0.5.
            buffer << "combinations[" << i << "] < -2.5 ? 0.0 : (combinations[" << i << "] > 2.5 ? 1.0 : 0.2*combinations[" << i << "] + 0.5);\n";
            break;
        }
    }

    return buffer.str();
}


string PerceptronLayer::write_combinations_python() const
{
    ostringstream buffer;

    const Index inputs_number = get_inputs_number();
    const Index neurons_number = get_neurons_number();

    buffer << "\tcombinations = [None] * " << neurons_number << "\n" << endl;

    for(Index i = 0; i < neurons_number; i++)
    {
        buffer << "\tcombinations[" << i << "] = " << biases(i);

        for(Index j = 0; j < inputs_number; j++)
        {
            buffer << " +" << synaptic_weights(j, i) << "*inputs[" << j << "]";
        }

        buffer << " " << endl;
    }

    buffer << "\t" << endl;

    return buffer.str();
}


string PerceptronLayer::write_activations_python() const
{
    ostringstream buffer;

    const Index neurons_number = get_neurons_number();

    buffer << "\tactivations = [None] * " << neurons_number << "\n" << endl;

    for(Index i = 0; i < neurons_number; i++)
    {
        buffer << "\tactivations[" << i << "] = ";

        switch(activation_function)
        {
        case HyperbolicTangent:
            buffer << "np.tanh(combinations[" << i << "])\n";
            break;

        case RectifiedLinear:
            buffer << "np.maximum(0.0, combinations[" << i << "])\n";
            break;

        case Logistic:
            buffer << "1.0/(1.0 + np.exp(-combinations[" << i << "]))\n";
            break;

        case Threshold:
            buffer << "1.0 if combinations[" << i << "] >= 0.0 else 0.0\n";
            break;

        case SymmetricThreshold:
            buffer << "1.0 if combinations[" << i << "] >= 0.0 else -1.0\n";
            break;

        case Linear:
            buffer << "combinations[" << i << "]\n";
            break;

        case ScaledExponentialLinear:
            buffer << "1.0507*1.67326*(np.exp(combinations[" << i << "]) - 1.0) if combinations[" << i << "] < 0.0 else 1.0507*combinations[" << i << "]\n";
            break;

        case SoftPlus:
            buffer << "np.log(1.0 + np.exp(combinations[" << i << "]))\n";
            break;

        case SoftSign:
            buffer << "combinations[" << i << "]/(1.0 - combinations[" << i << "] ) if combinations[" << i << "] < 0.0 else combinations[" << i << "]/(1.0 + combinations[" << i << "] )\n";
            break;

        case ExponentialLinear:
            buffer << "1.0*(np.exp(combinations[" << i << "]) - 1.0) if combinations[" << i << "] < 0.0 else combinations[" << i << "]\n";
            break;

        case HardSigmoid:
            // Assumes the piecewise-linear hard sigmoid: 0 below -2.5, 1 above 2.5, else 0.2*x + 0.5.
            buffer << "0.0 if combinations[" << i << "] < -2.5 else (1.0 if combinations[" << i << "] > 2.5 else 0.2*combinations[" << i << "] + 0.5)\n";
            break;
        }
    }

    return buffer.str();
}


/// Returns a string with the C++ code that computes the outputs of this layer.

string PerceptronLayer::write_expression_c() const
{
    ostringstream buffer;

    buffer << "vector<float> " << layer_name << "(const vector<float>& inputs)\n{" << endl;

    buffer << write_combinations_c();

    buffer << write_activations_c();

    buffer << "\n\treturn activations;\n}" << endl;

    return buffer.str();
}
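
// As an illustration (hypothetical layer with 2 inputs, 1 tanh neuron,
// bias 0.1 and weights 0.2 and 0.3; actual numbers depend on the trained layer),
// the generated C++ text looks like:
//
//     vector<float> perceptron_layer_1(const vector<float>& inputs)
//     {
//         vector<float> combinations(1);
//
//         combinations[0] = 0.1 +0.2*inputs[0] +0.3*inputs[1];
//
//         vector<float> activations(1);
//
//         activations[0] = tanh(combinations[0]);
//
//         return activations;
//     }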


/// Returns a string with the Python code that computes the outputs of this layer.

string PerceptronLayer::write_expression_python() const
{
    ostringstream buffer;

    buffer << "def " << layer_name << "(inputs):\n" << endl;

    buffer << write_combinations_python();

    buffer << write_activations_python();

    buffer << "\n\treturn activations\n" << endl;

    return buffer.str();
}
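
// For the same hypothetical layer, the generated Python text is the tab-indented
// analogue (combinations and activations are plain Python lists, and numpy is
// assumed to be imported as np by the surrounding generated script):
//
//     def perceptron_layer_1(inputs):
//
//         combinations = [None] * 1
//
//         combinations[0] = 0.1 +0.2*inputs[0] +0.3*inputs[1]
//
//         activations = [None] * 1
//
//         activations[0] = np.tanh(combinations[0])
//
//         return activations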


}

// OpenNN: Open Neural Networks Library.
// Copyright(C) 2005-2020 Artificial Intelligence Techniques, SL.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA