//   OpenNN: Open Neural Networks Library
//   www.opennn.net
//
//   M E A N   S Q U A R E D   E R R O R   C L A S S
//
//   Artificial Intelligence Techniques SL
//   artelnics@artelnics.com

#include "mean_squared_error.h"

namespace OpenNN
{

/// Default constructor.
/// It creates a mean squared error term that is not associated with any
/// neural network and not measured on any data set.
/// It also initializes the rest of the class members to their default values.

MeanSquaredError::MeanSquaredError() : LossIndex()
{
}


/// Neural network and data set constructor.
/// It creates a mean squared error term object associated with a
/// neural network and measured on a data set.
/// It also initializes the rest of the class members to their default values.
/// @param new_neural_network_pointer Pointer to a neural network object.
/// @param new_data_set_pointer Pointer to a data set object.

MeanSquaredError::MeanSquaredError(NeuralNetwork* new_neural_network_pointer, DataSet* new_data_set_pointer)
    : LossIndex(new_neural_network_pointer, new_data_set_pointer)
{
}
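
// A minimal usage sketch. The DataSet and NeuralNetwork constructor arguments
// shown below are illustrative assumptions, not taken from this file:
//
//     DataSet data_set("data.csv", ';', true);
//     NeuralNetwork neural_network(NeuralNetwork::Approximation, {1, 3, 1});
//     MeanSquaredError mean_squared_error(&neural_network, &data_set);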


/// Destructor.

MeanSquaredError::~MeanSquaredError()
{
}


/// Computes the mean squared error of the neural network measured on a batch:
/// the mean over the batch samples of the squared differences between outputs and targets.
/// @param batch Batch of samples, with inputs and targets.
/// @param forward_propagation Forward propagation structure holding the layer activations for the batch.
/// @param back_propagation Back propagation structure where the errors and the error value are stored.

void MeanSquaredError::calculate_error(const DataSet::Batch& batch,
                                       const NeuralNetwork::ForwardPropagation& forward_propagation,
                                       LossIndex::BackPropagation& back_propagation) const
{
    Tensor<type, 0> sum_squared_error;

    const Index batch_samples_number = batch.inputs_2d.dimension(0);

    const Index trainable_layers_number = neural_network_pointer->get_trainable_layers_number();

    const Tensor<type, 2>& outputs = forward_propagation.layers(trainable_layers_number-1).activations_2d;
    const Tensor<type, 2>& targets = batch.targets_2d;

    back_propagation.errors.device(*thread_pool_device) = outputs - targets;

    sum_squared_error.device(*thread_pool_device) = back_propagation.errors.contract(back_propagation.errors, SSE);

    back_propagation.error = sum_squared_error(0)/static_cast<type>(batch_samples_number);
}
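
// Note: the value stored in back_propagation.error above is
//
//     error = (1/N) * sum_i sum_j (outputs(i,j) - targets(i,j))^2
//
// where N is the number of batch samples and the SSE contraction indices
// (defined in the LossIndex base class) reduce the errors tensor over both
// of its dimensions, yielding the scalar sum of squared errors.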
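

/// Computes the vector of error terms and the mean squared error of the
/// neural network measured on a batch, as used by second order training
/// algorithms such as Levenberg-Marquardt.
/// Each error term is the Euclidean norm of the output-target difference of
/// one sample:
///     error_terms(i) = sqrt(sum_j (outputs(i,j) - targets(i,j))^2)
/// so that the stored error equals (1/N)*sum_i error_terms(i)^2, with N the
/// number of batch samples.
/// @param batch Batch of samples, with inputs and targets.
/// @param forward_propagation Forward propagation structure holding the layer activations for the batch.
/// @param second_order_loss Second order loss structure where the error terms and the error value are stored.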
void MeanSquaredError::calculate_error_terms(const DataSet::Batch& batch,
                                             const NeuralNetwork::ForwardPropagation& forward_propagation,
                                             SecondOrderLoss& second_order_loss) const
{
    const Index trainable_layers_number = neural_network_pointer->get_trainable_layers_number();

    const Index batch_samples_number = batch.get_samples_number();

    const Tensor<type, 2>& outputs = forward_propagation.layers(trainable_layers_number-1).activations_2d;
    const Tensor<type, 2>& targets = batch.targets_2d;

    second_order_loss.error_terms.resize(outputs.dimension(0));
    const Eigen::array<int, 1> rows_sum = {1};

    second_order_loss.error_terms.device(*thread_pool_device) = ((outputs - targets).square().sum(rows_sum)).sqrt();

    Tensor<type, 0> error;
    error.device(*thread_pool_device) = second_order_loss.error_terms.contract(second_order_loss.error_terms, AT_B);

    second_order_loss.error = error()/static_cast<type>(batch_samples_number);
}
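

/// Computes the gradient of the mean squared error with respect to the
/// outputs of the neural network:
///     output_gradient(i,j) = (2/N) * (outputs(i,j) - targets(i,j))
/// which follows from differentiating (1/N)*sum (outputs - targets)^2, with N
/// the number of batch samples.
/// @param batch Batch of samples, with inputs and targets.
/// @param forward_propagation Forward propagation structure holding the layer activations for the batch.
/// @param back_propagation Back propagation structure where the output gradient is stored.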
void MeanSquaredError::calculate_output_gradient(const DataSet::Batch& batch,
                                                 const NeuralNetwork::ForwardPropagation& forward_propagation,
                                                 BackPropagation& back_propagation) const
{
#ifdef __OPENNN_DEBUG__

    check();

#endif

    const Index batch_samples_number = batch.inputs_2d.dimension(0);

    const type coefficient = static_cast<type>(2.0)/static_cast<type>(batch_samples_number);

    const Index trainable_layers_number = neural_network_pointer->get_trainable_layers_number();

    const Tensor<type, 2>& outputs = forward_propagation.layers(trainable_layers_number-1).activations_2d;
    const Tensor<type, 2>& targets = batch.targets_2d;

    back_propagation.errors.device(*thread_pool_device) = outputs - targets;

    back_propagation.output_gradient.device(*thread_pool_device) = coefficient*back_propagation.errors;
}
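

/// Computes the gradient of the mean squared error with respect to the
/// neural network parameters from the error terms and their Jacobian:
///     gradient = (2/N) * J^T * e
/// where J is the error terms Jacobian, e is the error terms vector and N is
/// the number of batch samples.
/// @param batch Batch of samples, with inputs and targets.
/// @param second_order_loss Second order loss structure holding the error terms and their Jacobian, where the gradient is stored.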
void MeanSquaredError::calculate_Jacobian_gradient(const DataSet::Batch& batch,
                                                   LossIndex::SecondOrderLoss& second_order_loss) const
{
#ifdef __OPENNN_DEBUG__

    check();

#endif

    const Index batch_samples_number = batch.get_samples_number();

    const type coefficient = static_cast<type>(2)/static_cast<type>(batch_samples_number);

    second_order_loss.gradient.device(*thread_pool_device) = second_order_loss.error_terms_Jacobian.contract(second_order_loss.error_terms, AT_B);

    second_order_loss.gradient.device(*thread_pool_device) = coefficient*second_order_loss.gradient;
}
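

/// Computes the Gauss-Newton approximation to the Hessian of the mean squared
/// error with respect to the neural network parameters:
///     hessian = (2/N) * J^T * J
/// where J is the error terms Jacobian and N is the number of batch samples.
/// This approximation drops the second derivative terms, as used by second
/// order algorithms such as Levenberg-Marquardt.
/// @param batch Batch of samples, with inputs and targets.
/// @param second_order_loss Second order loss structure holding the error terms Jacobian, where the Hessian approximation is stored.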
void MeanSquaredError::calculate_hessian_approximation(const DataSet::Batch& batch,
                                                       LossIndex::SecondOrderLoss& second_order_loss) const
{
#ifdef __OPENNN_DEBUG__

    check();

#endif

    const Index batch_samples_number = batch.inputs_2d.dimension(0);

    const type coefficient = static_cast<type>(2.0)/static_cast<type>(batch_samples_number);

    second_order_loss.hessian.device(*thread_pool_device) = second_order_loss.error_terms_Jacobian.contract(second_order_loss.error_terms_Jacobian, AT_B);

    second_order_loss.hessian.device(*thread_pool_device) = coefficient*second_order_loss.hessian;
}


/// Returns a string with the name of the mean squared error loss type, "MEAN_SQUARED_ERROR".

string MeanSquaredError::get_error_type() const
{
    return "MEAN_SQUARED_ERROR";
}


/// Returns a string with the name of the mean squared error loss type in text format.

string MeanSquaredError::get_error_type_text() const
{
    return "Mean squared error";
}


/// Serializes the mean squared error object into an XML document of the TinyXML library
/// without keeping the DOM tree in memory.
/// See the OpenNN manual for more information about the format of this document.

void MeanSquaredError::write_XML(tinyxml2::XMLPrinter& file_stream) const
{
    // Error type

    file_stream.OpenElement("MeanSquaredError");

    file_stream.CloseElement();
}

}


// OpenNN: Open Neural Networks Library.
// Copyright(C) 2005-2020 Artificial Intelligence Techniques, SL.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA