1 //   OpenNN: Open Neural Networks Library
2 //   www.opennn.net
3 //
4 //   M E A N   S Q U A R E D   E R R O R   T E S T   C L A S S
5 //
6 //   Artificial Intelligence Techniques SL
7 //   artelnics@artelnics.com
8 
9 #include "mean_squared_error_test.h"
10 
11 
MeanSquaredErrorTest()12 MeanSquaredErrorTest::MeanSquaredErrorTest() : UnitTesting()
13 {
14 }
15 
16 
~MeanSquaredErrorTest()17 MeanSquaredErrorTest::~MeanSquaredErrorTest()
18 {
19 }
20 
21 
test_constructor()22 void MeanSquaredErrorTest::test_constructor()
23 {
24    cout << "test_constructor\n";
25 
26    // Default
27 
28    MeanSquaredError mse1;
29 
30    assert_true(mse1.has_neural_network() == false, LOG);
31    assert_true(mse1.has_data_set() == false, LOG);
32 
33    // Neural network and data set
34 
35    NeuralNetwork nn3;
36    DataSet ds3;
37    MeanSquaredError mse3(&nn3, &ds3);
38 
39    assert_true(mse3.has_neural_network() == true, LOG);
40    assert_true(mse3.has_data_set() == true, LOG);
41 }
42 
43 
test_destructor()44 void MeanSquaredErrorTest::test_destructor()
45 {
46     cout << "test_destructor\n";
47 }
48 
49 
test_calculate_error()50 void MeanSquaredErrorTest::test_calculate_error()
51 {
52    cout << "test_calculate_error\n";
53 
54    //Case1
55 
56    Tensor<type, 1> parameters;
57 
58    Tensor<Index, 1> architecture(3);
59 
60    architecture.setValues({1,1,1});
61 
62    NeuralNetwork neural_network(NeuralNetwork::Approximation, architecture);
63    neural_network.set_parameters_constant(0.0);
64 
65    DataSet data_set(1, 1, 1);
66    data_set.initialize_data(0.0);
67 
68    MeanSquaredError mean_squared_error(&neural_network, &data_set);
69    DataSet::Batch batch(1, &data_set);
70 
71 
72    Index batch_samples_number = batch.get_samples_number();
73 
74    neural_network.set(NeuralNetwork::Approximation, architecture);
75    neural_network.set_parameters_constant(0.0);
76 
77    data_set.set(1, 1, 1);
78    data_set.initialize_data(0.0);
79    data_set.set_training();
80 
81    NeuralNetwork::ForwardPropagation forward_propagation(batch_samples_number, &neural_network);
82 
83    LossIndex::BackPropagation back_propagation(batch_samples_number, &mean_squared_error);
84 
85    neural_network.forward_propagate(batch, forward_propagation);
86 
87    mean_squared_error.calculate_error(batch, forward_propagation, back_propagation);
88 
89    assert_true(back_propagation.error == 0.0, LOG);
90 
91    //Case2
92 
93    Tensor<type, 1> parameters_2;
94 
95    Tensor<Index, 1> architecture2(2);
96    architecture2.setValues({1,1});
97 
98    neural_network.set(NeuralNetwork::Approximation, architecture2);
99    neural_network.set_parameters_random();
100 
101    parameters_2 = neural_network.get_parameters();
102 
103    data_set.set(1, 2, 1);
104 
105    Tensor<type, 2> data(1, 3);
106    data.setValues({{1, 2, 3}});
107    data_set.set_data(data);
108 
109    neural_network.set_parameters_constant(1);
110 
111    NeuralNetwork::ForwardPropagation forward_propagation_2(batch_samples_number, &neural_network);
112 
113    LossIndex::BackPropagation back_propagation_2(batch_samples_number, &mean_squared_error);
114 
115    neural_network.forward_propagate(batch, forward_propagation_2);
116 
117    mean_squared_error.calculate_error(batch, forward_propagation_2, back_propagation_2);
118 
119    assert_true(abs(back_propagation_2.error - 1) < 1.0e-3, LOG);
120 
121    assert_true(back_propagation_2.error == 1.0, LOG);
122 
123 }
124 
125 
test_calculate_error_gradient()126 void MeanSquaredErrorTest::test_calculate_error_gradient()
127 {
128    cout << "test_calculate_error_gradient\n";
129 
130    NeuralNetwork neural_network;
131 
132    DataSet data_set;
133 
134    data_set.generate_Rosenbrock_data(100,2);
135    data_set.set_training();
136 
137    MeanSquaredError mean_squared_error(&neural_network, &data_set);
138 
139    Tensor<type, 1> error_gradient;
140    Tensor<type, 1> numerical_error_gradient;
141 
142    Index samples_number;
143    Index inputs_number;
144    Index hidden_neurons;
145    Index outputs_number;
146 
147 
148    PerceptronLayer* hidden_perceptron_layer = new PerceptronLayer();
149    PerceptronLayer* output_perceptron_layer = new PerceptronLayer();
150    ProbabilisticLayer* probabilistic_layer = new ProbabilisticLayer();
151    LongShortTermMemoryLayer* long_short_term_memory_layer = new LongShortTermMemoryLayer();
152 
153 
154    // Test trivial
155 
156 
157       samples_number = 10;
158       inputs_number = 1;
159       outputs_number = 1;
160 
161       data_set.set(samples_number, inputs_number, outputs_number);
162       data_set.initialize_data(0.0);
163       data_set.set_training();
164 
165       DataSet::Batch batch(samples_number, &data_set);
166 
167       Tensor<Index, 1> samples_indices = data_set.get_training_samples_indices();
168       const Tensor<Index, 1> input_indices = data_set.get_input_variables_indices();
169       const Tensor<Index, 1> target_indices = data_set.get_target_variables_indices();
170 
171       batch.fill(samples_indices, input_indices, target_indices);
172 
173       hidden_perceptron_layer->set(inputs_number, outputs_number);
174       neural_network.add_layer(hidden_perceptron_layer);
175 
176       neural_network.set_parameters_constant(0.0);
177 
178       MeanSquaredError mse(&neural_network, &data_set);
179 
180       mse.set_regularization_method(LossIndex::RegularizationMethod::NoRegularization);
181 
182       NeuralNetwork::ForwardPropagation forward_propagation(samples_number, &neural_network);
183       LossIndex::BackPropagation training_back_propagation(samples_number, &mse);
184 
185       neural_network.forward_propagate(batch, forward_propagation);
186 
187       mse.back_propagate(batch, forward_propagation, training_back_propagation);
188       error_gradient = training_back_propagation.gradient;
189 
190       numerical_error_gradient = mse.calculate_error_gradient_numerical_differentiation(&mse);
191 
192       const Tensor<type, 1> difference = error_gradient-numerical_error_gradient;
193 
194       assert_true((error_gradient.dimension(0) == neural_network.get_parameters_number()) , LOG);
195       assert_true(std::all_of(error_gradient.data(), error_gradient.data()+error_gradient.size(), [](type i) { return (i-static_cast<type>(0))<std::numeric_limits<type>::min(); }), LOG);
196 
197    // Test perceptron and probabilistic
198 {
199         samples_number = 10;
200         inputs_number = 3;
201         outputs_number = 3;
202         hidden_neurons = 2;
203 
204         DataSet data_set_2;
205 
206         data_set_2.set(samples_number, inputs_number, outputs_number);
207         data_set_2.set_training();
208         data_set_2.set_data_binary_random();
209 
210         DataSet::Batch batch_1(samples_number, &data_set_2);
211 
212         Tensor<Index, 1> samples_indices_1 = data_set_2.get_training_samples_indices();
213         const Tensor<Index, 1> input_indices_1 = data_set_2.get_input_variables_indices();
214         const Tensor<Index, 1> target_indices_1 = data_set_2.get_target_variables_indices();
215 
216         batch_1.fill(samples_indices_1, input_indices_1, target_indices_1);
217 
218         Tensor<Index, 1> architecture(3);
219         architecture[0] = inputs_number;
220         architecture[1] = hidden_neurons;
221         architecture[2] = outputs_number;
222 
223         NeuralNetwork neural_network_1(NeuralNetwork::Classification, architecture);
224 
225         MeanSquaredError mse_1(&neural_network_1, &data_set_2);
226 
227         mse_1.set_regularization_method(LossIndex::RegularizationMethod::NoRegularization);
228 
229         NeuralNetwork::ForwardPropagation forward_propagation(samples_number, &neural_network_1);
230         LossIndex::BackPropagation training_back_propagation(samples_number, &mse_1);
231 
232         neural_network_1.forward_propagate(batch_1, forward_propagation);
233 
234         mse_1.back_propagate(batch_1, forward_propagation, training_back_propagation);
235         error_gradient = training_back_propagation.gradient;
236 
237         numerical_error_gradient = mse_1.calculate_error_gradient_numerical_differentiation(&mse_1);
238 
239         const Tensor<type, 1> difference1 = error_gradient-numerical_error_gradient;
240 
241         assert_true(std::all_of(difference.data(), difference.data()+difference.size(), [](type i) { return (i)<static_cast<type>(1.0e-3); }), LOG);
242   }
243 
244   /* // Test lstm
245 
246 {
247        samples_number = 5;
248        inputs_number = 4;
249        outputs_number = 2;
250        hidden_neurons = 3;
251 
252        DataSet data_set_3;
253 
254        data_set_3.set(samples_number, inputs_number, outputs_number);
255 
256        data_set_3.set_data_random();
257        data_set_3.set_training();
258 
259        DataSet::Batch batch(samples_number, &data_set_3);
260 
261        Tensor<Index, 1> samples_indices = data_set_3.get_training_samples_indices();
262        const Tensor<Index, 1> input_indices = data_set_3.get_input_variables_indices();
263        const Tensor<Index, 1> target_indices = data_set_3.get_target_variables_indices();
264 
265        batch.fill(samples_indices, input_indices, target_indices);
266 
267        long_short_term_memory_layer->set(inputs_number, hidden_neurons);
268        output_perceptron_layer->set(hidden_neurons, outputs_number);
269 
270        cout << "hola" <<endl;
271 
272        neural_network.add_layer(long_short_term_memory_layer);
273        neural_network.add_layer(output_perceptron_layer);
274 
275        neural_network.set_parameters_random();
276 
277       // error_gradient = mean_squared_error.calculate_error_gradient();
278 
279       //numerical_error_gradient = mean_squared_error.calculate_error_gradient_numerical_differentiation();
280 
281       // assert_true(absolute_value(error_gradient - numerical_error_gradient) < 1.0e-3, LOG);
282 }
283 */
284 //   neural_network.set();
285 
286    // Test recurrent
287 {
288 //   samples_number = 92;
289 //   inputs_number = 3;
290 //   outputs_number = 1;
291 //   hidden_neurons = 4;
292 
293 //   data_set.set(samples_number, inputs_number, outputs_number);
294 
295 //   data_set.set_data_random();
296 
297 //   data_set.set_training();
298 
299 //   recurrent_layer->set(inputs_number, hidden_neurons);
300 //   recurrent_layer->set_timesteps(1);
301 
302 //   output_perceptron_layer->set(hidden_neurons, outputs_number);
303 
304 //   neural_network.add_layer(recurrent_layer);
305 //   neural_network.add_layer(output_perceptron_layer);
306 
307 //   neural_network.set_parameters_random();
308 
309 //   error_gradient = mean_squared_error.calculate_error_gradient();
310 
311 //   numerical_error_gradient = mean_squared_error.calculate_error_gradient_numerical_differentiation();
312 
313 //   assert_true(absolute_value(error_gradient - numerical_error_gradient) < 1.0e-3, LOG);
314 }
315 
316    // Test convolutional
317 {
318 //   samples_number = 5;
319 //   inputs_number = 147;
320 //   outputs_number = 1;
321 
322 //   data_set.set(samples_number, inputs_number, outputs_number);
323 //   data_set.set_input_variables_dimensions(Tensor<Index, 1>({3,7,7}));
324 //   data_set.set_target_variables_dimensions(Tensor<Index, 1>({1}));
325 //   data_set.set_data_random();
326 //   data_set.set_training();
327 
328 //   const type parameters_minimum = -100.0;
329 //   const type parameters_maximum = 100.0;
330 
331 //   ConvolutionalLayer* convolutional_layer_1 = new ConvolutionalLayer({3,7,7}, {2,2,2});
332 //   Tensor<type, 2> filters_1({2,3,2,2}, 0);
333 //   filters_1.setRandom(parameters_minimum,parameters_maximum);
334 //   convolutional_layer_1->set_synaptic_weights(filters_1);
335 //   Tensor<type, 1> biases_1(2, 0);
336 //   biases_1.setRandom(parameters_minimum, parameters_maximum);
337 //   convolutional_layer_1->set_biases(biases_1);
338 
339 //   ConvolutionalLayer* convolutional_layer_2 = new ConvolutionalLayer(convolutional_layer_1->get_outputs_dimensions(), {2,2,2});
340 //   convolutional_layer_2->set_padding_option(OpenNN::ConvolutionalLayer::Same);
341 //   Tensor<type, 2> filters_2({2,2,2,2}, 0);
342 //   filters_2.setRandom(parameters_minimum, parameters_maximum);
343 //   convolutional_layer_2->set_synaptic_weights(filters_2);
344 //   Tensor<type, 1> biases_2(2, 0);
345 //   biases_2.setRandom(parameters_minimum, parameters_maximum);
346 //   convolutional_layer_2->set_biases(biases_2);
347 
348 //   PoolingLayer* pooling_layer_1 = new PoolingLayer(convolutional_layer_2->get_outputs_dimensions(), {2,2});
349 
350 //   ConvolutionalLayer* convolutional_layer_3 = new ConvolutionalLayer(pooling_layer_1->get_outputs_dimensions(), {1,2,2});
351 //   convolutional_layer_3->set_padding_option(OpenNN::ConvolutionalLayer::Same);
352 //   Tensor<type, 2> filters_3({1,2,2,2}, 0);
353 //   filters_3.setRandom(parameters_minimum, parameters_maximum);
354 //   convolutional_layer_3->set_synaptic_weights(filters_3);
355 //   Tensor<type, 1> biases_3(1, 0);
356 //   biases_3.setRandom(parameters_minimum, parameters_maximum);
357 //   convolutional_layer_3->set_biases(biases_3);
358 
359 //   PoolingLayer* pooling_layer_2 = new PoolingLayer(convolutional_layer_3->get_outputs_dimensions(), {2,2});
360 //   pooling_layer_2->set_pooling_method(PoolingLayer::MaxPooling);
361 
362 //   PoolingLayer* pooling_layer_3 = new PoolingLayer(pooling_layer_2->get_outputs_dimensions(), {2,2});
363 //   pooling_layer_3->set_pooling_method(PoolingLayer::MaxPooling);
364 
365 //   PerceptronLayer* perceptron_layer = new PerceptronLayer(pooling_layer_3->get_outputs_dimensions().calculate_product(), 3, OpenNN::PerceptronLayer::ActivationFunction::Linear);
366 //   perceptron_layer->set_parameters_random(parameters_minimum, parameters_maximum);
367 
368 //   ProbabilisticLayer* probabilistic_layer = new ProbabilisticLayer(perceptron_layer->get_neurons_number(), outputs_number);
369 //   probabilistic_layer->set_parameters_random(parameters_minimum, parameters_maximum);
370 
371 //   neural_network.set();
372 //   neural_network.add_layer(convolutional_layer_1);
373 //   neural_network.add_layer(convolutional_layer_2);
374 //   neural_network.add_layer(pooling_layer_1);
375 //   neural_network.add_layer(convolutional_layer_3);
376 //   neural_network.add_layer(pooling_layer_2);
377 //   neural_network.add_layer(pooling_layer_3);
378 //   neural_network.add_layer(perceptron_layer);
379 //   neural_network.add_layer(probabilistic_layer);
380 
381 //   numerical_error_gradient = mean_squared_error.calculate_error_gradient_numerical_differentiation();
382 
383 //   error_gradient = mean_squared_error.calculate_error_gradient();
384 
385 //   assert_true(absolute_value(numerical_error_gradient - error_gradient) < 1e-3, LOG);
386 }
387 
388 }
389 
390 /*
391 void MeanSquaredErrorTest::test_calculate_error_terms()
392 {
393    cout << "test_calculate_error_terms\n";
394 
395    NeuralNetwork neural_network;
396    Tensor<Index, 1> hidden_layers_size;
397 
398    Index parameters;
399    DataSet data_set;
400 
401    MeanSquaredError mean_squared_error(&neural_network, &data_set);
402 
403    DataSet::Batch batch(1, &data_set);
404 
405 
406    Index batch_samples_number = batch.get_samples_number();
407 
408    Tensor<type, 1> error_terms;
409 
410    // Test
411 
412    Tensor<Index, 1> architecture(3);
413    architecture.setValues({1,1});
414 
415 
416    neural_network.set(NeuralNetwork::Approximation, architecture);
417    neural_network.set_parameters_random();
418 
419    data_set.set(1, 1, 1);
420    data_set.set_data_random();
421 
422    NeuralNetwork::ForwardPropagation forward_propagation(batch_samples_number, &neural_network);
423    LossIndex::SecondOrderLoss second_order_loss(parameters,batch_samples_number);
424 
425    neural_network.forward_propagate(batch, forward_propagation);
426 
427    mean_squared_error.calculate_error_terms(batch, forward_propagation, second_order_loss);
428    error_terms=second_order_loss.error_terms;
429 
430 //   Eigen::array<int, 2> vector_times_vector = {Eigen::array<int, 2> ({1,1})};
431 
432 //   const Tensor<type, 0> product_result = error_terms.contract(error_terms, vector_times_vector);
433 
434 //   assert_true(abs(product_result(0) - error) < 1.0e-3, LOG);
435 }
436 */
437 
// Intended to compare the error-terms Jacobian computed by MeanSquaredError
// against a numerical-differentiation Jacobian for several small networks.
// The whole body below is currently disabled (commented out): it targets an
// older API (calculate_output_gradient, calculate_layers_delta, etc.) and has
// not yet been migrated. Only the progress message executes today, so the
// test always "passes". Re-enable the sections once they are ported.
void MeanSquaredErrorTest::test_calculate_error_terms_Jacobian()
{
   cout << "test_calculate_error_terms_Jacobian\n";

//  NumericalDifferentiation nd;

//  NeuralNetwork neural_network;
//  Tensor<Index, 1> architecture;
//  Tensor<type, 1> parameters;

//  DataSet data_set;

//  MeanSquaredError mean_squared_error(&neural_network, &data_set);

//  Tensor<type, 1> error_gradient;

//  Tensor<type, 1> error_terms;
//  Tensor<type, 2> terms_Jacobian;
//  Tensor<type, 2> numerical_Jacobian_terms;

//  Tensor<type, 2> inputs;
//  Tensor<type, 2> targets;
//  Tensor<type, 2> outputs;

//  Tensor<type, 2> output_gradient;
//  Tensor<Tensor<type, 2>, 1> layers_delta;

   // Test: 1-1 network, zero parameters and data -> zero Jacobian.

//   architecture.setValues({1,1});

//   neural_network.set(NeuralNetwork::Approximation, architecture);

//   neural_network.set_parameters_constant(0.0);

//   data_set.set(1, 1, 1);
//  data_set.initialize_data(0.0);

//  inputs = data_set.get_training_input_data();
// targets = data_set.get_training_target_data();
//   outputs = neural_network.calculate_outputs(inputs);

//   Tensor<Layer::ForwardPropagation, 1> forward_propagation = neural_network.forward_propagate(inputs);

//   output_gradient = mean_squared_error.calculate_output_gradient(outputs, targets);

//   layers_delta = mean_squared_error.calculate_layers_delta(forward_propagation, output_gradient);

//   terms_Jacobian = mean_squared_error.calculate_error_terms_Jacobian(inputs, forward_propagation, layers_delta);

//   assert_true(terms_Jacobian.dimension(0) == data_set.get_training_samples_number(), LOG);
//   assert_true(terms_Jacobian.dimension(1) == neural_network.get_parameters_number(), LOG);
//   assert_true(terms_Jacobian == 0.0, LOG);

   // Test: 3-4-2 network, zero parameters and data -> zero Jacobian.

//   neural_network.set(NeuralNetwork::Approximation, {3, 4, 2});
//   neural_network.set_parameters_constant(0.0);

//   data_set.set(3, 2, 5);
//   mean_squared_error.set(&neural_network, &data_set);
//   data_set.initialize_data(0.0);

//   inputs = data_set.get_training_input_data();
//   targets = data_set.get_training_target_data();
//   outputs = neural_network.calculate_outputs(inputs);

   //forward_propagation = nn.forward_propagate(inputs);

//   output_gradient = mean_squared_error.calculate_output_gradient(outputs, targets);

//   layers_delta = mean_squared_error.calculate_layers_delta(forward_propagation, output_gradient);

//   terms_Jacobian = mean_squared_error.calculate_error_terms_Jacobian(inputs, forward_propagation, layers_delta);

//   assert_true(terms_Jacobian.dimension(0) == data_set.get_training_samples_number(), LOG);
//   assert_true(terms_Jacobian.dimension(1) == neural_network.get_parameters_number(), LOG);
//   assert_true(terms_Jacobian == 0.0, LOG);

   // Test: 2-1-2 network, zero parameters and data -> zero Jacobian.

//   architecture.resize(3);
//   architecture[0] = 2;
//   architecture[1] = 1;
//   architecture[2] = 2;

//   neural_network.set(NeuralNetwork::Approximation, architecture);
//   neural_network.set_parameters_constant(0.0);

//   data_set.set(2, 2, 5);
//   mean_squared_error.set(&neural_network, &data_set);
//   data_set.initialize_data(0.0);

//   inputs = data_set.get_training_input_data();
//   targets = data_set.get_training_target_data();
//   outputs = neural_network.calculate_outputs(inputs);

//   forward_propagation = neural_network.forward_propagate(inputs);

//   output_gradient = mean_squared_error.calculate_output_gradient(outputs, targets);

//   layers_delta = mean_squared_error.calculate_layers_delta(forward_propagation, output_gradient);

//   terms_Jacobian = mean_squared_error.calculate_error_terms_Jacobian(inputs, forward_propagation, layers_delta);

//   assert_true(terms_Jacobian.dimension(0) == data_set.get_training_samples_number(), LOG);
//   assert_true(terms_Jacobian.dimension(1) == neural_network.get_parameters_number(), LOG);
//   assert_true(terms_Jacobian == 0.0, LOG);

   // Test: analytic Jacobian vs numerical differentiation on constant data.

//   architecture.setValues({1,1});

//   neural_network.set(NeuralNetwork::Approximation, architecture);
//   neural_network.set_parameters_constant(0.0);
//   //nn.set_layer_activation_function(0, PerceptronLayer::Linear);
////   nn.set_parameters_random();
//   parameters = neural_network.get_parameters();

//   data_set.set(1, 1, 1);
////   data_set.set_data_random();
//   data_set.initialize_data(1.0);

//   inputs = data_set.get_training_input_data();
//   targets = data_set.get_training_target_data();
//   outputs = neural_network.calculate_outputs(inputs);

//   forward_propagation = nn.forward_propagate(inputs);

//   output_gradient = mean_squared_error.calculate_output_gradient(outputs, targets);

//   layers_delta = mean_squared_error.calculate_layers_delta(forward_propagation, output_gradient);

//   cout << "layers delta: " << layers_delta << endl;

//   terms_Jacobian = mean_squared_error.calculate_error_terms_Jacobian(inputs, forward_propagation, layers_delta);

//   numerical_Jacobian_terms = nd.calculate_Jacobian(mean_squared_error, &MeanSquaredError::calculate_training_error_terms, parameters);

//   cout << "Terms Jacobian: " << terms_Jacobian << endl;
//   cout << "Numerical: " << numerical_Jacobian_terms << endl;

//   assert_true(absolute_value(terms_Jacobian-numerical_Jacobian_terms) < 1.0e-3, LOG);

   // Test: analytic Jacobian vs numerical differentiation on random data.

//   architecture.setValues({1,1});

//   neural_network.set(NeuralNetwork::Approximation, architecture);
//   neural_network.set_parameters_random();
//   parameters = neural_network.get_parameters();

//   data_set.set(2, 2, 2);
//   data_set.set_data_random();

//   terms_Jacobian = mean_squared_error.calculate_error_terms_Jacobian();
//   numerical_Jacobian_terms = nd.calculate_Jacobian(mean_squared_error, &MeanSquaredError::calculate_training_error_terms, parameters);

//   assert_true(absolute_value(terms_Jacobian-numerical_Jacobian_terms) < 1.0e-3, LOG);

   // Test: gradient identity g = 2 * J^T * e must hold.

//   architecture.setValues({2,2,2});

//   neural_network.set(NeuralNetwork::Approximation, architecture);
//   neural_network.set_parameters_random();

//   data_set.set(2, 2, 2);
//   data_set.set_data_random();

//   error_gradient = mean_squared_error.calculate_error_gradient({0, 1});

//   error_terms = mean_squared_error.calculate_training_error_terms();
//   terms_Jacobian = mean_squared_error.calculate_error_terms_Jacobian();

//   assert_true(absolute_value((terms_Jacobian.calculate_transpose()).dot(error_terms)*2.0 - error_gradient) < 1.0e-3, LOG);
}
615 
616 
test_to_XML()617 void MeanSquaredErrorTest::test_to_XML()
618 {
619    cout << "test_to_XML\n";
620 }
621 
622 
test_from_XML()623 void MeanSquaredErrorTest::test_from_XML()
624 {
625    cout << "test_from_XML\n";
626 }
627 
628 
run_test_case()629 void MeanSquaredErrorTest::run_test_case()
630 {
631    cout << "Running mean squared error test case...\n";
632 
633    // Constructor and destructor methods
634 
635    test_constructor();
636    test_destructor();
637 
638    // Get methods
639 
640    // Set methods
641 
642    // Error methods
643 
644    test_calculate_error();
645 
646    test_calculate_error_gradient();
647 
648 //   // Error terms methods
649 
650    //test_calculate_error_terms();
651    //test_calculate_error_terms_Jacobian();
652 
653 //   // Serialization methods
654 
655 //   test_to_XML();
656 //   test_from_XML();
657 
658    cout << "End of mean squared error test case.\n\n";
659 }
660 
661 
662 // OpenNN: Open Neural Networks Library.
663 // Copyright (C) 2005-2020 Artificial Intelligence Techniques, SL.
664 //
665 // This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
667 // License as published by the Free Software Foundation; either
668 // version 2.1 of the License, or any later version.
669 //
670 // This library is distributed in the hope that it will be useful,
671 // but WITHOUT ANY WARRANTY; without even the implied warranty of
672 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
674 
// You should have received a copy of the GNU Lesser General Public
676 // License along with this library; if not, write to the Free Software
677 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
678