1 // OpenNN: Open Neural Networks Library
2 // www.opennn.net
3 //
4 // S U M S Q U A R E D E R R O R T E S T C L A S S
5 //
6 // Artificial Intelligence Techniques SL
7 // artelnics@artelnics.com
8
9 #include "sum_squared_error_test.h"
10 #include <omp.h>
11
/// Default constructor. Delegates all set-up to the UnitTesting base class.
SumSquaredErrorTest::SumSquaredErrorTest() : UnitTesting()
{
}
15
16
/// Destructor. Nothing to release; members are cleaned up by the base class.
SumSquaredErrorTest::~SumSquaredErrorTest()
{
}
20
21
test_constructor()22 void SumSquaredErrorTest::test_constructor()
23 {
24 cout << "test_constructor\n";
25
26 // Default
27
28 SumSquaredError sum_squared_error_1;
29
30 assert_true(sum_squared_error_1.has_neural_network() == false, LOG);
31 assert_true(sum_squared_error_1.has_data_set() == false, LOG);
32
33 // Neural network and data set
34
35 NeuralNetwork neural_network_2;
36 DataSet data_set;
37
38 NeuralNetwork neural_network_3;
39 SumSquaredError sum_squared_error_4(&neural_network_2, &data_set);
40
41 assert_true(sum_squared_error_4.has_neural_network() == true, LOG);
42 assert_true(sum_squared_error_4.has_data_set() == true, LOG);
43
44 }
45
46
/// Placeholder: the destructor has no observable behavior to check, so this
/// test only announces itself.
void SumSquaredErrorTest::test_destructor()
{
    cout << "test_destructor\n";
}
51
52
/// Checks SumSquaredError::calculate_error on hand-built batches.
/// Test 0: all-zero data and all-zero parameters -> error must be exactly 0.
/// Test 1: data of ones and zero parameters -> error 2 (two targets, each
///         contributing (1 - 0)^2).
/// Tests 2 and 3 (recurrent layer and a 3-1-2 approximation network) are
/// currently disabled.
void SumSquaredErrorTest::test_calculate_error()
{
    cout << "test_calculate_error\n";

    NeuralNetwork neural_network;

    Tensor<type, 1> parameters;

    DataSet data_set;
    Tensor<type, 2> data;

    // Regularization is switched off so the error reflects the data term only.
    SumSquaredError sum_squared_error(&neural_network, &data_set);
    sum_squared_error.set_regularization_method(LossIndex::RegularizationMethod::NoRegularization);

    // Test 0

    // Dataset: 1 sample, 2 inputs, 2 targets, all zeros.

    data_set.set(1, 2, 2);
    data_set.initialize_data(0.0);
    data_set.set_training();

    DataSet::Batch batch(1, &data_set);

    Tensor<Index,1> training_samples_indices = data_set.get_training_samples_indices();
    Tensor<Index,1> inputs_indices = data_set.get_input_variables_indices();
    Tensor<Index,1> targets_indices = data_set.get_target_variables_indices();

    batch.fill(training_samples_indices, inputs_indices, targets_indices);

    // Neural network: 2-2 approximation network with every parameter at zero.

    Index inputs_number = 2;
    Index target_number = 2;
    Tensor<Index, 1>architecture(2);
    architecture.setValues({inputs_number,target_number});

    neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_thread_pool_device(thread_pool_device);
    neural_network.set_parameters_constant(0.0);

    NeuralNetwork::ForwardPropagation forward_propagation(data_set.get_training_samples_number(), &neural_network);
    LossIndex::BackPropagation training_back_propagation(data_set.get_training_samples_number(), &sum_squared_error);

    // The forward and backward passes must run before the error is evaluated.
    neural_network.forward_propagate(batch, forward_propagation);
    sum_squared_error.back_propagate(batch, forward_propagation, training_back_propagation);

    sum_squared_error.calculate_error(batch, forward_propagation, training_back_propagation);

    // Outputs and targets are both zero, so the sum of squares is zero.
    assert_true(training_back_propagation.error == 0.0, LOG);

    // Test 1

    // Dataset: same 1x2x2 layout but every value is one.

    data_set.set(1, 2, 2);
    data_set.initialize_data(1.0);
    data_set.set_training();

    DataSet::Batch batch_1(1, &data_set);

    training_samples_indices = data_set.get_training_samples_indices();
    inputs_indices = data_set.get_input_variables_indices();
    targets_indices = data_set.get_target_variables_indices();

    batch_1.fill(training_samples_indices, inputs_indices, targets_indices);

    // Neural network: parameters reset to zero, so outputs stay at zero.

    neural_network.set_parameters_constant(0.0);

    NeuralNetwork::ForwardPropagation forward_propagation_1(data_set.get_training_samples_number(), &neural_network);
    LossIndex::BackPropagation training_back_propagation_1(data_set.get_training_samples_number(), &sum_squared_error);

    neural_network.forward_propagate(batch_1, forward_propagation_1);
    sum_squared_error.back_propagate(batch_1, forward_propagation_1, training_back_propagation_1);

    sum_squared_error.calculate_error(batch_1, forward_propagation_1, training_back_propagation_1);

    // Two target variables, each with error (1 - 0)^2 = 1, summing to 2.
    assert_true(training_back_propagation_1.error == 2.0, LOG);

    // Test 2 (disabled): recurrent + perceptron layers on a 1x1x1 data set.

    // Dataset

    data_set.set(1, 1, 1);
    data_set.initialize_data(0.0);
    data_set.set_training();

    DataSet::Batch batch_2(1, &data_set);

    training_samples_indices = data_set.get_training_samples_indices();
    inputs_indices = data_set.get_input_variables_indices();
    targets_indices = data_set.get_target_variables_indices();

    batch_2.fill(training_samples_indices, inputs_indices, targets_indices);

    // Neural network

    neural_network.set();

    // RecurrentLayer* recurrent_layer = new RecurrentLayer(1, 1);
    // neural_network.add_layer(recurrent_layer);
    // PerceptronLayer* perceptron_layer = new PerceptronLayer(1,1);
    // neural_network.add_layer(perceptron_layer);


    // neural_network.set_parameters_constant(0.0);

    // NeuralNetwork::ForwardPropagation forward_propagation_2(data_set.get_training_samples_number(), &neural_network);
    // LossIndex::BackPropagation training_back_propagation_2(data_set.get_training_samples_number(), &sum_squared_error);

    // neural_network.forward_propagate(batch_2, forward_propagation_2);
    // sum_squared_error.back_propagate(batch_2, forward_propagation_2, training_back_propagation_2);

    // sum_squared_error.calculate_error(batch_2, forward_propagation_2, training_back_propagation_2);

    // assert_true(training_back_propagation_2.error == 0.0, LOG);

    // Test 3 (disabled): 3-1-2 approximation network on a fixed 9-sample data set.

    // Dataset

    // data.resize(9,5);
    // data.setValues({{-1,-1,1,-1,3},
    //                 {-1,0,1,0,2},
    //                 {-1,1,1,1,2},
    //                 {0,-1,1,0,2},
    //                 {0,0,1,1,1},
    //                 {0,1,1,2,2},
    //                 {1,-2,1,1,3},
    //                 {1,0,1,2,2},
    //                 {1,1,1,3,3}});
    // data_set.set(data);
    // data_set.set(9, 3, 2);
    // data_set.set_training();

    // DataSet::Batch batch_3(9, &data_set);

    // training_samples_indices = data_set.get_training_samples_indices();
    // inputs_indices = data_set.get_input_variables_indices();
    // targets_indices = data_set.get_target_variables_indices();

    // batch_3.fill(training_samples_indices, inputs_indices, targets_indices);

    // // Neural network

    // neural_network.set();

    // architecture.setValues({3,1,2});
    // neural_network.set(NeuralNetwork::Approximation, architecture);

    // neural_network.set_parameters_constant(1.0);

    // NeuralNetwork::ForwardPropagation forward_propagation_3(data_set.get_training_samples_number(), &neural_network);
    // LossIndex::BackPropagation training_back_propagation_3(data_set.get_training_samples_number(), &sum_squared_error);

    // neural_network.forward_propagate(batch_3, forward_propagation_3);
    // sum_squared_error.back_propagate(batch_3, forward_propagation_3, training_back_propagation_3);

    // forward_propagation_3.print();
    // sum_squared_error.calculate_error(batch_3, forward_propagation_3, training_back_propagation_3);

    // assert_true(training_back_propagation_3.error - 8.241 < 1e-3, LOG);

}
219
220
/// Checks SumSquaredError::calculate_output_gradient, the derivative of the
/// error with respect to the network outputs.
/// Test 0: zero data and zero parameters -> gradient components are zero.
/// Test 1: components compared against numerical differentiation.
/// Test 2_1: fixed-parameter perceptron layer against precomputed values.
/// Test 2_2 (recurrent layer) is currently disabled.
void SumSquaredErrorTest::test_calculate_output_gradient()
{
    cout << "test_calculate_output_gradient\n";
    NeuralNetwork neural_network;

    Tensor<type, 1> parameters;

    DataSet data_set;
    Tensor<type, 2> data;

    // Regularization off so the gradient reflects the data term only.
    SumSquaredError sum_squared_error(&neural_network, &data_set);
    sum_squared_error.set_regularization_method(LossIndex::RegularizationMethod::NoRegularization);

    // Test 0

    // Dataset: 1 sample, 2 inputs, 2 targets, all zeros.

    data_set.set(1, 2, 2);
    data_set.initialize_data(0.0);
    data_set.set_training();

    DataSet::Batch batch(1, &data_set);

    Tensor<Index,1> training_samples_indices = data_set.get_training_samples_indices();
    Tensor<Index,1> inputs_indices = data_set.get_input_variables_indices();
    Tensor<Index,1> targets_indices = data_set.get_target_variables_indices();

    batch.fill(training_samples_indices, inputs_indices, targets_indices);

    // Neural network: 2-2 approximation network, all parameters at zero.

    Index inputs_number = 2;
    Index target_number = 2;
    Tensor<Index, 1>architecture(2);
    architecture.setValues({inputs_number,target_number});

    neural_network.set(NeuralNetwork::Approximation, architecture);

    neural_network.set_parameters_constant(0.0);

    NeuralNetwork::ForwardPropagation forward_propagation(data_set.get_training_samples_number(), &neural_network);
    LossIndex::BackPropagation training_back_propagation(data_set.get_training_samples_number(), &sum_squared_error);

    neural_network.forward_propagate(batch, forward_propagation);
    sum_squared_error.back_propagate(batch, forward_propagation, training_back_propagation);

    sum_squared_error.calculate_output_gradient(batch, forward_propagation, training_back_propagation);

    // Outputs equal targets (both zero), so the output gradient vanishes.
    assert_true(training_back_propagation.output_gradient(0) == 0.0, LOG);
    assert_true(training_back_propagation.output_gradient(1) == 0.0, LOG);

    // Test 1

    // Dataset: same layout, every value one.

    data_set.set(1, 2, 2);
    data_set.initialize_data(1.0);
    data_set.set_training();

    DataSet::Batch batch_1(1, &data_set);

    training_samples_indices = data_set.get_training_samples_indices();
    inputs_indices = data_set.get_input_variables_indices();
    targets_indices = data_set.get_target_variables_indices();

    batch_1.fill(training_samples_indices, inputs_indices, targets_indices);

    // Neural network

    Tensor<type, 1> numerical_gradient;

    neural_network.set_parameters_constant(0.0);

    NeuralNetwork::ForwardPropagation forward_propagation_1(data_set.get_training_samples_number(), &neural_network);
    LossIndex::BackPropagation training_back_propagation_1(data_set.get_training_samples_number(), &sum_squared_error);

    neural_network.forward_propagate(batch_1, forward_propagation_1);
    sum_squared_error.back_propagate(batch_1, forward_propagation_1, training_back_propagation_1);

    sum_squared_error.calculate_output_gradient(batch_1, forward_propagation_1, training_back_propagation_1);

    numerical_gradient = sum_squared_error.calculate_error_gradient_numerical_differentiation(&sum_squared_error);

    // NOTE(review): indices 4 and 5 presumably pick the parameters whose
    // numerical derivatives match the two output-gradient components — confirm
    // against the network's parameter ordering.
    assert_true(abs(training_back_propagation_1.output_gradient(0)-numerical_gradient(4)) < static_cast<type>(1e-3), LOG);
    assert_true(abs(training_back_propagation_1.output_gradient(1)-numerical_gradient(5)) < static_cast<type>(1e-3), LOG);

    // Test 2_1 / Perceptron

    // Dataset: 3 samples, 1 input, 2 targets with fixed values.
    Index samples_number = 3;
    inputs_number = 1;
    Index hidden_neurons = 1;
    Index outputs_number = 2;

    data.resize(samples_number,inputs_number+outputs_number);
    data.setValues({{-1,-1,3},{-1,0,2},{-1,1,2},});

    data_set.set(data);
    data_set.set(samples_number, inputs_number, outputs_number);
    data_set.set_training();


    DataSet::Batch batch_2(3, &data_set);

    training_samples_indices = data_set.get_training_samples_indices();
    inputs_indices = data_set.get_input_variables_indices();
    targets_indices = data_set.get_target_variables_indices();

    batch_2.fill(training_samples_indices, inputs_indices, targets_indices);

    // Neural network: single perceptron layer with hand-picked parameters.

    neural_network.set();

    PerceptronLayer* perceptron_layer = new PerceptronLayer(hidden_neurons,outputs_number);
    neural_network.add_layer(perceptron_layer);

    Tensor<type, 1> parameters_(16);
    parameters_.setValues({1,1,2,0, 1,2,1,1, 1,2,1,0, 1,1,2,1});
    neural_network.set_parameters(parameters_);

    NeuralNetwork::ForwardPropagation forward_propagation_2(data_set.get_training_samples_number(), &neural_network);
    LossIndex::BackPropagation training_back_propagation_2(data_set.get_training_samples_number(), &sum_squared_error);

    neural_network.forward_propagate(batch_2, forward_propagation_2);
    sum_squared_error.back_propagate(batch_2, forward_propagation_2, training_back_propagation_2);

    sum_squared_error.calculate_output_gradient(batch_2, forward_propagation_2, training_back_propagation_2);

    numerical_gradient.resize(neural_network.get_parameters_number());
    numerical_gradient = sum_squared_error.calculate_error_gradient_numerical_differentiation(&sum_squared_error);

    // Spot checks against precomputed gradient entries for this configuration.
    assert_true(abs(training_back_propagation_2.output_gradient(0,1) + static_cast<type>(4.476)) < static_cast<type>(1e-3), LOG);
    assert_true(abs(training_back_propagation_2.output_gradient(1,0) + static_cast<type>(1.523)) < static_cast<type>(1e-3), LOG);
    assert_true(abs(training_back_propagation_2.output_gradient(2,1) + static_cast<type>(2.476)) < static_cast<type>(1e-3), LOG);

    // // Test 2_2 / Recurrent (disabled)

    // // Neural network

    // neural_network.set();

    // RecurrentLayer* recurrent_layer = new RecurrentLayer(inputs_number, outputs_number);
    // recurrent_layer->initialize_hidden_states(0.0);
    // recurrent_layer->set_timesteps(10);
    // neural_network.add_layer(recurrent_layer);

    // neural_network.set_parameters(parameters_);

    // NeuralNetwork::ForwardPropagation forward_propagation_2_2(data_set.get_training_samples_number(), &neural_network);
    // LossIndex::BackPropagation training_back_propagation_2_2(data_set.get_training_samples_number(), &sum_squared_error);

    // neural_network.forward_propagate(batch_2, forward_propagation_2_2);
    // sum_squared_error.back_propagate(batch_2, forward_propagation_2_2, training_back_propagation_2_2);

    // sum_squared_error.calculate_output_gradient(batch_2, forward_propagation_2_2, training_back_propagation_2_2);

    // assert_true(abs(training_back_propagation_2_2.output_gradient(0,1) + 6) < static_cast<type>(1e-3), LOG);
    // assert_true(abs(training_back_propagation_2_2.output_gradient(1,0) + 0) < static_cast<type>(1e-3), LOG);
    // assert_true(abs(training_back_propagation_2_2.output_gradient(2,1) + 4) < static_cast<type>(1e-3), LOG);

}
383
384
/// Checks the gradient computed from the error-terms Jacobian.
/// The whole test body is currently disabled (@todo); only the banner runs.
void SumSquaredErrorTest::test_calculate_Jacobian_gradient() // @todo
{
    cout << "test_calculate_Jacobian_gradient\n";
    // NeuralNetwork neural_network;

    // Tensor<type, 1> parameters;

    // DataSet data_set;
    // Tensor<type, 2> data;

    // SumSquaredError sum_squared_error(&neural_network, &data_set);
    // sum_squared_error.set_regularization_method(LossIndex::RegularizationMethod::NoRegularization);
    // sum_squared_error.set_thread_pool_device(thread_pool_device);

    // // Test 0

    // // Dataset

    // Index inputs_number = 2;
    // Index target_number = 3;

    // data_set.set(1, inputs_number, target_number);
    // data_set.initialize_data(0.0);
    // data_set.set_training();

    // DataSet::Batch batch(1, &data_set);

    // Tensor<Index,1> training_samples_indices = data_set.get_training_samples_indices();
    // Tensor<Index,1> inputs_indices = data_set.get_input_variables_indices();
    // Tensor<Index,1> targets_indices = data_set.get_target_variables_indices();

    // batch.fill(training_samples_indices, inputs_indices, targets_indices);

    // // Neural network

    // Tensor<Index, 1>architecture(2);
    // architecture.setValues({inputs_number,target_number});

    // neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_thread_pool_device(thread_pool_device);
    // neural_network.set_parameters_constant(0.0);

    // NeuralNetwork::ForwardPropagation forward_propagation(data_set.get_training_samples_number(), &neural_network);
    // neural_network.forward_propagate(batch, forward_propagation);

    // LossIndex::BackPropagation training_back_propagation(data_set.get_training_samples_number(), &sum_squared_error);
    // sum_squared_error.back_propagate(batch, forward_propagation, training_back_propagation);

    // LossIndex::SecondOrderLoss second_order_loss(neural_network.get_parameters_number(), training_samples_indices.size());
    // sum_squared_error.calculate_error_terms_Jacobian(batch, forward_propagation, training_back_propagation, second_order_loss);
    // sum_squared_error.calculate_Jacobian_gradient(batch, forward_propagation, second_order_loss);

    // cout << second_order_loss.gradient << endl;

    // assert_true(second_order_loss.gradient(0) == 0.0, LOG);
    // assert_true(second_order_loss.gradient(1) == 0.0, LOG);

}
443
444
/// Checks the analytic error gradient against numerical differentiation for
/// several architectures (LSTM, recurrent, perceptron, convolutional).
/// NOTE(review): each sub-test builds its network and data, but every gradient
/// comparison is commented out, so currently only the set-up code executes.
void SumSquaredErrorTest::test_calculate_error_gradient()
{
    cout << "test_calculate_error_gradient\n";

    DataSet data_set;
    NeuralNetwork neural_network;
    SumSquaredError sum_squared_error(&neural_network, &data_set);

    Tensor<Index, 1> architecture;

    Tensor<type, 1> parameters;
    Tensor<type, 1> gradient;
    Tensor<type, 1> numerical_gradient;
    Tensor<type, 1> error;

    Index inputs_number;
    Index outputs_number;
    Index samples_number;
    Index hidden_neurons;

    // Test lstm: LSTM layer followed by a perceptron layer, random data.
    {
        samples_number = 10;
        inputs_number = 3;
        hidden_neurons = 2;
        outputs_number = 4;

        data_set.set(samples_number, inputs_number, outputs_number);

        data_set.set_data_random();

        data_set.set_training();

        neural_network.set();

        LongShortTermMemoryLayer* long_short_term_memory_layer = new LongShortTermMemoryLayer(inputs_number, hidden_neurons);

        long_short_term_memory_layer->set_timesteps(8);

        neural_network.add_layer(long_short_term_memory_layer);

        PerceptronLayer* perceptron_layer = new PerceptronLayer(hidden_neurons,outputs_number);

        neural_network.add_layer(perceptron_layer);

        neural_network.set_parameters_random();

        parameters = neural_network.get_parameters();

        // numerical_gradient = sum_squared_error.calculate_error_gradient_numerical_differentiation();

        // gradient = sum_squared_error.calculate_error_gradient();

        // assert_true(numerical_gradient-gradient < 1.0e-3, LOG);
    }

    neural_network.set();

    // Test recurrent: recurrent layer followed by a perceptron layer.
    {
        samples_number = 5;
        inputs_number = 3;
        hidden_neurons = 7;
        outputs_number = 3;

        data_set.set(samples_number, inputs_number, outputs_number);

        data_set.set_data_random();

        data_set.set_training();

        neural_network.set();

        RecurrentLayer* recurrent_layer = new RecurrentLayer(inputs_number, hidden_neurons);

        recurrent_layer->initialize_hidden_states(0.0);
        recurrent_layer->set_timesteps(10);

        neural_network.add_layer(recurrent_layer);

        PerceptronLayer* perceptron_layer = new PerceptronLayer(hidden_neurons,outputs_number);

        neural_network.add_layer(perceptron_layer);

        neural_network.set_parameters_random();

        parameters = neural_network.get_parameters();

        // numerical_gradient = sum_squared_error.calculate_error_gradient_numerical_differentiation();

        // gradient = sum_squared_error.calculate_error_gradient();

        // assert_true(numerical_gradient-gradient < 1.0e-3, LOG);
    }

    // Test perceptron: two stacked perceptron layers.

    neural_network.set();
    {
        samples_number = 5;
        inputs_number = 2;
        hidden_neurons = 7;
        outputs_number = 4;

        data_set.set(samples_number, inputs_number, outputs_number);

        data_set.set_data_random();

        data_set.set_training();

        PerceptronLayer* hidden_perceptron_layer = new PerceptronLayer(inputs_number, hidden_neurons);

        neural_network.add_layer(hidden_perceptron_layer);

        PerceptronLayer* output_perceptron_layer = new PerceptronLayer(hidden_neurons, outputs_number);

        neural_network.add_layer(output_perceptron_layer);

        // numerical_gradient = sum_squared_error.calculate_error_gradient_numerical_differentiation();

        // gradient = sum_squared_error.calculate_error_gradient();

        // assert_true(numerical_gradient-gradient < 1.0e-3, LOG);
    }

    // Test convolutional: the layer construction itself is disabled below.
    {
        samples_number = 5;
        inputs_number = 147;
        outputs_number = 1;

        data_set.set(samples_number, inputs_number, outputs_number);
        // data_set.set_input_variables_dimensions(Tensor<Index, 1>({3,7,7}));
        // data_set.set_target_variables_dimensions(Tensor<Index, 1>({1}));
        data_set.set_data_random();
        data_set.set_training();

        const type parameters_minimum = -100.0;
        const type parameters_maximum = 100.0;

        // ConvolutionalLayer* convolutional_layer_1 = new ConvolutionalLayer({3,7,7}, {2,2,2});
        // Tensor<type, 2> filters_1({2,3,2,2}, 0);
        // filters_1.setRandom(parameters_minimum,parameters_maximum);
        // convolutional_layer_1->set_synaptic_weights(filters_1);
        // Tensor<type, 1> biases_1(2, 0);
        // biases_1.setRandom(parameters_minimum, parameters_maximum);
        // convolutional_layer_1->set_biases(biases_1);

        // ConvolutionalLayer* convolutional_layer_2 = new ConvolutionalLayer(convolutional_layer_1->get_outputs_dimensions(), {2,2,2});
        // convolutional_layer_2->set_padding_option(OpenNN::ConvolutionalLayer::Same);
        // Tensor<type, 2> filters_2({2,2,2,2}, 0);
        // filters_2.setRandom(parameters_minimum, parameters_maximum);
        // convolutional_layer_2->set_synaptic_weights(filters_2);
        // Tensor<type, 1> biases_2(2, 0);
        // biases_2.setRandom(parameters_minimum, parameters_maximum);
        // convolutional_layer_2->set_biases(biases_2);

        // PoolingLayer* pooling_layer_1 = new PoolingLayer(convolutional_layer_2->get_outputs_dimensions(), {2,2});

        // ConvolutionalLayer* convolutional_layer_3 = new ConvolutionalLayer(pooling_layer_1->get_outputs_dimensions(), {1,2,2});
        // convolutional_layer_3->set_padding_option(OpenNN::ConvolutionalLayer::Same);
        // Tensor<type, 2> filters_3({1,2,2,2}, 0);
        // filters_3.setRandom(parameters_minimum, parameters_maximum);
        // convolutional_layer_3->set_synaptic_weights(filters_3);
        // Tensor<type, 1> biases_3(1, 0);
        // biases_3.setRandom(parameters_minimum, parameters_maximum);
        // convolutional_layer_3->set_biases(biases_3);

        // PoolingLayer* pooling_layer_2 = new PoolingLayer(convolutional_layer_3->get_outputs_dimensions(), {2,2});
        // pooling_layer_2->set_pooling_method(PoolingLayer::MaxPooling);

        // PoolingLayer* pooling_layer_3 = new PoolingLayer(pooling_layer_2->get_outputs_dimensions(), {2,2});
        // pooling_layer_3->set_pooling_method(PoolingLayer::MaxPooling);

        // PerceptronLayer* perceptron_layer = new PerceptronLayer(pooling_layer_3->get_outputs_dimensions().calculate_product(), 3, OpenNN::PerceptronLayer::ActivationFunction::Linear);
        // perceptron_layer->set_parameters_random(parameters_minimum, parameters_maximum);

        // ProbabilisticLayer* probabilistic_layer = new ProbabilisticLayer(perceptron_layer->get_neurons_number(), outputs_number);
        // probabilistic_layer->set_parameters_random(parameters_minimum, parameters_maximum);

        // neural_network.set();
        // neural_network.add_layer(convolutional_layer_1);
        // neural_network.add_layer(convolutional_layer_2);
        // neural_network.add_layer(pooling_layer_1);
        // neural_network.add_layer(convolutional_layer_3);
        // neural_network.add_layer(pooling_layer_2);
        // neural_network.add_layer(pooling_layer_3);
        // neural_network.add_layer(perceptron_layer);
        // neural_network.add_layer(probabilistic_layer);

        // numerical_gradient = sum_squared_error.calculate_error_gradient_numerical_differentiation();

        // gradient = sum_squared_error.calculate_error_gradient();

        // assert_true(absolute_value(numerical_gradient - gradient) < 1e-3, LOG);
    }
}
642
643
/// Placeholder for error-terms checks: no assertions implemented yet, only
/// the banner is printed.
void SumSquaredErrorTest::test_calculate_error_terms()
{
    cout << "test_calculate_error_terms\n";
}
648
649
/// Checks the Jacobian of the error terms with respect to the parameters.
/// The whole test body (six sub-tests, including comparisons against
/// numerical Jacobians) is currently disabled; only the banner runs.
void SumSquaredErrorTest::test_calculate_error_terms_Jacobian()
{
    cout << "test_calculate_error_terms_Jacobian\n";

    // NumericalDifferentiation nd;

    // NeuralNetwork neural_network;
    // Tensor<Index, 1> architecture;
    // Tensor<type, 1> parameters;

    // DataSet data_set;

    // SumSquaredError sum_squared_error(&neural_network, &data_set);

    // Tensor<type, 1> gradient;

    // Tensor<type, 1> terms;
    // Tensor<type, 2> terms_Jacobian;
    // Tensor<type, 2> numerical_Jacobian_terms;

    // Tensor<Index, 1> samples;

    // Tensor<type, 2> inputs;
    // Tensor<type, 2> targets;

    // Tensor<type, 2> outputs;
    // Tensor<type, 2> output_gradient;

    // Tensor<Tensor<type, 2>, 1> layers_activations;

    // Tensor<Tensor<type, 2>, 1> layers_activations_derivatives;

    // Tensor<Tensor<type, 2>, 1> layers_delta;

    // Test

    // architecture.setValues({1, 1, 1});

    // neural_network.set(NeuralNetwork::Approximation, architecture);

    // neural_network.set_parameters_constant(0.0);

    // data_set.set(1, 1, 1);

    // data_set.initialize_data(0.0);

    // samples.set(1,0);
    //samples.initialize_sequential();

    // inputs = data_set.get_input_data(samples);
    // targets = data_set.get_target_data(samples);

    // outputs = neural_network.calculate_outputs(inputs);
    // output_gradient = sum_squared_error.calculate_output_gradient(outputs, targets);

    // Tensor<Layer::ForwardPropagation, 1> forward_propagation = neural_network.forward_propagate(inputs);

    // layers_delta = sum_squared_error.calculate_layers_delta(forward_propagation, output_gradient);

    // terms_Jacobian = sum_squared_error.calculate_error_terms_Jacobian(inputs, forward_propagation, layers_delta);

    // assert_true(terms_Jacobian.dimension(0) == data_set.get_samples_number(), LOG);
    // assert_true(terms_Jacobian.dimension(1) == neural_network.get_parameters_number(), LOG);
    // assert_true(terms_Jacobian == 0.0, LOG);

    // Test

    // architecture.setValues({3, 4, 2});

    // neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_parameters_constant(0.0);

    // data_set.set(3, 2, 5);
    // sum_squared_error.set(&neural_network, &data_set);
    // data_set.initialize_data(0.0);

    // terms_Jacobian = sum_squared_error.calculate_error_terms_Jacobian();

    // assert_true(terms_Jacobian.dimension(0) == data_set.get_training_samples_number(), LOG);
    // assert_true(terms_Jacobian.dimension(1) == neural_network.get_parameters_number(), LOG);
    // assert_true(terms_Jacobian == 0.0, LOG);

    // Test

    // architecture.resize(3);
    // architecture[0] = 5;
    // architecture[1] = 1;
    // architecture[2] = 2;

    // neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_parameters_constant(0.0);

    // data_set.set(5, 2, 3);
    // sum_squared_error.set(&neural_network, &data_set);
    // data_set.initialize_data(0.0);

    // terms_Jacobian = sum_squared_error.calculate_error_terms_Jacobian();

    // assert_true(terms_Jacobian.dimension(0) == data_set.get_training_samples_number(), LOG);
    // assert_true(terms_Jacobian.dimension(1) == neural_network.get_parameters_number(), LOG);
    // assert_true(terms_Jacobian == 0.0, LOG);

    // Test

    // architecture.setValues({1, 1, 1});

    // neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_parameters_random();
    // parameters = neural_network.get_parameters();

    // data_set.set(1, 1, 1);
    // data_set.set_data_random();

    // terms_Jacobian = sum_squared_error.calculate_error_terms_Jacobian();
    // numerical_Jacobian_terms = nd.calculate_Jacobian(sse, &SumSquaredError::calculate_training_terms, parameters);

    // assert_true(absolute_value(terms_Jacobian-numerical_Jacobian_terms) < 1.0e-3, LOG);

    // Test

    // architecture.setValues({2, 2, 2});

    // neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_parameters_random();
    // parameters = neural_network.get_parameters();

    // data_set.set(2, 2, 2);
    // data_set.set_data_random();

    // terms_Jacobian = sum_squared_error.calculate_error_terms_Jacobian();
    // numerical_Jacobian_terms = nd.calculate_Jacobian(sse, &SumSquaredError::calculate_training_terms, parameters);

    // assert_true(absolute_value(terms_Jacobian-numerical_Jacobian_terms) < 1.0e-3, LOG);

    // Test

    // architecture.setValues({2, 2, 2});

    // neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_parameters_random();

    // data_set.set(2, 2, 2);
    // data_set.set_data_random();

    // gradient = sum_squared_error.calculate_gradient();

    // terms = sum_squared_error.calculate_training_error_terms();
    // terms_Jacobian = sum_squared_error.calculate_error_terms_Jacobian();

    // assert_true(absolute_value((terms_Jacobian.calculate_transpose()).dot(terms)*2.0 - gradient) < 1.0e-3, LOG);

}
802
803
test_calculate_squared_errors()804 void SumSquaredErrorTest::test_calculate_squared_errors()
805 {
806 cout << "test_calculate_squared_errors\n";
807
808 NeuralNetwork neural_network;
809 Tensor<Index, 1> architecture;
810
811 DataSet data_set;
812
813 SumSquaredError sum_squared_error(&neural_network, &data_set);
814
815 Tensor<type, 1> squared_errors;
816
817 // type error;
818
819 // Test
820
821 architecture.setValues({1,1,1});
822
823 neural_network.set(NeuralNetwork::Approximation, architecture);
824
825 neural_network.set_parameters_constant(0.0);
826
827 data_set.set(1,1,1);
828
829 data_set.initialize_data(0.0);
830
831 // squared_errors = sum_squared_error.calculate_squared_errors();
832
833 assert_true(squared_errors.size() == 1, LOG);
834 // assert_true(squared_errors == 0.0, LOG);
835
836 // Test
837
838 architecture.setValues({2,2,2});
839
840 neural_network.set(NeuralNetwork::Approximation, architecture);
841
842 neural_network.set_parameters_random();
843
844 data_set.set(2,2,2);
845
846 data_set.set_data_random();
847
848 // squared_errors = sum_squared_error.calculate_squared_errors();
849
850 // error = sum_squared_error.calculate_error();
851
852 // assert_true(abs(squared_errors.sum() - error) < 1.0e-12, LOG);
853 }
854
855
/// Checks XML serialization of a SumSquaredError object.
/// The to_XML round-trip is currently disabled; only construction is tested.
void SumSquaredErrorTest::test_to_XML()
{
    cout << "test_to_XML\n";

    SumSquaredError sum_squared_error;

    // tinyxml2::XMLDocument* document;

    // Test

    // document = sum_squared_error.to_XML();

    // assert_true(document != nullptr, LOG);

    // delete document;
}
872
873
test_from_XML()874 void SumSquaredErrorTest::test_from_XML()
875 {
876 cout << "test_from_XML\n";
877
878 SumSquaredError sum_squared_error1;
879 SumSquaredError sum_squared_error2;
880
881 tinyxml2::XMLDocument* document;
882
883 // Test
884
885 // sum_squared_error1.set_display(false);
886
887 // document = sum_squared_error1.to_XML();
888
889 // sum_squared_error2.from_XML(*document);
890
891 // delete document;
892
893 // assert_true(sum_squared_error2.get_display() == false, LOG);
894 }
895
896
run_test_case()897 void SumSquaredErrorTest::run_test_case()
898 {
899 cout << "Running sum squared error test case...\n";
900
901 // Constructor and destructor methods
902
903 test_constructor();
904 test_destructor();
905
906
907 // Get methods
908
909 // Set methods
910
911 // Error methods
912
913 test_calculate_error();
914
915 test_calculate_output_gradient();
916
917 test_calculate_Jacobian_gradient();
918
919
920 // Error terms methods
921
922 test_calculate_error_terms();
923
924 test_calculate_error_terms_Jacobian();
925
926
927 //Serialization methods
928
929 test_to_XML();
930
931 test_from_XML();
932
933 cout << "End of sum squared error test case.\n\n";
934 }
935
936
937 // OpenNN: Open Neural Networks Library.
938 // Copyright (C) 2005-2020 Artificial Intelligence Techniques, SL.
939 //
940 // This library is free software; you can redistribute it and/or
941 // modify it under the terms of the GNU Lesser General Public
942 // License as published by the Free Software Foundation; either
943 // version 2.1 of the License, or any later version.
944 //
945 // This library is distributed in the hope that it will be useful,
946 // but WITHOUT ANY WARRANTY; without even the implied warranty of
947 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
948 // Lesser General Public License for more details.
949
950 // You should have received a copy of the GNU Lesser General Public
951 // License along with this library; if not, write to the Free Software
952 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
953