// OpenNN: Open Neural Networks Library
// www.opennn.net
//
//   W E I G H T E D   S Q U A R E D   E R R O R   T E S T   C L A S S
//
//   Artificial Intelligence Techniques SL
//   artelnics@artelnics.com

#include "weighted_squared_error_test.h"


WeightedSquaredErrorTest::WeightedSquaredErrorTest() : UnitTesting()
{
}


WeightedSquaredErrorTest::~WeightedSquaredErrorTest()
{
}

void WeightedSquaredErrorTest::test_constructor()
{
    cout << "test_constructor\n";

    // Default

    WeightedSquaredError wse1;

    assert_true(wse1.has_neural_network() == false, LOG);
    assert_true(wse1.has_data_set() == false, LOG);

    // Neural network and data set

    NeuralNetwork nn3;
    DataSet ds3;
    WeightedSquaredError wse3(&nn3, &ds3);

    assert_true(wse3.has_neural_network() == true, LOG);
    assert_true(wse3.has_data_set() == true, LOG);
}


void WeightedSquaredErrorTest::test_destructor()
{
}


void WeightedSquaredErrorTest::test_calculate_error()
{
    cout << "test_calculate_error\n";

    Tensor<Index, 1> architecture(2);
    architecture.setValues({1, 2});
    Tensor<type, 1> parameters;

    NeuralNetwork neural_network(NeuralNetwork::Classification, architecture);

    neural_network.set_parameters_constant(1);

    DataSet data_set(1, 1, 1);

    const Index samples_number = 1;
    const Index inputs_number = 1;
    const Index outputs_number = 1;
    const Index hidden_neurons = 1;

    Tensor<type, 2> new_data(2, 2);
    new_data(0,0) = 0.0;
    new_data(0,1) = 0.0;
    new_data(1,0) = 1.0;
    new_data(1,1) = 1.0;

    data_set.set_data(new_data);
    data_set.set_training();

    WeightedSquaredError wse(&neural_network, &data_set);

    wse.set_weights();
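
    // set_weights() is expected to balance the two target classes; under the
    // usual convention the positives weight is the negatives/positives ratio
    // and the negatives weight is 1, so with one positive and one negative
    // sample (as here) both weights should coincide. A hedged sanity check
    // using getters exercised later in this test:
    assert_true(abs(wse.get_positives_weight() - wse.get_negatives_weight()) < 1e-3, LOG);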

    DataSet::Batch batch(1, &data_set);

    const Tensor<Index, 1> batch_samples_indices = data_set.get_used_samples_indices();
    const Tensor<Index, 1> inputs_indices = data_set.get_input_variables_indices();
    const Tensor<Index, 1> targets_indices = data_set.get_target_variables_indices();

    batch.fill(batch_samples_indices, inputs_indices, targets_indices);

    const Index batch_samples_number = batch.get_samples_number();
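
    // ForwardPropagation caches the layer outputs for the batch, and
    // BackPropagation accumulates the loss value and its gradient with
    // respect to the network parameters.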

    NeuralNetwork::ForwardPropagation forward_propagation(batch_samples_number, &neural_network);

    LossIndex::BackPropagation back_propagation(batch_samples_number, &wse);

    neural_network.forward_propagate(batch, forward_propagation);

    wse.calculate_error(batch, forward_propagation, back_propagation);

    assert_true(abs(back_propagation.error - 1) < 1e-3, LOG);
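
    // For reference: up to OpenNN's normalization coefficient, the weighted
    // squared error is E = sum_i w_i * (output_i - target_i)^2, where w_i is
    // the positives or negatives weight according to the target of sample i.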

    // Test

    architecture.setValues({3, 1});

    neural_network.set(NeuralNetwork::Approximation, architecture);

    neural_network.set_parameters_constant(0.0);

    DataSet data_set_2;

    data_set_2.set(3, 3, 1);

    Tensor<type, 2> new_data_2(3, 3);
    new_data_2(0,0) = 0.0;
    new_data_2(0,1) = 0.0;
    new_data_2(0,2) = 0.0;
    new_data_2(1,0) = 1.0;
    new_data_2(1,1) = 1.0;
    new_data_2(1,2) = 1.0;
    new_data_2(2,0) = 1.0;
    new_data_2(2,1) = 0.0;
    new_data_2(2,2) = 0.0;

    data_set_2.set_data(new_data_2);

    WeightedSquaredError wse_2(&neural_network, &data_set_2);

    wse_2.set_weights();
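
    // Here the target column holds one positive and two negatives, so the
    // balancing weights should differ (typically positives_weight = 2 under
    // the negatives/positives ratio convention, while negatives_weight = 1).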

    assert_true(wse_2.get_positives_weight() != wse_2.get_negatives_weight(), LOG);
}


void WeightedSquaredErrorTest::test_calculate_error_gradient()
{
    cout << "test_calculate_error_gradient\n";

    NeuralNetwork neural_network;

    DataSet data_set;

    WeightedSquaredError wse(&neural_network, &data_set);

    Tensor<type, 1> error_gradient;
    Tensor<type, 1> numerical_error_gradient;

    Index samples_number;
    Index inputs_number;
    Index outputs_number;
    Index hidden_neurons;

    ScalingLayer* scaling_layer = new ScalingLayer();

    RecurrentLayer* recurrent_layer = new RecurrentLayer();

    LongShortTermMemoryLayer* long_short_term_memory_layer = new LongShortTermMemoryLayer();

    PerceptronLayer* hidden_perceptron_layer = new PerceptronLayer();
    PerceptronLayer* output_perceptron_layer = new PerceptronLayer();

    ProbabilisticLayer* probabilistic_layer = new ProbabilisticLayer();

    // Test trivial
    {
        Tensor<Index, 1> architecture(2);
        architecture.setValues({1, 1});
        Tensor<type, 1> parameters;

        NeuralNetwork neural_network(NeuralNetwork::Classification, architecture);

        neural_network.set_parameters_constant(1);

        DataSet data_set(1, 1, 1);

        const Index samples_number = 1;
        const Index inputs_number = 1;
        const Index outputs_number = 1;
        const Index hidden_neurons = 1;

        Tensor<type, 2> new_data(2, 2);
        new_data(0,0) = 0.0;
        new_data(0,1) = 0.0;
        new_data(1,0) = 1.0;
        new_data(1,1) = 1.0;

        data_set.set_data(new_data);
        data_set.set_training();

        WeightedSquaredError wse(&neural_network, &data_set);

        wse.set_weights();

        DataSet::Batch batch(1, &data_set);

        const Tensor<Index, 1> batch_samples_indices = data_set.get_used_samples_indices();
        const Tensor<Index, 1> inputs_indices = data_set.get_input_variables_indices();
        const Tensor<Index, 1> targets_indices = data_set.get_target_variables_indices();

        batch.fill(batch_samples_indices, inputs_indices, targets_indices);

        const Index batch_samples_number = batch.get_samples_number();

        NeuralNetwork::ForwardPropagation forward_propagation(batch_samples_number, &neural_network);

        LossIndex::BackPropagation back_propagation(batch_samples_number, &wse);

        neural_network.forward_propagate(batch, forward_propagation);
        forward_propagation.print();
        wse.back_propagate(batch, forward_propagation, back_propagation);
        // wse.calculate_error(batch, forward_propagation, back_propagation);

        numerical_error_gradient = wse.calculate_error_gradient_numerical_differentiation(&wse);

        assert_true(abs(back_propagation.gradient(0) - 1.1499) < 1e-3, LOG); // @todo 1e-2 precision
        assert_true(abs(back_propagation.gradient(1)) < 1e-3, LOG);
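
        // Cross-check sketch (hedged, mirroring the commented tests below):
        // the back-propagated gradient should agree with the numerical one.
        // assert_true(absolute_value(back_propagation.gradient - numerical_error_gradient) < 1e-3, LOG);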
    }

    // neural_network.set();

    // Test perceptron and probabilistic
    {
        // samples_number = 10;
        // inputs_number = 3;
        // outputs_number = 1;
        // hidden_neurons = 2;

        // data_set.set(samples_number, inputs_number, outputs_number);

        // Tensor<type, 2> inputs(samples_number, inputs_number);

        // inputs.setRandom();

        //// Tensor<type, 1> outputs(samples_number, outputs_number);
        //// outputs[0] = 1.0;
        //// outputs[1] = 0.0;

        // for(Index i = 2; i < samples_number; i++)
        // {
        ////     if((static_cast<Index>(inputs.calculate_row_sum(i)) % 2) < numeric_limits<type>::min())
        ////     {
        ////         outputs[i] = 0.0;
        ////     }
        ////     else
        ////     {
        ////         outputs[i] = 1.0;
        ////     }
        // }

        //// const Tensor<type, 2> data = inputs.append_column(outputs);

        //// data_set.set_data(data);

        // data_set.set_training();

        // hidden_perceptron_layer->set(inputs_number, hidden_neurons);
        // output_perceptron_layer->set(hidden_neurons, outputs_number);
        // probabilistic_layer->set(outputs_number, outputs_number);

        // neural_network.add_layer(hidden_perceptron_layer);
        // neural_network.add_layer(output_perceptron_layer);
        // neural_network.add_layer(probabilistic_layer);

        // neural_network.set_parameters_random();

        //// error_gradient = wse.calculate_error_gradient();

        //// numerical_error_gradient = wse.calculate_error_gradient_numerical_differentiation();

        //// assert_true(absolute_value(error_gradient - numerical_error_gradient) < 1.0e-3, LOG);
    }

    // neural_network.set();

    // Test lstm
    {
        // samples_number = 10;
        // inputs_number = 3;
        // outputs_number = 1;
        // hidden_neurons = 2;

        // data_set.set(samples_number, inputs_number, outputs_number);

        // Tensor<type, 2> inputs(samples_number, inputs_number);

        // inputs.setRandom();

        //// Tensor<type, 1> outputs(samples_number, outputs_number);
        //// outputs[0] = 1.0;
        //// outputs[1] = 0.0;

        // for(Index i = 2; i < samples_number; i++)
        // {
        ////     if((static_cast<Index>(inputs.calculate_row_sum(i)) % 2) < numeric_limits<type>::min())
        ////     {
        ////         outputs[i] = 0.0;
        ////     }
        ////     else
        ////     {
        ////         outputs[i] = 1.0;
        ////     }
        // }

        //// const Tensor<type, 2> data = inputs.append_column(outputs);

        //// data_set.set_data(data);

        // data_set.set_training();

        // long_short_term_memory_layer->set(inputs_number, hidden_neurons);
        // output_perceptron_layer->set(hidden_neurons, outputs_number);

        // neural_network.add_layer(long_short_term_memory_layer);
        // neural_network.add_layer(output_perceptron_layer);

        // neural_network.set_parameters_random();

        //// error_gradient = wse.calculate_error_gradient();

        //// numerical_error_gradient = wse.calculate_error_gradient_numerical_differentiation();

        //// assert_true(absolute_value(error_gradient - numerical_error_gradient) < 1.0e-3, LOG);
    }

    // neural_network.set();

    // Test recurrent
    {
        // samples_number = 10;
        // inputs_number = 3;
        // outputs_number = 1;
        // hidden_neurons = 2;

        // data_set.set(samples_number, inputs_number, outputs_number);

        // Tensor<type, 2> inputs(samples_number, inputs_number);

        // inputs.setRandom();

        //// Tensor<type, 1> outputs(samples_number, outputs_number);
        //// outputs[0] = 1.0;
        //// outputs[1] = 0.0;

        // for(Index i = 2; i < samples_number; i++)
        // {
        ////     if((static_cast<Index>(inputs.calculate_row_sum(i)) % 2) < numeric_limits<type>::min())
        ////     {
        ////         outputs[i] = 0.0;
        ////     }
        ////     else
        ////     {
        ////         outputs[i] = 1.0;
        ////     }
        // }

        //// const Tensor<type, 2> data = inputs.append_column(outputs);

        //// data_set.set_data(data);

        // data_set.set_training();

        // recurrent_layer->set(inputs_number, hidden_neurons);
        // output_perceptron_layer->set(hidden_neurons, outputs_number);

        // neural_network.add_layer(recurrent_layer);
        // neural_network.add_layer(output_perceptron_layer);

        // neural_network.set_parameters_random();

        //// error_gradient = wse.calculate_error_gradient();

        //// numerical_error_gradient = wse.calculate_error_gradient_numerical_differentiation();

        //// assert_true(absolute_value(error_gradient - numerical_error_gradient) < 1.0e-3, LOG);
    }

    // Test convolutional
    {
        // samples_number = 5;
        // inputs_number = 147;
        // outputs_number = 1;

        // data_set.set(samples_number, inputs_number, outputs_number);

        // Tensor<type, 2> inputs(samples_number, inputs_number);
        // inputs.setRandom();

        //// Tensor<type, 1> outputs(samples_number, outputs_number);
        //// outputs[0] = 1.0;
        //// outputs[1] = 0.0;

        // for(Index i = 2; i < samples_number; i++)
        // {
        ////     if((static_cast<Index>(inputs.calculate_row_sum(i)) % 2) < numeric_limits<type>::min())
        ////     {
        ////         outputs[i] = 0.0;
        ////     }
        ////     else
        ////     {
        ////         outputs[i] = 1.0;
        ////     }
        // }

        //// const Tensor<type, 2> data = inputs.append_column(outputs);

        //// data_set.set_data(data);
        //// data_set.set_input_variables_dimensions(Tensor<Index, 1>({3,7,7}));
        //// data_set.set_target_variables_dimensions(Tensor<Index, 1>({1}));
        //// data_set.set_training();

        // const type parameters_minimum = -100.0;
        // const type parameters_maximum = 100.0;

        //// ConvolutionalLayer* convolutional_layer_1 = new ConvolutionalLayer({3,7,7}, {2,2,2});
        //// Tensor<type, 2> filters_1({2,3,2,2}, 0);
        //// filters_1.setRandom(parameters_minimum, parameters_maximum);
        //// convolutional_layer_1->set_synaptic_weights(filters_1);
        //// Tensor<type, 1> biases_1(2, 0);
        //// biases_1.setRandom(parameters_minimum, parameters_maximum);
        //// convolutional_layer_1->set_biases(biases_1);

        //// ConvolutionalLayer* convolutional_layer_2 = new ConvolutionalLayer(convolutional_layer_1->get_outputs_dimensions(), {2,2,2});
        //// convolutional_layer_2->set_padding_option(OpenNN::ConvolutionalLayer::Same);
        //// Tensor<type, 2> filters_2({2,2,2,2}, 0);
        //// filters_2.setRandom(parameters_minimum, parameters_maximum);
        //// convolutional_layer_2->set_synaptic_weights(filters_2);
        //// Tensor<type, 1> biases_2(2, 0);
        //// biases_2.setRandom(parameters_minimum, parameters_maximum);
        //// convolutional_layer_2->set_biases(biases_2);

        //// PoolingLayer* pooling_layer_1 = new PoolingLayer(convolutional_layer_2->get_outputs_dimensions(), {2,2});

        //// ConvolutionalLayer* convolutional_layer_3 = new ConvolutionalLayer(pooling_layer_1->get_outputs_dimensions(), {1,2,2});
        //// convolutional_layer_3->set_padding_option(OpenNN::ConvolutionalLayer::Same);
        //// Tensor<type, 2> filters_3({1,2,2,2}, 0);
        //// filters_3.setRandom(parameters_minimum, parameters_maximum);
        //// convolutional_layer_3->set_synaptic_weights(filters_3);
        //// Tensor<type, 1> biases_3(1, 0);
        //// biases_3.setRandom(parameters_minimum, parameters_maximum);
        //// convolutional_layer_3->set_biases(biases_3);

        //// PoolingLayer* pooling_layer_2 = new PoolingLayer(convolutional_layer_3->get_outputs_dimensions(), {2,2});
        //// pooling_layer_2->set_pooling_method(PoolingLayer::MaxPooling);

        //// PoolingLayer* pooling_layer_3 = new PoolingLayer(pooling_layer_2->get_outputs_dimensions(), {2,2});
        //// pooling_layer_3->set_pooling_method(PoolingLayer::MaxPooling);

        //// PerceptronLayer* perceptron_layer = new PerceptronLayer(pooling_layer_3->get_outputs_dimensions().calculate_product(), 3, OpenNN::PerceptronLayer::ActivationFunction::Linear);
        //// perceptron_layer->set_parameters_random(parameters_minimum, parameters_maximum);

        //// ProbabilisticLayer* probabilistic_layer = new ProbabilisticLayer(perceptron_layer->get_neurons_number(), outputs_number);
        //// probabilistic_layer->set_parameters_random(parameters_minimum, parameters_maximum);

        //// neural_network.set();
        //// neural_network.add_layer(convolutional_layer_1);
        //// neural_network.add_layer(convolutional_layer_2);
        //// neural_network.add_layer(pooling_layer_1);
        //// neural_network.add_layer(convolutional_layer_3);
        //// neural_network.add_layer(pooling_layer_2);
        //// neural_network.add_layer(pooling_layer_3);
        //// neural_network.add_layer(perceptron_layer);
        //// neural_network.add_layer(probabilistic_layer);

        //// numerical_error_gradient = wse.calculate_error_gradient_numerical_differentiation();

        //// error_gradient = wse.calculate_error_gradient();

        //// assert_true(absolute_value(numerical_error_gradient - error_gradient) < 1e-3, LOG);
    }
}


void WeightedSquaredErrorTest::test_calculate_error_terms()
{
    cout << "test_calculate_error_terms\n";

    // NeuralNetwork neural_network;
    // Tensor<Index, 1> hidden_layers_size;
    // Tensor<type, 1> parameters;

    // DataSet data_set;

    // WeightedSquaredError wse(&neural_network, &data_set);

    // type error;

    // Tensor<type, 1> error_terms;

    // Test

    // Tensor<Index, 1> architecture;
    // architecture.setValues({2, 1});

    // neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_parameters_random();

    // data_set.set(3, 2, 2);
    // data_set.generate_data_binary_classification(3, 2);

    // error = wse.calculate_error();

    // error_terms = wse.calculate_training_error_terms();

    // assert_true(abs((error_terms*error_terms).sum() - error) < 1.0e-3, LOG);

    // Test

    // architecture.setValues({3, 1});

    // neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_parameters_random();

    // data_set.set(9, 3, 1);
    // data_set.generate_data_binary_classification(9, 3);

    // error = wse.calculate_error();

    // error_terms = wse.calculate_training_error_terms();

    // assert_true(abs((error_terms*error_terms).sum() - error) < 1.0e-3, LOG);
}


void WeightedSquaredErrorTest::test_calculate_error_terms_Jacobian()
{
    cout << "test_calculate_error_terms_Jacobian\n";

    // NumericalDifferentiation nd;

    // NeuralNetwork neural_network;
    // Tensor<Index, 1> architecture;
    // Tensor<type, 1> parameters;

    // DataSet data_set;

    // WeightedSquaredError wse(&neural_network, &data_set);

    // Tensor<type, 1> error_gradient;

    // Tensor<type, 1> error_terms;
    // Tensor<type, 2> terms_Jacobian;
    // Tensor<type, 2> numerical_Jacobian_terms;

    // Test

    // architecture.setValues({1, 1});

    // neural_network.set(NeuralNetwork::Approximation, architecture);

    // neural_network.set_parameters_constant(0.0);

    // data_set.set(1, 1, 1);

    // data_set.generate_data_binary_classification(3, 1);

    // terms_Jacobian = wse.calculate_error_terms_Jacobian();

    // assert_true(terms_Jacobian.dimension(0) == data_set.get_training_samples_number(), LOG);
    // assert_true(terms_Jacobian.dimension(1) == neural_network.get_parameters_number(), LOG);
    // assert_true(terms_Jacobian == 0.0, LOG);

    // Test

    // architecture.setValues({3, 4, 2});

    // neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_parameters_constant(0.0);

    // data_set.set(3, 2, 5);
    // wse.set(&neural_network, &data_set);
    // data_set.generate_data_binary_classification(3, 3);

    // terms_Jacobian = wse.calculate_error_terms_Jacobian();

    // assert_true(terms_Jacobian.dimension(0) == data_set.get_training_samples_number(), LOG);
    // assert_true(terms_Jacobian.dimension(1) == neural_network.get_parameters_number(), LOG);
    // assert_true(terms_Jacobian == 0.0, LOG);

    // Test

    // architecture.resize(3);
    // architecture[0] = 2;
    // architecture[1] = 1;
    // architecture[2] = 2;

    // neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_parameters_constant(0.0);

    // data_set.set(2, 2, 5);
    // wse.set(&neural_network, &data_set);
    // data_set.generate_data_binary_classification(3, 2);

    // terms_Jacobian = wse.calculate_error_terms_Jacobian();

    // assert_true(terms_Jacobian.dimension(0) == data_set.get_training_samples_number(), LOG);
    // assert_true(terms_Jacobian.dimension(1) == neural_network.get_parameters_number(), LOG);
    // assert_true(terms_Jacobian == 0.0, LOG);

    // Test

    // architecture.setValues({1, 1, 1});

    // neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_parameters_random();
    // parameters = neural_network.get_parameters();

    // data_set.set(3, 1, 1);
    // data_set.generate_data_binary_classification(3, 1);

    // terms_Jacobian = wse.calculate_error_terms_Jacobian();
    // numerical_Jacobian_terms = nd.calculate_Jacobian(wse, &WeightedSquaredError::calculate_training_error_terms, parameters);

    // assert_true(absolute_value(terms_Jacobian - numerical_Jacobian_terms) < 1.0e-3, LOG);

    // Test

    // architecture.setValues({2, 2, 1});

    // neural_network.set(NeuralNetwork::Approximation, architecture);
    // neural_network.set_parameters_random();
    // parameters = neural_network.get_parameters();

    // data_set.set(2, 2, 1);
    // data_set.generate_data_binary_classification(2, 2);

    // terms_Jacobian = wse.calculate_error_terms_Jacobian();
    // numerical_Jacobian_terms = nd.calculate_Jacobian(wse, &WeightedSquaredError::calculate_training_error_terms, parameters);

    // assert_true(absolute_value(terms_Jacobian - numerical_Jacobian_terms) < 1.0e-3, LOG);

    // Test

    // neural_network.set(2, 2, 2);
    // neural_network.set_parameters_random();

    // data_set.set(2, 2, 2);
    // data_set.generate_data_binary_classification(4, 2);

    // error_gradient = wse.calculate_gradient();

    // error_terms = wse.calculate_training_error_terms();
    // terms_Jacobian = wse.calculate_error_terms_Jacobian();

    // cout << (terms_Jacobian.calculate_transpose()).dot(error_terms)*2.0 << endl;
    // cout << error_gradient << endl;

    // assert_true(absolute_value((terms_Jacobian.calculate_transpose()).dot(error_terms)*2.0 - error_gradient) < 1.0e-3, LOG);
}


void WeightedSquaredErrorTest::test_to_XML()
{
    cout << "test_to_XML\n";
}


void WeightedSquaredErrorTest::test_from_XML()
{
    cout << "test_from_XML\n";
}


void WeightedSquaredErrorTest::run_test_case()
{
    cout << "Running weighted squared error test case...\n";

    // Constructor and destructor methods

    // test_constructor();
    // test_destructor();

    // Get methods

    // Set methods

    // Error methods

    test_calculate_error();

    test_calculate_error_gradient();

    // Error terms methods

    // test_calculate_error_terms();
    // test_calculate_error_terms_Jacobian();

    // Loss hessian methods

    // Serialization methods

    // test_to_XML();
    // test_from_XML();

    cout << "End of weighted squared error test case.\n\n";
}


// OpenNN: Open Neural Networks Library.
// Copyright (C) 2005-2020 Artificial Intelligence Techniques, SL.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA