1 //   OpenNN: Open Neural Networks Library
2 //   www.opennn.net
3 //
4 //   L E A R N I N G   R A T E   A L G O R I T H M   T E S T   C L A S S
5 //
6 //   Artificial Intelligence Techniques SL
7 //   artelnics@artelnics.com
8 
9 #include "learning_rate_algorithm_test.h"
10 
11 
/// Default constructor: builds the test fixture on top of the generic
/// UnitTesting base class.  This fixture has no members of its own.
LearningRateAlgorithmTest::LearningRateAlgorithmTest() : UnitTesting()
{
}
15 
16 
/// Destructor: nothing to release here; any cleanup is handled by the
/// UnitTesting base class.
LearningRateAlgorithmTest::~LearningRateAlgorithmTest()
{
}
20 
21 
test_constructor()22 void LearningRateAlgorithmTest::test_constructor()
23 {
24    cout << "test_constructor\n";
25 
26    SumSquaredError sum_squared_error;
27 
28    LearningRateAlgorithm tra1(&sum_squared_error);
29 
30    assert_true(tra1.has_loss_index() == true, LOG);
31 
32    LearningRateAlgorithm tra2;
33 
34    assert_true(tra2.has_loss_index() == false, LOG);
35 }
36 
37 
/// The destructor has no observable behavior to verify, so this test only
/// prints its banner.
void LearningRateAlgorithmTest::test_destructor()
{
   cout << "test_destructor\n";
}
42 
43 
test_get_loss_index_pointer()44 void LearningRateAlgorithmTest::test_get_loss_index_pointer()
45 {
46    cout << "test_get_loss_index_pointer\n";
47 
48    SumSquaredError sum_squared_error;
49 
50    LearningRateAlgorithm tra(&sum_squared_error);
51 
52    LossIndex* pfp = tra.get_loss_index_pointer();
53 
54    assert_true(pfp != nullptr, LOG);
55 }
56 
57 
test_get_learning_rate_method()58 void LearningRateAlgorithmTest::test_get_learning_rate_method()
59 {
60    cout << "test_get_learning_rate_method\n";
61 
62    LearningRateAlgorithm tra;
63 
64    tra.set_learning_rate_method(LearningRateAlgorithm::GoldenSection);
65    assert_true(tra.get_learning_rate_method() == LearningRateAlgorithm::GoldenSection, LOG);
66 
67    tra.set_learning_rate_method(LearningRateAlgorithm::BrentMethod);
68    assert_true(tra.get_learning_rate_method() == LearningRateAlgorithm::BrentMethod, LOG);
69 }
70 
71 
/// @todo Empty test: only prints its banner; the method-name getter is not
/// exercised yet.
void LearningRateAlgorithmTest::test_get_learning_rate_method_name()
{
   cout << "test_get_learning_rate_method_name\n";
}
76 
77 
test_get_display()78 void LearningRateAlgorithmTest::test_get_display()
79 {
80    cout << "test_get_display\n";
81 
82    LearningRateAlgorithm tra;
83 
84    tra.set_display(false);
85 
86    assert_true(tra.get_display() == false, LOG);
87 }
88 
89 
/// @todo Empty test: only prints its banner; the loss tolerance getter is
/// not exercised yet.
void LearningRateAlgorithmTest::test_get_loss_tolerance()
{
   cout << "test_get_loss_tolerance\n";
}
94 
95 
/// @todo Empty test: only prints its banner; set() is not exercised yet.
void LearningRateAlgorithmTest::test_set()
{
   cout << "test_set\n";
}
100 
101 
/// @todo Empty test: only prints its banner; set_default() is not
/// exercised yet.
void LearningRateAlgorithmTest::test_set_default()
{
   cout << "test_set_default\n";
}
106 
107 
/// @todo Empty test: only prints its banner; set_loss_index_pointer() is
/// not exercised yet.
void LearningRateAlgorithmTest::test_set_loss_index_pointer()
{
   cout << "test_set_loss_index_pointer\n";
}
112 
113 
/// @todo Empty test: only prints its banner; the setter itself is covered
/// indirectly by test_get_display().
void LearningRateAlgorithmTest::test_set_display()
{
   cout << "test_set_display\n";
}
118 
119 
/// @todo Empty test: only prints its banner; the setter itself is covered
/// indirectly by test_get_learning_rate_method().
void LearningRateAlgorithmTest::test_set_learning_rate_method()
{
   cout << "test_set_learning_rate_method\n";
}
124 
125 
/// @todo Empty test: only prints its banner; set_loss_tolerance() is not
/// exercised yet.
void LearningRateAlgorithmTest::test_set_loss_tolerance()
{
   cout << "test_set_loss_tolerance\n";
}
130 
131 
/// @todo Empty test: only prints its banner; calculate_directional_point()
/// is not exercised yet.
void LearningRateAlgorithmTest::test_calculate_directional_point()
{
   cout << "test_calculate_directional_point\n";
}
136 
137 
/// @todo Test for calculate_fixed_directional_point().  The whole body
/// below is commented out, so only the banner prints.
/// NOTE(review): the reason it was disabled is not visible in this file —
/// confirm the current API before re-enabling.
void LearningRateAlgorithmTest::test_calculate_fixed_directional_point()
{
   cout << "test_calculate_fixed_directional_point\n";

//   Tensor<Index, 1> indices;

//   NeuralNetwork neural_network;

//   Tensor<Index, 1> architecture;
//   Tensor<type, 1> parameters;

//   SumSquaredError sum_squared_error(&neural_network);

//   type loss;
//   Tensor<type, 1> gradient;

//   LearningRateAlgorithm tra(&sum_squared_error);

//   Tensor<type, 1> training_direction;
//   type learning_rate;

//   pair<type,type> directional_point;

//   // Test

//   architecture.setValues({1,1});

//   neural_network.set(NeuralNetwork::Approximation, architecture);

//   neural_network.set_parameters_constant(1.0);

////   loss = sum_squared_error.calculate_training_loss();

////   gradient = sum_squared_error.calculate_training_loss_gradient();

//   training_direction = -gradient;
//   learning_rate = 0.001;

////   directional_point = tra.calculate_fixed_directional_point(loss, training_direction, learning_rate);

//   assert_true(directional_point.second < loss, LOG);

////   assert_true(abs(directional_point.second - sum_squared_error.calculate_training_loss(training_direction, learning_rate)) <= numeric_limits<type>::min(), LOG);

//   parameters = neural_network.get_parameters();

////   neural_network.set_parameters(parameters + training_direction*learning_rate);

////   assert_true(abs(directional_point.second - sum_squared_error.calculate_training_loss()) <= numeric_limits<type>::min(), LOG);

//   // Test

//   architecture.setValues({1,1});

//   neural_network.set(NeuralNetwork::Approximation, architecture);

//   neural_network.set_parameters_constant(1.0);

////   training_direction.set(2, -1.0);
////   learning_rate = 1.0;

////   directional_point = tra.calculate_fixed_directional_point(3.14, training_direction, learning_rate);

////   assert_true(directional_point.first == 1.0, LOG);
////   assert_true(directional_point.second == 0.0, LOG);
}
204 
205 
/// @todo Test for calculate_bracketing_triplet().  The whole body below is
/// commented out, so only the banner prints.  The disabled assertions check
/// the bracketing invariant A.first <= U.first <= B.first with
/// U.second <= A.second and U.second <= B.second, plus zero-length triplets
/// for zero parameters or a zero initial learning rate.
void LearningRateAlgorithmTest::test_calculate_bracketing_triplet() // @todo
{
    cout << "test_calculate_bracketing_triplet\n";

//    DataSet data_set(2, 1, 1);

//    data_set.set_data_random();

//    Tensor<Index, 1> samples_indices(0, 1, data_set.get_samples_number()-1);

//    Tensor<Index, 1> architecture;

//    architecture.setValues({1,1});

//    NeuralNetwork neural_network(NeuralNetwork::Approximation, architecture);

//    SumSquaredError sum_squared_error(&neural_network, &data_set);

//    LearningRateAlgorithm tra(&sum_squared_error);

//    type loss = 0.0;
//    Tensor<type, 1> training_direction;
//    type initial_learning_rate = 0.0;

//    LearningRateAlgorithm::Triplet triplet;

//    // Test

//    sum_squared_error.set_regularization_method(LossIndex::L2);

//    neural_network.set_parameters_random();

//    loss = sum_squared_error.calculate_training_loss();
//    training_direction = sum_squared_error.calculate_training_loss_gradient()*(-1.0);
//    initial_learning_rate = 0.01;

//    triplet = tra.calculate_bracketing_triplet(loss, training_direction, initial_learning_rate);

//    assert_true(triplet.A.first <= triplet.U.first, LOG);
//    assert_true(triplet.U.first <= triplet.B.first, LOG);
//    assert_true(triplet.A.second >= triplet.U.second, LOG);
//    assert_true(triplet.U.second <= triplet.B.second, LOG);

//    // Test

//    neural_network.set_parameters_constant(0.0);

////    loss = sum_squared_error.calculate_training_loss();
////    training_direction = sum_squared_error.calculate_training_loss_gradient()*(-1.0);
////    initial_learning_rate = 0.01;

////    triplet = tra.calculate_bracketing_triplet(loss, training_direction, initial_learning_rate);

//    /// @todo test fails

//    assert_true(triplet.has_length_zero(), LOG);

//    // Test

//    neural_network.set_parameters_constant(1.0);

////    loss = sum_squared_error.calculate_training_loss();
////    training_direction = sum_squared_error.calculate_training_loss_gradient()*(-1.0);
////    initial_learning_rate = 0.0;

////    triplet = tra.calculate_bracketing_triplet(loss, training_direction, initial_learning_rate);

//    assert_true(triplet.has_length_zero(), LOG);

//    // Test

//    data_set.set(1, 1, 1);
//    data_set.set_data_random();

////    samples_indices.set(0, 1, data_set.get_samples_number()-1);

////    Tensor<Index, 1> architecture;

//    architecture.setValues({1,1});

//    neural_network.set(NeuralNetwork::Approximation, architecture);
//    neural_network.set_parameters_random();

////    loss = sum_squared_error.calculate_training_loss();
////    training_direction = sum_squared_error.calculate_training_loss_gradient()*(-1.0);
//    initial_learning_rate = 0.001;

////    triplet = tra.calculate_bracketing_triplet(loss, training_direction, initial_learning_rate);

//    assert_true(triplet.A.first <= triplet.U.first, LOG);
//    assert_true(triplet.U.first <= triplet.B.first, LOG);
//    assert_true(triplet.A.second >= triplet.U.second, LOG);
//    assert_true(triplet.U.second <= triplet.B.second, LOG);

//    // Test

//    data_set.set(3, 1, 1);
//    data_set.set_data_random();

////    samples_indices.set(0, 1, data_set.get_samples_number()-1);

//    architecture.setValues({1,1});

//    neural_network.set(NeuralNetwork::Approximation, architecture);
//    neural_network.set_parameters_random();

////    loss = sum_squared_error.calculate_training_loss();
////    training_direction = sum_squared_error.calculate_training_loss_gradient()*(-1.0);
////    initial_learning_rate = 0.001;

////    triplet = tra.calculate_bracketing_triplet(loss, training_direction, initial_learning_rate);

//    assert_true(triplet.A.first <= triplet.U.first, LOG);
//    assert_true(triplet.U.first <= triplet.B.first, LOG);
//    assert_true(triplet.A.second >= triplet.U.second, LOG);
//    assert_true(triplet.U.second <= triplet.B.second, LOG);

}
324 
325 
/// @todo Test for calculate_golden_section_directional_point().  The whole
/// body below is commented out, so only the banner prints.
void LearningRateAlgorithmTest::test_calculate_golden_section_directional_point()
{
   cout << "test_calculate_golden_section_directional_point\n";

//   DataSet data_set(1, 1, 1);
//   Tensor<Index, 1> indices(1,1,data_set.get_samples_number()-1);

//   Tensor<Index, 1> architecture;

//   architecture.setValues({1,1});

//   NeuralNetwork neural_network(NeuralNetwork::Approximation, architecture);

//   SumSquaredError sum_squared_error(&neural_network);

//   LearningRateAlgorithm tra(&sum_squared_error);

//   neural_network.set_parameters_constant(1.0);

//   type loss = sum_squared_error.calculate_training_loss();
//   Tensor<type, 1> gradient = sum_squared_error.calculate_training_loss_gradient();

//   Tensor<type, 1> training_direction = gradient*(-1.0);
//   type initial_learning_rate = 0.001;

//   type loss_tolerance = 1.0e-6;
//   tra.set_loss_tolerance(loss_tolerance);

//   pair<type,type> directional_point
//   = tra.calculate_golden_section_directional_point(loss, training_direction, initial_learning_rate);

//   assert_true(directional_point.first >= 0.0, LOG);
//   assert_true(directional_point.second < loss, LOG);
}
360 
361 
/// @todo Test for calculate_Brent_method_directional_point().  Only the
/// setup below runs; the call under test and all assertions are commented
/// out, so nothing is actually verified yet.
void LearningRateAlgorithmTest::test_calculate_Brent_method_directional_point()
{
   cout << "test_calculate_Brent_method_directional_point\n";

   // Setup: a 1-input / 1-target data set and a 1-1 approximation network
   // with constant parameters.

   DataSet data_set(1, 1, 1);
   Tensor<Index, 1> indices(3);
   indices.setValues({1,1,data_set.get_samples_number()-1});   // NOTE(review): unused by the live code below

   Tensor<Index, 1> architecture(2);

   architecture.setValues({1,1});

   NeuralNetwork neural_network(NeuralNetwork::Approximation, architecture);

   neural_network.set_parameters_constant(1.0);
//   type loss = sum_squared_error.calculate_training_loss();
//   Tensor<type, 1> gradient = sum_squared_error.calculate_training_loss_gradient();

//   Tensor<type, 1> training_direction = gradient*(-1.0);
//   type initial_learning_rate = 0.001;

//   type loss_tolerance = 1.0e-6;
//   tra.set_loss_tolerance(loss_tolerance);

//   pair<type,type> directional_point
//   = tra.calculate_Brent_method_directional_point(loss, training_direction, initial_learning_rate);

//   assert_true(directional_point.first >= 0.0, LOG);
//   assert_true(directional_point.second < loss, LOG);
}
392 
393 
/// @todo Serialization test: the to_XML() call and its checks are commented
/// out; only a default-constructed algorithm is created.
void LearningRateAlgorithmTest::test_to_XML()
{
   cout << "test_to_XML\n";

   LearningRateAlgorithm  tra;

//   tinyxml2::XMLDocument* document = tra.to_XML();

//   assert_true(document != nullptr, LOG);

//   delete document;
}
406 
407 
run_test_case()408 void LearningRateAlgorithmTest::run_test_case()
409 {
410    cout << "Running training rate algorithm test case...\n";
411 
412    // Constructor and destructor methods
413 
414    test_constructor();
415    test_destructor();
416 
417    // Get methods
418 
419    test_get_loss_index_pointer();
420 
421    // Training operators
422 
423    test_get_learning_rate_method();
424    test_get_learning_rate_method_name();
425 
426    // Training parameters
427 
428    test_get_loss_tolerance();
429 
430    // Utilities
431 
432    test_get_display();
433 
434    // Set methods
435 
436    test_set();
437    test_set_default();
438    test_set_loss_index_pointer();
439 
440    // Training operators
441 
442    test_set_learning_rate_method();
443 
444    // Training parameters
445 
446    test_set_loss_tolerance();
447 
448    // Utilities
449 
450    test_set_display();
451 
452    // Training methods
453 
454    test_calculate_bracketing_triplet();
455    test_calculate_fixed_directional_point();
456    test_calculate_golden_section_directional_point();
457    test_calculate_Brent_method_directional_point();
458    test_calculate_directional_point();
459 
460    // Serialization methods
461 
462    test_to_XML();
463 
464    cout << "End of training rate algorithm test case.\n\n";
465 }
466 
467 // OpenNN: Open Neural Networks Library.
468 // Copyright (C) 2005-2020 Artificial Intelligence Techniques, SL.
469 //
470 // This library is free software; you can redistribute it and/or
471 // modify it under the terms of the GNU Lesser General Public
472 // License as published by the Free Software Foundation; either
473 // version 2.1 of the License, or any later version.
474 //
475 // This library is distributed in the hope that it will be useful,
476 // but WITHOUT ANY WARRANTY; without even the implied warranty of
477 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
478 // Lesser General Public License for more details.
479 
480 // You should have received a copy of the GNU Lesser General Public
481 // License along with this library; if not, write to the Free Software
482 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
483