1 // Copyright (C) 2008  Davis E. King (davis@dlib.net)
2 // License: Boost Software License   See LICENSE.txt for the full license.
3 #ifndef DLIB_OPTIMIZATIOn_H_
4 #define DLIB_OPTIMIZATIOn_H_
5 
6 #include <cmath>
7 #include <limits>
8 #include "optimization_abstract.h"
9 #include "optimization_search_strategies.h"
10 #include "optimization_stop_strategies.h"
11 #include "optimization_line_search.h"
12 
13 namespace dlib
14 {
15 
16 // ----------------------------------------------------------------------------------------
17 // ----------------------------------------------------------------------------------------
18 //                    Functions that transform other functions
19 // ----------------------------------------------------------------------------------------
20 // ----------------------------------------------------------------------------------------
21 
22     template <typename funct>
23     class central_differences
24     {
25     public:
f(f_)26         central_differences(const funct& f_, double eps_ = 1e-7) : f(f_), eps(eps_){}
27 
28         template <typename T>
operator()29         typename T::matrix_type operator()(const T& x) const
30         {
31             // T must be some sort of dlib matrix
32             COMPILE_TIME_ASSERT(is_matrix<T>::value);
33 
34             typename T::matrix_type der(x.size());
35             typename T::matrix_type e(x);
36             for (long i = 0; i < x.size(); ++i)
37             {
38                 const double old_val = e(i);
39 
40                 e(i) += eps;
41                 const double delta_plus = f(e);
42                 e(i) = old_val - eps;
43                 const double delta_minus = f(e);
44 
45                 der(i) = (delta_plus - delta_minus)/((old_val+eps)-(old_val-eps));
46 
47                 // and finally restore the old value of this element
48                 e(i) = old_val;
49             }
50 
51             return der;
52         }
53 
54         template <typename T, typename U>
operator()55         typename U::matrix_type operator()(const T& item, const U& x) const
56         {
57             // U must be some sort of dlib matrix
58             COMPILE_TIME_ASSERT(is_matrix<U>::value);
59 
60             typename U::matrix_type der(x.size());
61             typename U::matrix_type e(x);
62             for (long i = 0; i < x.size(); ++i)
63             {
64                 const double old_val = e(i);
65 
66                 e(i) += eps;
67                 const double delta_plus = f(item,e);
68                 e(i) = old_val - eps;
69                 const double delta_minus = f(item,e);
70 
71                 der(i) = (delta_plus - delta_minus)/((old_val+eps)-(old_val-eps));
72 
73                 // and finally restore the old value of this element
74                 e(i) = old_val;
75             }
76 
77             return der;
78         }
79 
80 
operator()81         double operator()(const double& x) const
82         {
83             return (f(x+eps)-f(x-eps))/((x+eps)-(x-eps));
84         }
85 
86     private:
87         const funct& f;
88         const double eps;
89     };
90 
91     template <typename funct>
derivative(const funct & f)92     const central_differences<funct> derivative(const funct& f) { return central_differences<funct>(f); }
93     template <typename funct>
derivative(const funct & f,double eps)94     const central_differences<funct> derivative(const funct& f, double eps)
95     {
96         DLIB_ASSERT (
97             eps > 0,
98             "\tcentral_differences derivative(f,eps)"
99             << "\n\tYou must give an epsilon > 0"
100             << "\n\teps:     " << eps
101         );
102         return central_differences<funct>(f,eps);
103     }
104 
105 // ----------------------------------------------------------------------------------------
106 
107     template <typename funct, typename EXP1, typename EXP2>
108     struct clamped_function_object
109     {
clamped_function_objectclamped_function_object110         clamped_function_object(
111             const funct& f_,
112             const matrix_exp<EXP1>& x_lower_,
113             const matrix_exp<EXP2>& x_upper_
114         ) : f(f_), x_lower(x_lower_), x_upper(x_upper_)
115         {
116         }
117 
118         template <typename T>
operatorclamped_function_object119         double operator() (
120             const T& x
121         ) const
122         {
123             return f(clamp(x,x_lower,x_upper));
124         }
125 
126         const funct& f;
127         const matrix_exp<EXP1>& x_lower;
128         const matrix_exp<EXP2>& x_upper;
129     };
130 
131     template <typename funct, typename EXP1, typename EXP2>
clamp_function(const funct & f,const matrix_exp<EXP1> & x_lower,const matrix_exp<EXP2> & x_upper)132     clamped_function_object<funct,EXP1,EXP2> clamp_function(
133         const funct& f,
134         const matrix_exp<EXP1>& x_lower,
135         const matrix_exp<EXP2>& x_upper
136     ) { return clamped_function_object<funct,EXP1,EXP2>(f,x_lower,x_upper); }
137 
138 // ----------------------------------------------------------------------------------------
139 
140 // ----------------------------------------------------------------------------------------
141 // ----------------------------------------------------------------------------------------
142 //                    Functions that perform unconstrained optimization
143 // ----------------------------------------------------------------------------------------
144 // ----------------------------------------------------------------------------------------
145 
    template <
        typename search_strategy_type,
        typename stop_strategy_type,
        typename funct,
        typename funct_der,
        typename T
        >
    double find_min (
        search_strategy_type search_strategy,
        stop_strategy_type stop_strategy,
        const funct& f,
        const funct_der& der,
        T& x,
        double min_f
    )
    {
        /*
            Minimizes f(x) starting from the supplied x, using search_strategy
            to pick descent directions and a Wolfe-condition line search for
            step sizes.  der(x) must return the gradient of f at x.  Iteration
            stops when stop_strategy says so or once f(x) drops to min_f or
            below.  On return x holds the best point found and the function
            value there is returned.  Throws dlib::error if f or der ever
            produces a non-finite value.
        */
        COMPILE_TIME_ASSERT(is_matrix<T>::value);
        // The starting point (i.e. x) must be a column vector.
        COMPILE_TIME_ASSERT(T::NC <= 1);

        DLIB_CASSERT (
            is_col_vector(x),
            "\tdouble find_min()"
            << "\n\tYou have to supply column vectors to this function"
            << "\n\tx.nc():    " << x.nc()
        );


        // g holds the current gradient, s the current search direction.
        T g, s;

        double f_value = f(x);
        g = der(x);

        if (!is_finite(f_value))
            throw error("The objective function generated non-finite outputs");
        if (!is_finite(g))
            throw error("The objective function generated non-finite outputs");

        while(stop_strategy.should_continue_search(x, f_value, g) && f_value > min_f)
        {
            s = search_strategy.get_next_direction(x, f_value, g);

            // NOTE(review): the trailing f_value/g arguments to
            // make_line_search_function appear to be output slots that the
            // line search fills in with the value/gradient at the step it
            // accepts (see optimization_line_search.h) -- which is why they
            // are re-validated below without being recomputed here.
            double alpha = line_search(
                        make_line_search_function(f,x,s, f_value),
                        f_value,
                        make_line_search_function(der,x,s, g),
                        dot(g,s), // compute initial gradient for the line search
                        search_strategy.get_wolfe_rho(), search_strategy.get_wolfe_sigma(), min_f,
                        search_strategy.get_max_line_search_iterations());

            // Take the search step indicated by the above line search
            x += alpha*s;

            if (!is_finite(f_value))
                throw error("The objective function generated non-finite outputs");
            if (!is_finite(g))
                throw error("The objective function generated non-finite outputs");
        }

        return f_value;
    }
207 
208 // ----------------------------------------------------------------------------------------
209 
    template <
        typename search_strategy_type,
        typename stop_strategy_type,
        typename funct,
        typename funct_der,
        typename T
        >
    double find_max (
        search_strategy_type search_strategy,
        stop_strategy_type stop_strategy,
        const funct& f,
        const funct_der& der,
        T& x,
        double max_f
    )
    {
        /*
            Maximizes f(x) starting from the supplied x.  Iteration stops when
            stop_strategy says so or once f(x) reaches max_f or above.  On
            return x holds the best point found and the function value there
            is returned.  Throws dlib::error on non-finite outputs from f/der.
        */
        COMPILE_TIME_ASSERT(is_matrix<T>::value);
        // The starting point (i.e. x) must be a column vector.
        COMPILE_TIME_ASSERT(T::NC <= 1);

        DLIB_CASSERT (
            is_col_vector(x),
            "\tdouble find_max()"
            << "\n\tYou have to supply column vectors to this function"
            << "\n\tx.nc():    " << x.nc()
        );

        // g holds the (negated) current gradient, s the search direction.
        T g, s;

        // This function is basically just a copy of find_min() but with - put in the right places
        // to flip things around so that it ends up looking for the max rather than the min.

        // Work in terms of minimizing -f, so f_value and g carry negated values
        // throughout the loop.
        double f_value = -f(x);
        g = -der(x);

        if (!is_finite(f_value))
            throw error("The objective function generated non-finite outputs");
        if (!is_finite(g))
            throw error("The objective function generated non-finite outputs");

        // f_value > -max_f  <=>  f(x) < max_f, i.e. the target is not yet reached.
        while(stop_strategy.should_continue_search(x, f_value, g) && f_value > -max_f)
        {
            s = search_strategy.get_next_direction(x, f_value, g);

            double alpha = line_search(
                        negate_function(make_line_search_function(f,x,s, f_value)),
                        f_value,
                        negate_function(make_line_search_function(der,x,s, g)),
                        dot(g,s), // compute initial gradient for the line search
                        search_strategy.get_wolfe_rho(), search_strategy.get_wolfe_sigma(), -max_f,
                        search_strategy.get_max_line_search_iterations()
                        );

            // Take the search step indicated by the above line search
            x += alpha*s;

            // Don't forget to negate these outputs from the line search since they are
            // from the unnegated versions of f() and der()
            g *= -1;
            f_value *= -1;

            if (!is_finite(f_value))
                throw error("The objective function generated non-finite outputs");
            if (!is_finite(g))
                throw error("The objective function generated non-finite outputs");

            // Gradient is zero, no more progress is possible.  So stop.
            if (alpha == 0)
                break;
        }

        // Undo the internal negation before handing the value back.
        return -f_value;
    }
283 
284 // ----------------------------------------------------------------------------------------
285 
    template <
        typename search_strategy_type,
        typename stop_strategy_type,
        typename funct,
        typename T
        >
    double find_min_using_approximate_derivatives (
        search_strategy_type search_strategy,
        stop_strategy_type stop_strategy,
        const funct& f,
        T& x,
        double min_f,
        double derivative_eps = 1e-7
    )
    {
        /*
            Same contract as find_min(), except no analytic derivative is
            required: gradients are approximated numerically by central
            differences with step size derivative_eps (must be > 0).  On
            return x holds the best point found and the function value there
            is returned.
        */
        COMPILE_TIME_ASSERT(is_matrix<T>::value);
        // The starting point (i.e. x) must be a column vector.
        COMPILE_TIME_ASSERT(T::NC <= 1);

        DLIB_CASSERT (
            is_col_vector(x) && derivative_eps > 0,
            "\tdouble find_min_using_approximate_derivatives()"
            << "\n\tYou have to supply column vectors to this function"
            << "\n\tx.nc():         " << x.nc()
            << "\n\tderivative_eps: " << derivative_eps
        );

        // g holds the numerically estimated gradient, s the search direction.
        T g, s;

        double f_value = f(x);
        g = derivative(f,derivative_eps)(x);

        if (!is_finite(f_value))
            throw error("The objective function generated non-finite outputs");
        if (!is_finite(g))
            throw error("The objective function generated non-finite outputs");

        while(stop_strategy.should_continue_search(x, f_value, g) && f_value > min_f)
        {
            s = search_strategy.get_next_direction(x, f_value, g);

            // NOTE(review): the trailing f_value argument appears to let the
            // line search write the objective value at the accepted step back
            // into f_value (see optimization_line_search.h).  The line-search
            // derivative is itself a 1-D central difference along s.
            double alpha = line_search(
                        make_line_search_function(f,x,s,f_value),
                        f_value,
                        derivative(make_line_search_function(f,x,s),derivative_eps),
                        dot(g,s),  // Sometimes the following line is a better way of determining the initial gradient.
                        //derivative(make_line_search_function(f,x,s),derivative_eps)(0),
                        search_strategy.get_wolfe_rho(), search_strategy.get_wolfe_sigma(), min_f,
                        search_strategy.get_max_line_search_iterations()
                        );

            // Take the search step indicated by the above line search
            x += alpha*s;

            // Unlike f_value, the full gradient must be re-estimated at the
            // new point explicitly.
            g = derivative(f,derivative_eps)(x);

            if (!is_finite(f_value))
                throw error("The objective function generated non-finite outputs");
            if (!is_finite(g))
                throw error("The objective function generated non-finite outputs");
        }

        return f_value;
    }
350 
351 // ----------------------------------------------------------------------------------------
352 
353     template <
354         typename search_strategy_type,
355         typename stop_strategy_type,
356         typename funct,
357         typename T
358         >
359     double find_max_using_approximate_derivatives (
360         search_strategy_type search_strategy,
361         stop_strategy_type stop_strategy,
362         const funct& f,
363         T& x,
364         double max_f,
365         double derivative_eps = 1e-7
366     )
367     {
368         COMPILE_TIME_ASSERT(is_matrix<T>::value);
369         // The starting point (i.e. x) must be a column vector.
370         COMPILE_TIME_ASSERT(T::NC <= 1);
371 
372         DLIB_CASSERT (
373             is_col_vector(x) && derivative_eps > 0,
374             "\tdouble find_max_using_approximate_derivatives()"
375             << "\n\tYou have to supply column vectors to this function"
376             << "\n\tx.nc():         " << x.nc()
377             << "\n\tderivative_eps: " << derivative_eps
378         );
379 
380         // Just negate the necessary things and call the find_min version of this function.
381         return -find_min_using_approximate_derivatives(
382             search_strategy,
383             stop_strategy,
384             negate_function(f),
385             x,
386             -max_f,
387             derivative_eps
388         );
389     }
390 
391 // ----------------------------------------------------------------------------------------
392 // ----------------------------------------------------------------------------------------
393 //                      Functions for box constrained optimization
394 // ----------------------------------------------------------------------------------------
395 // ----------------------------------------------------------------------------------------
396 
397     template <typename T, typename U, typename V>
zero_bounded_variables(const double eps,T vect,const T & x,const T & gradient,const U & x_lower,const V & x_upper)398     T zero_bounded_variables (
399         const double eps,
400         T vect,
401         const T& x,
402         const T& gradient,
403         const U& x_lower,
404         const V& x_upper
405     )
406     {
407         for (long i = 0; i < gradient.size(); ++i)
408         {
409             const double tol = eps*std::abs(x(i));
410             // if x(i) is an active bound constraint
411             if (x_lower(i)+tol >= x(i) && gradient(i) > 0)
412                 vect(i) = 0;
413             else if (x_upper(i)-tol <= x(i) && gradient(i) < 0)
414                 vect(i) = 0;
415         }
416         return vect;
417     }
418 
419 // ----------------------------------------------------------------------------------------
420 
421     template <typename T, typename U, typename V>
gap_step_assign_bounded_variables(const double eps,T vect,const T & x,const T & gradient,const U & x_lower,const V & x_upper)422     T gap_step_assign_bounded_variables (
423         const double eps,
424         T vect,
425         const T& x,
426         const T& gradient,
427         const U& x_lower,
428         const V& x_upper
429     )
430     {
431         for (long i = 0; i < gradient.size(); ++i)
432         {
433             const double tol = eps*std::abs(x(i));
434             // If x(i) is an active bound constraint then we should set its search
435             // direction such that a single step along the direction either does nothing or
436             // closes the gap of size tol before hitting the bound exactly.
437             if (x_lower(i)+tol >= x(i) && gradient(i) > 0)
438                 vect(i) = x_lower(i)-x(i);
439             else if (x_upper(i)-tol <= x(i) && gradient(i) < 0)
440                 vect(i) = x_upper(i)-x(i);
441         }
442         return vect;
443     }
444 
445 // ----------------------------------------------------------------------------------------
446 
447     template <
448         typename search_strategy_type,
449         typename stop_strategy_type,
450         typename funct,
451         typename funct_der,
452         typename T,
453         typename EXP1,
454         typename EXP2
455         >
find_min_box_constrained(search_strategy_type search_strategy,stop_strategy_type stop_strategy,const funct & f,const funct_der & der,T & x,const matrix_exp<EXP1> & x_lower,const matrix_exp<EXP2> & x_upper)456     double find_min_box_constrained (
457         search_strategy_type search_strategy,
458         stop_strategy_type stop_strategy,
459         const funct& f,
460         const funct_der& der,
461         T& x,
462         const matrix_exp<EXP1>& x_lower,
463         const matrix_exp<EXP2>& x_upper
464     )
465     {
466         /*
467             The implementation of this function is more or less based on the discussion in
468             the paper Projected Newton-type Methods in Machine Learning by Mark Schmidt, et al.
469         */
470 
471         // make sure the requires clause is not violated
472         COMPILE_TIME_ASSERT(is_matrix<T>::value);
473         // The starting point (i.e. x) must be a column vector.
474         COMPILE_TIME_ASSERT(T::NC <= 1);
475 
476         DLIB_CASSERT (
477             is_col_vector(x) && is_col_vector(x_lower) && is_col_vector(x_upper) &&
478             x.size() == x_lower.size() && x.size() == x_upper.size(),
479             "\tdouble find_min_box_constrained()"
480             << "\n\t The inputs to this function must be equal length column vectors."
481             << "\n\t is_col_vector(x):       " << is_col_vector(x)
482             << "\n\t is_col_vector(x_upper): " << is_col_vector(x_upper)
483             << "\n\t is_col_vector(x_upper): " << is_col_vector(x_upper)
484             << "\n\t x.size():               " << x.size()
485             << "\n\t x_lower.size():         " << x_lower.size()
486             << "\n\t x_upper.size():         " << x_upper.size()
487         );
488         DLIB_ASSERT (
489             min(x_upper-x_lower) >= 0,
490             "\tdouble find_min_box_constrained()"
491             << "\n\t You have to supply proper box constraints to this function."
492             << "\n\r min(x_upper-x_lower): " << min(x_upper-x_lower)
493         );
494 
495 
496         T g, s;
497         double f_value = f(x);
498         g = der(x);
499 
500         if (!is_finite(f_value))
501             throw error("The objective function generated non-finite outputs");
502         if (!is_finite(g))
503             throw error("The objective function generated non-finite outputs");
504 
505         // gap_eps determines how close we have to get to a bound constraint before we
506         // start basically dropping it from the optimization and consider it to be an
507         // active constraint.
508         const double gap_eps = 1e-8;
509 
510         double last_alpha = 1;
511         while(stop_strategy.should_continue_search(x, f_value, g))
512         {
513             s = search_strategy.get_next_direction(x, f_value, zero_bounded_variables(gap_eps, g, x, g, x_lower, x_upper));
514             s = gap_step_assign_bounded_variables(gap_eps, s, x, g, x_lower, x_upper);
515 
516             double alpha = backtracking_line_search(
517                         make_line_search_function(clamp_function(f,x_lower,x_upper), x, s, f_value),
518                         f_value,
519                         dot(g,s), // compute gradient for the line search
520                         last_alpha,
521                         search_strategy.get_wolfe_rho(),
522                         search_strategy.get_max_line_search_iterations());
523 
524             // Do a trust region style thing for alpha.  The idea is that if we take a
525             // small step then we are likely to take another small step.  So we reuse the
526             // alpha from the last iteration unless the line search didn't shrink alpha at
527             // all, in that case, we start with a bigger alpha next time.
528             if (alpha == last_alpha)
529                 last_alpha = std::min(last_alpha*10,1.0);
530             else
531                 last_alpha = alpha;
532 
533             // Take the search step indicated by the above line search
534             x = dlib::clamp(x + alpha*s, x_lower, x_upper);
535             g = der(x);
536 
537             if (!is_finite(f_value))
538                 throw error("The objective function generated non-finite outputs");
539             if (!is_finite(g))
540                 throw error("The objective function generated non-finite outputs");
541         }
542 
543         return f_value;
544     }
545 
546 // ----------------------------------------------------------------------------------------
547 
548     template <
549         typename search_strategy_type,
550         typename stop_strategy_type,
551         typename funct,
552         typename funct_der,
553         typename T
554         >
find_min_box_constrained(search_strategy_type search_strategy,stop_strategy_type stop_strategy,const funct & f,const funct_der & der,T & x,double x_lower,double x_upper)555     double find_min_box_constrained (
556         search_strategy_type search_strategy,
557         stop_strategy_type stop_strategy,
558         const funct& f,
559         const funct_der& der,
560         T& x,
561         double x_lower,
562         double x_upper
563     )
564     {
565         // The starting point (i.e. x) must be a column vector.
566         COMPILE_TIME_ASSERT(T::NC <= 1);
567 
568         typedef typename T::type scalar_type;
569         return find_min_box_constrained(search_strategy,
570                                         stop_strategy,
571                                         f,
572                                         der,
573                                         x,
574                                         uniform_matrix<scalar_type>(x.size(),1,x_lower),
575                                         uniform_matrix<scalar_type>(x.size(),1,x_upper) );
576     }
577 
578 // ----------------------------------------------------------------------------------------
579 
580     template <
581         typename search_strategy_type,
582         typename stop_strategy_type,
583         typename funct,
584         typename funct_der,
585         typename T,
586         typename EXP1,
587         typename EXP2
588         >
find_max_box_constrained(search_strategy_type search_strategy,stop_strategy_type stop_strategy,const funct & f,const funct_der & der,T & x,const matrix_exp<EXP1> & x_lower,const matrix_exp<EXP2> & x_upper)589     double find_max_box_constrained (
590         search_strategy_type search_strategy,
591         stop_strategy_type stop_strategy,
592         const funct& f,
593         const funct_der& der,
594         T& x,
595         const matrix_exp<EXP1>& x_lower,
596         const matrix_exp<EXP2>& x_upper
597     )
598     {
599         // make sure the requires clause is not violated
600         COMPILE_TIME_ASSERT(is_matrix<T>::value);
601         // The starting point (i.e. x) must be a column vector.
602         COMPILE_TIME_ASSERT(T::NC <= 1);
603 
604         DLIB_CASSERT (
605             is_col_vector(x) && is_col_vector(x_lower) && is_col_vector(x_upper) &&
606             x.size() == x_lower.size() && x.size() == x_upper.size(),
607             "\tdouble find_max_box_constrained()"
608             << "\n\t The inputs to this function must be equal length column vectors."
609             << "\n\t is_col_vector(x):       " << is_col_vector(x)
610             << "\n\t is_col_vector(x_upper): " << is_col_vector(x_upper)
611             << "\n\t is_col_vector(x_upper): " << is_col_vector(x_upper)
612             << "\n\t x.size():               " << x.size()
613             << "\n\t x_lower.size():         " << x_lower.size()
614             << "\n\t x_upper.size():         " << x_upper.size()
615         );
616         DLIB_ASSERT (
617             min(x_upper-x_lower) >= 0,
618             "\tdouble find_max_box_constrained()"
619             << "\n\t You have to supply proper box constraints to this function."
620             << "\n\r min(x_upper-x_lower): " << min(x_upper-x_lower)
621         );
622 
623         // This function is basically just a copy of find_min_box_constrained() but with - put
624         // in the right places to flip things around so that it ends up looking for the max
625         // rather than the min.
626 
627         T g, s;
628         double f_value = -f(x);
629         g = -der(x);
630 
631         if (!is_finite(f_value))
632             throw error("The objective function generated non-finite outputs");
633         if (!is_finite(g))
634             throw error("The objective function generated non-finite outputs");
635 
636         // gap_eps determines how close we have to get to a bound constraint before we
637         // start basically dropping it from the optimization and consider it to be an
638         // active constraint.
639         const double gap_eps = 1e-8;
640 
641         double last_alpha = 1;
642         while(stop_strategy.should_continue_search(x, f_value, g))
643         {
644             s = search_strategy.get_next_direction(x, f_value, zero_bounded_variables(gap_eps, g, x, g, x_lower, x_upper));
645             s = gap_step_assign_bounded_variables(gap_eps, s, x, g, x_lower, x_upper);
646 
647             double alpha = backtracking_line_search(
648                         negate_function(make_line_search_function(clamp_function(f,x_lower,x_upper), x, s, f_value)),
649                         f_value,
650                         dot(g,s), // compute gradient for the line search
651                         last_alpha,
652                         search_strategy.get_wolfe_rho(),
653                         search_strategy.get_max_line_search_iterations());
654 
655             // Do a trust region style thing for alpha.  The idea is that if we take a
656             // small step then we are likely to take another small step.  So we reuse the
657             // alpha from the last iteration unless the line search didn't shrink alpha at
658             // all, in that case, we start with a bigger alpha next time.
659             if (alpha == last_alpha)
660                 last_alpha = std::min(last_alpha*10,1.0);
661             else
662                 last_alpha = alpha;
663 
664             // Take the search step indicated by the above line search
665             x = dlib::clamp(x + alpha*s, x_lower, x_upper);
666             g = -der(x);
667 
668             // Don't forget to negate the output from the line search since it is  from the
669             // unnegated version of f()
670             f_value *= -1;
671 
672             if (!is_finite(f_value))
673                 throw error("The objective function generated non-finite outputs");
674             if (!is_finite(g))
675                 throw error("The objective function generated non-finite outputs");
676         }
677 
678         return -f_value;
679     }
680 
681 // ----------------------------------------------------------------------------------------
682 
683     template <
684         typename search_strategy_type,
685         typename stop_strategy_type,
686         typename funct,
687         typename funct_der,
688         typename T
689         >
find_max_box_constrained(search_strategy_type search_strategy,stop_strategy_type stop_strategy,const funct & f,const funct_der & der,T & x,double x_lower,double x_upper)690     double find_max_box_constrained (
691         search_strategy_type search_strategy,
692         stop_strategy_type stop_strategy,
693         const funct& f,
694         const funct_der& der,
695         T& x,
696         double x_lower,
697         double x_upper
698     )
699     {
700         // The starting point (i.e. x) must be a column vector.
701         COMPILE_TIME_ASSERT(T::NC <= 1);
702 
703         typedef typename T::type scalar_type;
704         return find_max_box_constrained(search_strategy,
705                                         stop_strategy,
706                                         f,
707                                         der,
708                                         x,
709                                         uniform_matrix<scalar_type>(x.size(),1,x_lower),
710                                         uniform_matrix<scalar_type>(x.size(),1,x_upper) );
711     }
712 
713 // ----------------------------------------------------------------------------------------
714 
715 }
716 
717 #endif // DLIB_OPTIMIZATIOn_H_
718 
719