#----------------------------------------------------------------------
#
# math/optimize.tcl --
#
#	This file contains functions for optimization of a function
#	or expression.
#
# Copyright (c) 2004, by Arjen Markus.
# Copyright (c) 2004, 2005 by Kevin B. Kenny.  All rights reserved.
#
# See the file "license.terms" for information on usage and redistribution
# of this file, and for a DISCLAIMER OF ALL WARRANTIES.
#
# RCS: @(#) $Id: optimize.tcl,v 1.12 2011/01/18 07:49:53 arjenmarkus Exp $
#
#----------------------------------------------------------------------

package require Tcl 8.4

# math::optimize --
#    Namespace for the commands
#
namespace eval ::math::optimize {
   namespace export minimum maximum solveLinearProgram linearProgramMaximum
   namespace export min_bound_1d min_unbound_1d nelderMead

   # Possible extension: minimumExpr, maximumExpr
}

# minimum --
#    Minimize a given function over a given interval
#
# Arguments:
#    begin       Start of the interval
#    end         End of the interval
#    func        Name of the function to be minimized (takes one
#                argument)
#    maxerr      Maximum relative error (defaults to 1.0e-4)
# Return value:
#    Computed value for which the function is minimal
# Notes:
#    The function need not be differentiable, but it is assumed to be
#    continuous. There is no provision for sub-intervals where the
#    function is constant (this might happen when the maximum error
#    is very small, < 1.0e-15)
#
# Warning:
#    This procedure is deprecated - use min_bound_1d instead
#
proc ::math::optimize::minimum { begin end func {maxerr 1.0e-4} } {

   set nosteps  [expr {3+int(-log($maxerr)/log(2.0))}]
   set delta    [expr {0.5*($end-$begin)*$maxerr}]

   for { set step 0 } { $step < $nosteps } { incr step } {
      set x1 [expr {($end+$begin)/2.0}]
      set x2 [expr {$x1+$delta}]

      set fx1 [uplevel 1 $func $x1]
      set fx2 [uplevel 1 $func $x2]

      if {$fx1 < $fx2} {
         set end   $x1
      } else {
         set begin $x1
      }
   }
   return $x1
}

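# Example (illustrative sketch, shown as a comment so that loading the
# package has no side effects; "parabola" is a hypothetical helper):
#
#     proc parabola {x} { expr {($x-2.0)*($x-2.0) + 1.0} }
#     puts [::math::optimize::minimum 0.0 5.0 parabola]
#     # prints an abscissa close to 2.0
#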
# maximum --
#    Maximize a given function over a given interval
#
# Arguments:
#    begin       Start of the interval
#    end         End of the interval
#    func        Name of the function to be maximized (takes one
#                argument)
#    maxerr      Maximum relative error (defaults to 1.0e-4)
# Return value:
#    Computed value for which the function is maximal
# Notes:
#    The function need not be differentiable, but it is assumed to be
#    continuous. There is no provision for sub-intervals where the
#    function is constant (this might happen when the maximum error
#    is very small, < 1.0e-15)
#
# Warning:
#    This procedure is deprecated - use min_bound_1d on the negated
#    function instead
#
proc ::math::optimize::maximum { begin end func {maxerr 1.0e-4} } {

   set nosteps  [expr {3+int(-log($maxerr)/log(2.0))}]
   set delta    [expr {0.5*($end-$begin)*$maxerr}]

   for { set step 0 } { $step < $nosteps } { incr step } {
      set x1 [expr {($end+$begin)/2.0}]
      set x2 [expr {$x1+$delta}]

      set fx1 [uplevel 1 $func $x1]
      set fx2 [uplevel 1 $func $x2]

      if {$fx1 > $fx2} {
         set end   $x1
      } else {
         set begin $x1
      }
   }
   return $x1
}

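# Example (illustrative sketch; "bump" is a hypothetical helper):
#
#     proc bump {x} { expr {-($x-1.0)*($x-1.0)} }
#     puts [::math::optimize::maximum 0.0 3.0 bump]
#     # prints an abscissa close to 1.0
#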
#----------------------------------------------------------------------
#
# min_bound_1d --
#
#       Find a local minimum of a function between two given
#       abscissae. Derivative of f is not required.
#
# Usage:
#       min_bound_1d f x1 x2 ?-option value?...
#
# Parameters:
#       f - Function to minimize.  Must be expressed as a Tcl
#           command, to which will be appended the value at which
#           to evaluate the function.
#       x1 - Lower bound of the interval in which to search for a
#            minimum
#       x2 - Upper bound of the interval in which to search for a minimum
#
# Options:
#       -relerror value
#               Gives the tolerance desired for the returned
#               abscissa.  Default is 1.0e-7.  Should never be less
#               than the square root of the machine precision.
#       -maxiter n
#               Constrains min_bound_1d to evaluate the function
#               no more than n times.  Default is 100.  If convergence
#               is not achieved after the specified number of iterations,
#               an error is thrown.
#       -guess value
#               Gives a point between x1 and x2 that is an initial guess
#               for the minimum.  f(guess) must be at most f(x1) or
#               f(x2).
#       -fguess value
#               Gives the value of the ordinate at the value of '-guess',
#               if known.  Default is to evaluate the function.
#       -abserror value
#               Gives the desired absolute error for the returned
#               abscissa.  Default is 1.0e-10.
#       -trace boolean
#               A true value causes a trace of the function
#               evaluations to be written to the standard output.
#               Default is 0.
#
# Results:
#       Returns a two-element list comprising the abscissa at which
#       the function reaches a local minimum within the interval,
#       and the value of the function at that point.
#
# Side effects:
#       Whatever side effects arise from evaluating the given function.
#
#----------------------------------------------------------------------

proc ::math::optimize::min_bound_1d { f x1 x2 args } {

    set f [lreplace $f 0 0 [uplevel 1 [list namespace which [lindex $f 0]]]]

    set phim1 0.6180339887498949
    set twomphi 0.3819660112501051

    array set params {
        -relerror 1.0e-7
        -abserror 1.0e-10
        -maxiter 100
        -trace 0
        -fguess {}
    }
    set params(-guess) [expr { $phim1 * $x1 + $twomphi * $x2 }]

    if { ( [llength $args] % 2 ) != 0 } {
        return -code error -errorcode [list min_bound_1d wrongNumArgs] \
            "wrong \# args, should be\
                 \"[lreplace [info level 0] 1 end f x1 x2 ?-option value?...]\""
    }
    foreach { key value } $args {
        if { ![info exists params($key)] } {
            return -code error -errorcode [list min_bound_1d badoption $key] \
                "unknown option \"$key\",\
                     should be -abserror,\
                     -fguess, -guess, -maxiter, -relerror,\
                     or -trace"
        }
        set params($key) $value
    }

    # a and b presumably bracket the minimum of the function.  Make sure
    # they're in ascending order.

    if { $x1 < $x2 } {
        set a $x1; set b $x2
    } else {
        set b $x1; set a $x2
    }

    set x $params(-guess);              # Best abscissa found so far
    set w $x;                           # Second best abscissa found so far
    set v $x;                           # Most recent earlier value of w

    set e 0.0;                          # Distance moved on the step before
					# last.

    # Evaluate the function at the initial guess

    if { $params(-fguess) ne {} } {
        set fx $params(-fguess)
    } else {
        set s $f; lappend s $x; set fx [eval $s]
        if { $params(-trace) } {
            puts stdout "f($x) = $fx (initialisation)"
        }
    }
    set fw $fx
    set fv $fx

    for { set iter 0 } { $iter < $params(-maxiter) } { incr iter } {

        # Find the midpoint of the current interval

        set xm [expr { 0.5 * ( $a + $b ) }]

        # Compute the current tolerance for x, and twice its value

        set tol [expr { $params(-relerror) * abs($x) + $params(-abserror) }]
        set tol2 [expr { $tol + $tol }]
        if { abs( $x - $xm ) <= $tol2 - 0.5 * ($b - $a) } {
            return [list $x $fx]
        }
        set golden 1
        if { abs($e) > $tol } {

            # Use parabolic interpolation to find a minimum determined
            # by the evaluations at x, v, and w.  The size of the step
            # to take will be $p/$q.

            set r [expr { ( $x - $w ) * ( $fx - $fv ) }]
            set q [expr { ( $x - $v ) * ( $fx - $fw ) }]
            set p [expr { ( $x - $v ) * $q - ( $x - $w ) * $r }]
            set q [expr { 2. * ( $q - $r ) }]
            if { $q > 0 } {
                set p [expr { - $p }]
            } else {
                set q [expr { - $q }]
            }
            set olde $e
            set e $d

            # Test if parabolic interpolation results in less than half
            # the movement of the step two steps ago.

            if { abs($p) < abs( .5 * $q * $olde )
                 && $p > $q * ( $a - $x )
                 && $p < $q * ( $b - $x ) } {

                set d [expr { $p / $q }]
                set u [expr { $x + $d }]
                if { ( $u - $a ) < $tol2 || ( $b - $u ) < $tol2 } {
                    if { $xm-$x < 0 } {
                        set d [expr { - $tol }]
                    } else {
                        set d $tol
                    }
                }
                set golden 0
            }
        }

        # If parabolic interpolation didn't come up with an acceptable
        # result, use Golden Section instead.

        if { $golden } {
            if { $x >= $xm } {
                set e [expr { $a - $x }]
            } else {
                set e [expr { $b - $x }]
            }
            set d [expr { $twomphi * $e }]
        }

        # At this point, d is the size of the step to take.  Make sure
        # that it's at least $tol.

        if { abs($d) >= $tol } {
            set u [expr { $x + $d }]
        } elseif { $d < 0 } {
            set u [expr { $x - $tol }]
        } else {
            set u [expr { $x + $tol }]
        }

        # Evaluate the function

        set s $f; lappend s $u; set fu [eval $s]
        if { $params(-trace) } {
            if { $golden } {
                puts stdout "f($u)=$fu (golden section)"
            } else {
                puts stdout "f($u)=$fu (parabolic interpolation)"
            }
        }

        if { $fu <= $fx } {
            # We've the best abscissa so far.

            if { $u >= $x } {
                set a $x
            } else {
                set b $x
            }
            set v $w
            set fv $fw
            set w $x
            set fw $fx
            set x $u
            set fx $fu
        } else {

            if { $u < $x } {
                set a $u
            } else {
                set b $u
            }
            if { $fu <= $fw || $w == $x } {
                # We've the second-best abscissa so far
                set v $w
                set fv $fw
                set w $u
                set fw $fu
            } elseif { $fu <= $fv || $v == $x || $v == $w } {
                # We've the third-best so far
                set v $u
                set fv $fu
            }
        }
    }

    return -code error -errorcode [list min_bound_1d noconverge $iter] \
        "[lindex [info level 0] 0] failed to converge after $iter steps."

}

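# Example (illustrative sketch; "cosine" is a hypothetical helper):
#
#     proc cosine {x} { expr {cos($x)} }
#     foreach {xmin fmin} [::math::optimize::min_bound_1d cosine 0.0 6.0] break
#     # xmin is close to pi (about 3.14159) and fmin close to -1.0
#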
#----------------------------------------------------------------------
#
# brackmin --
#
#       Find a place along the number line where a given function has
#       a local minimum.
#
# Usage:
#       brackmin f x1 x2 ?trace?
#
# Parameters:
#       f - Function to minimize
#       x1 - Abscissa thought to be near the minimum
#       x2 - Additional abscissa thought to be near the minimum
#	trace - Boolean variable that, if true,
#               causes 'brackmin' to print a trace of its function
#               evaluations to the standard output.  Default is 0.
#
# Results:
#       Returns a six-element list {x1 y1 x2 y2 x3 y3} where
#       y1=f(x1), y2=f(x2), y3=f(x3).  x2 lies between x1 and x3, and
#       y1>y2, y3>y2, proving that there is a local minimum somewhere
#       in the interval (x1,x3).
#
# Side effects:
#       Whatever effects the evaluation of f has.
#
#----------------------------------------------------------------------

proc ::math::optimize::brackmin { f x1 x2 {trace 0} } {

    set f [lreplace $f 0 0 [uplevel 1 [list namespace which [lindex $f 0]]]]

    set phi 1.6180339887498949
    set epsilon 1.0e-20
    set limit 50.

    # Choose a and b so that f(a) < f(b)

    set cmd $f; lappend cmd $x1; set fx1 [eval $cmd]
    if { $trace } {
        puts "f($x1) = $fx1 (initialisation)"
    }
    set cmd $f; lappend cmd $x2; set fx2 [eval $cmd]
    if { $trace } {
        puts "f($x2) = $fx2 (initialisation)"
    }
    if { $fx1 > $fx2 } {
        set a $x1; set fa $fx1
        set b $x2; set fb $fx2
    } else {
        set a $x2; set fa $fx2
        set b $x1; set fb $fx1
    }

    # Choose a c in the downhill direction

    set c [expr { $b + $phi * ($b - $a) }]
    set cmd $f; lappend cmd $c; set fc [eval $cmd]
    if { $trace } {
        puts "f($c) = $fc (initial dilatation by phi)"
    }

    while { $fb >= $fc } {

        # Try to do parabolic extrapolation to the minimum

        set r [expr { ($b - $a) * ($fb - $fc) }]
        set q [expr { ($b - $c) * ($fb - $fa) }]
        if { abs( $q - $r ) > $epsilon } {
            set denom [expr { $q - $r }]
        } elseif { $q > $r } {
            set denom $epsilon
        } else {
            set denom -$epsilon
        }
        set u [expr { $b - ( (($b - $c) * $q - ($b - $a) * $r)
                             / (2. * $denom) ) }]
        set ulimit [expr { $b + $limit * ( $c - $b ) }]

        # Test the extrapolated abscissa

        if { ($b - $u) * ($u - $c) > 0 } {

            # u lies between b and c.  Try to interpolate

            set cmd $f; lappend cmd $u; set fu [eval $cmd]
            if { $trace } {
                puts "f($u) = $fu (parabolic interpolation)"
            }

            if { $fu < $fc } {

                # fb > fu and fc > fu, so there is a minimum between b and c
                # with u as a starting guess.

                return [list $b $fb $u $fu $c $fc]

            }

            if { $fu > $fb } {

                # fb < fu, fb < fa, and u cannot lie between a and b
                # (because it lies between a and c).  There is a minimum
                # somewhere between a and u, with b a starting guess.

                return [list $a $fa $b $fb $u $fu]

            }

            # Parabolic interpolation was useless. Expand the
            # distance by a factor of phi and try again.

            set u [expr { $c + $phi * ($c - $b) }]
            set cmd $f; lappend cmd $u; set fu [eval $cmd]
            if { $trace } {
                puts "f($u) = $fu (parabolic interpolation failed)"
            }


        } elseif { ( $c - $u ) * ( $u - $ulimit ) > 0 } {

            # u lies between $c and $ulimit.

            set cmd $f; lappend cmd $u; set fu [eval $cmd]
            if { $trace } {
                puts "f($u) = $fu (parabolic extrapolation)"
            }

            if { $fu > $fc } {

                # minimum lies between b and u, with c an initial guess.

                return [list $b $fb $c $fc $u $fu]

            }

            # The function is still decreasing: fa > fb > fc > fu.  Take
            # another factor-of-phi step.

            set b $c; set fb $fc
            set c $u; set fc $fu
            set u [expr { $c + $phi * ( $c - $b ) }]
            set cmd $f; lappend cmd $u; set fu [eval $cmd]
            if { $trace } {
                puts "f($u) = $fu (parabolic extrapolation ok)"
            }

        } elseif { ($u - $ulimit) * ( $ulimit - $c ) >= 0 } {

            # u went past ulimit.  Pull in to ulimit and evaluate there.

            set u $ulimit
            set cmd $f; lappend cmd $u; set fu [eval $cmd]
            if { $trace } {
                puts "f($u) = $fu (limited step)"
            }

        } else {

            # parabolic extrapolation gave a useless value.

            set u [expr { $c + $phi * ( $c - $b ) }]
            set cmd $f; lappend cmd $u; set fu [eval $cmd]
            if { $trace } {
                puts "f($u) = $fu (parabolic extrapolation failed)"
            }

        }

        set a $b; set fa $fb
        set b $c; set fb $fc
        set c $u; set fc $fu
    }

    return [list $a $fa $b $fb $c $fc]
}

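# Example (illustrative sketch; "cosine" is a hypothetical helper):
#
#     proc cosine {x} { expr {cos($x)} }
#     ::math::optimize::brackmin cosine 1.0 2.0
#     # returns a list {xa f(xa) xb f(xb) xc f(xc)} in which xb lies
#     # between xa and xc and f(xb) lies below both f(xa) and f(xc),
#     # bracketing the minimum of cos near pi
#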
#----------------------------------------------------------------------
#
# min_unbound_1d --
#
#	Minimize a function of one variable, unconstrained, derivatives
#	not required.
#
# Usage:
#       min_unbound_1d f x1 x2 ?-option value?...
#
# Parameters:
#       f - Function to minimize.  Must be expressed as a Tcl
#           command, to which will be appended the value at which
#           to evaluate the function.
#       x1 - Initial guess at the minimum
#       x2 - Second initial guess at the minimum, used to set the
#	     initial length scale for the search.
#
# Options:
#       -relerror value
#               Gives the tolerance desired for the returned
#               abscissa.  Default is 1.0e-7.  Should never be less
#               than the square root of the machine precision.
#       -maxiter n
#               Constrains min_unbound_1d to evaluate the function
#               no more than n times.  Default is 100.  If convergence
#               is not achieved after the specified number of iterations,
#               an error is thrown.
#       -abserror value
#               Gives the desired absolute error for the returned
#               abscissa.  Default is 1.0e-10.
#       -trace boolean
#               A true value causes a trace of the function
#               evaluations to be written to the standard output.
#               Default is 0.
#
# Results:
#       Returns a two-element list comprising the abscissa of a local
#       minimum and the value of the function at that point.
#
#----------------------------------------------------------------------

proc ::math::optimize::min_unbound_1d { f x1 x2 args } {

    set f [lreplace $f 0 0 [uplevel 1 [list namespace which [lindex $f 0]]]]

    array set params {
	-relerror 1.0e-7
	-abserror 1.0e-10
	-maxiter 100
        -trace 0
    }
    if { ( [llength $args] % 2 ) != 0 } {
        return -code error -errorcode [list min_unbound_1d wrongNumArgs] \
            "wrong \# args, should be\
                 \"[lreplace [info level 0] 1 end \
                         f x1 x2 ?-option value?...]\""
    }
    foreach { key value } $args {
        if { ![info exists params($key)] } {
            return -code error -errorcode [list min_unbound_1d badoption $key] \
                "unknown option \"$key\",\
                     should be -abserror, -maxiter, -relerror, or -trace"
        }
        set params($key) $value
    }
    foreach { a fa b fb c fc } [brackmin $f $x1 $x2 $params(-trace)] {
	break
    }
    return [eval [linsert [array get params] 0 \
		      min_bound_1d $f $a $c -guess $b -fguess $fb]]
}

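# Example (illustrative sketch; "quartic" is a hypothetical helper):
#
#     proc quartic {x} { expr {pow($x-1.5,4) + $x} }
#     foreach {xmin fmin} [::math::optimize::min_unbound_1d quartic 0.0 1.0] break
#     # only two starting abscissae are needed; the minimum is first
#     # bracketed with brackmin and then refined with min_bound_1d
#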
#----------------------------------------------------------------------
#
# nelderMead --
#
#	Attempt to minimize/maximize a function using the downhill
#	simplex method of Nelder and Mead.
#
# Usage:
#	nelderMead f x ?-keyword value?
#
# Parameters:
#	f - The function to minimize.  The function must be an incomplete
#	    Tcl command, to which will be appended N parameters.
#	x - The starting guess for the minimum; a vector of N parameters
#	    to be passed to the function f.
#
# Options:
#	-scale xscale
#		Initial guess as to the problem scale.  If '-scale' is
#		supplied, then the parameters will be varied by the
#		specified amounts.  The '-scale' parameter must be of the
#		same dimension as the 'x' vector, and all elements must
#		be nonzero.  Default is 0.0001 times the 'x' vector,
#		or 0.0001 for zero elements in the 'x' vector.
#
#	-ftol epsilon
#		Requested tolerance in the function value; nelderMead
#		returns if N+1 consecutive iterates all differ by less
#		than the -ftol value.  Default is 1.0e-7
#
#	-maxiter N
#		Maximum number of iterations to attempt.  Default is
#		500.
#
#	-trace flag
#		If '-trace 1' is supplied, nelderMead writes a record
#		of function evaluations to the standard output as it
#		goes.  Default is 0.
#
# Results:
#	Returns a list of alternating keys and values, suitable for
#	'array set': x (the best parameter vector found), y (the function
#	value there), vertices and yvec (the final simplex and its
#	function values), nIter (the iteration count) and status
#	('ok' or 'too-many-iterations').
#
#----------------------------------------------------------------------

proc ::math::optimize::nelderMead { f startx args } {
    array set params {
	-ftol 1.e-7
	-maxiter 500
	-scale {}
	-trace 0
    }

    # Check arguments

    if { ( [llength $args] % 2 ) != 0 } {
        return -code error -errorcode [list nelderMead wrongNumArgs] \
            "wrong \# args, should be\
                 \"[lreplace [info level 0] 1 end \
                         f startx ?-option value?...]\""
    }
    foreach { key value } $args {
        if { ![info exists params($key)] } {
            return -code error -errorcode [list nelderMead badoption $key] \
                "unknown option \"$key\",\
                     should be -ftol, -maxiter, -scale or -trace"
        }
        set params($key) $value
    }

    # Construct the initial simplex

    set vertices [list $startx]
    if { [llength $params(-scale)] == 0 } {
	set i 0
	foreach x0 $startx {
	    if { $x0 == 0 } {
		set x1 0.0001
	    } else {
		set x1 [expr {1.0001 * $x0}]
	    }
	    lappend vertices [lreplace $startx $i $i $x1]
	    incr i
	}
    } elseif { [llength $params(-scale)] != [llength $startx] } {
	return -code error -errorcode [list nelderMead badOption -scale] \
	    "-scale vector must be of same size as starting x vector"
    } else {
	set i 0
	foreach x0 $startx s $params(-scale) {
	    lappend vertices [lreplace $startx $i $i [expr { $x0 + $s }]]
	    incr i
	}
    }

    # Evaluate at the initial points

    set n [llength $startx]
    foreach x $vertices {
	set cmd $f
	foreach xx $x {
	    lappend cmd $xx
	}
	set y [uplevel 1 $cmd]
	if {$params(-trace)} {
	    puts "nelderMead: evaluating initial point: x=[list $x] y=$y"
	}
	lappend yvec $y
    }

    # Loop adjusting the simplex in the 'vertices' array.

    set nIter 0
    while { 1 } {

	# Find the highest, next highest, and lowest value in y,
	# and save the indices.

	set iBot 0
	set yBot [lindex $yvec 0]
	set iTop -1
	set yTop [lindex $yvec 0]
	set iNext -1
	set i 0
	foreach y $yvec {
	    if { $y <= $yBot } {
		set yBot $y
		set iBot $i
	    }
	    if { $iTop < 0 || $y >= $yTop } {
		set iNext $iTop
		set yNext $yTop
		set iTop $i
		set yTop $y
	    } elseif { $iNext < 0 || $y >= $yNext } {
		set iNext $i
		set yNext $y
	    }
	    incr i
	}

	# Return if the relative error is within an acceptable range

	set rerror [expr { 2. * abs( $yTop - $yBot )
			   / ( abs( $yTop ) + abs( $yBot ) + $params(-ftol) ) }]
	if { $rerror < $params(-ftol) } {
	    set status ok
	    break
	}

	# Count iterations

	if { [incr nIter] > $params(-maxiter) } {
	    set status too-many-iterations
	    break
	}

	# Find the centroid of the face opposite the vertex that
	# maximizes the function value.

	set centroid {}
	for { set i 0 } { $i < $n } { incr i } {
	    lappend centroid 0.0
	}
	set i 0
	foreach v $vertices {
	    if { $i != $iTop } {
		set newCentroid {}
		foreach x0 $centroid x1 $v {
		    lappend newCentroid [expr { $x0 + $x1 }]
		}
		set centroid $newCentroid
	    }
	    incr i
	}
	set newCentroid {}
	foreach x $centroid {
	    lappend newCentroid [expr { $x / $n }]
	}
	set centroid $newCentroid

	# The first trial point is a reflection of the high point
	# around the centroid

	set trial {}
	foreach x0 [lindex $vertices $iTop] x1 $centroid {
	    lappend trial [expr {$x1 + ($x1 - $x0)}]
	}
	set cmd $f
	foreach xx $trial {
	    lappend cmd $xx
	}
	set yTrial [uplevel 1 $cmd]
	if { $params(-trace) } {
	    puts "nelderMead: trying reflection: x=[list $trial] y=$yTrial"
	}

	# If that reflection yields a new minimum, replace the high point,
	# and additionally try dilating in the same direction.

	if { $yTrial < $yBot } {
	    set trial2 {}
	    foreach x0 $centroid x1 $trial {
		lappend trial2 [expr { $x1 + ($x1 - $x0) }]
	    }
	    set cmd $f
	    foreach xx $trial2 {
		lappend cmd $xx
	    }
	    set yTrial2 [uplevel 1 $cmd]
	    if { $params(-trace) } {
		puts "nelderMead: trying dilated reflection:\
                      x=[list $trial2] y=$yTrial2"
	    }
	    if { $yTrial2 < $yBot } {

		# Additional dilation yields a new minimum

		lset vertices $iTop $trial2
		lset yvec $iTop $yTrial2
	    } else {

		# Additional dilation failed, but we can still use
		# the first trial point.

		lset vertices $iTop $trial
		lset yvec $iTop $yTrial

	    }
	} elseif { $yTrial < $yNext } {

	    # The reflected point isn't a new minimum, but it's
	    # better than the second-highest.  Replace the old high
	    # point and try again.

	    lset vertices $iTop $trial
	    lset yvec $iTop $yTrial

	} else {

	    # The reflected point is worse than the second-highest point.
	    # If it's better than the highest, keep it... but in any case,
	    # we want to try contracting the simplex, because a further
	    # reflection will simply bring us back to the starting point.

	    if { $yTrial < $yTop } {
		lset vertices $iTop $trial
		lset yvec $iTop $yTrial
		set yTop $yTrial
	    }
	    set trial {}
	    foreach x0 [lindex $vertices $iTop] x1 $centroid {
		lappend trial [expr { ( $x0 + $x1 ) / 2. }]
	    }
	    set cmd $f
	    foreach xx $trial {
		lappend cmd $xx
	    }
	    set yTrial [uplevel 1 $cmd]
	    if { $params(-trace) } {
		puts "nelderMead: contracting from high point:\
                      x=[list $trial] y=$yTrial"
	    }
	    if { $yTrial < $yTop } {

		# Contraction gave an improvement, so continue with
		# the smaller simplex

		lset vertices $iTop $trial
		lset yvec $iTop $yTrial

	    } else {

		# Contraction gave no improvement either; we seem to
		# be in a valley of peculiar topology.  Contract the
		# simplex about the low point and try again.

		set newVertices {}
		set newYvec {}
		set i 0
		foreach v $vertices y $yvec {
		    if { $i == $iBot } {
			lappend newVertices $v
			lappend newYvec $y
		    } else {
			set newv {}
			foreach x0 $v x1 [lindex $vertices $iBot] {
			    lappend newv [expr { ($x0 + $x1) / 2. }]
			}
			lappend newVertices $newv
			set cmd $f
			foreach xx $newv {
			    lappend cmd $xx
			}
			set ynew [uplevel 1 $cmd]
			lappend newYvec $ynew
			if { $params(-trace) } {
			    puts "nelderMead: contracting about low point:\
                                  x=[list $newv] y=$ynew"
			}
		    }
		    incr i
		}
		set vertices $newVertices
		set yvec $newYvec
	    }

	}

    }
    return [list y $yBot x [lindex $vertices $iBot] vertices $vertices yvec $yvec nIter $nIter status $status]

}

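# Example (illustrative sketch; "bowl" is a hypothetical helper -- see also
# the small self-test at the bottom of this file for a larger demonstration):
#
#     proc bowl {x y} { expr {($x-1.0)*($x-1.0) + 2.0*($y+0.5)*($y+0.5)} }
#     array set result [::math::optimize::nelderMead bowl {0.0 0.0} -scale {0.5 0.5}]
#     # $result(x) is close to {1.0 -0.5}, $result(y) close to 0.0,
#     # and $result(status) is "ok"
#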
# solveLinearProgram
#    Solve a linear program in standard form
#
# Arguments:
#    objective     Vector defining the objective function
#    constraints   Matrix of constraints (as a list of lists)
#
# Return value:
#    Computed values for the coordinates or "unbounded" or "infeasible"
#
proc ::math::optimize::solveLinearProgram { objective constraints } {
    #
    # Check the arguments first and then put them in a more convenient
    # form
    #

    foreach {nconst nvars matrix} \
        [SimplexPrepareMatrix $objective $constraints] {break}

    set solution [SimplexSolve $nconst $nvars $matrix]

    if { [llength $solution] > 1 } {
        return [lrange $solution 0 [expr {$nvars-1}]]
    } else {
        return $solution
    }
}

# linearProgramMaximum --
#    Compute the value attained at the optimum
#
# Arguments:
#    objective     The coefficients of the objective function
#    result        The coordinate values as obtained by solving the program
#
# Return value:
#    Value at the maximum point
#
proc ::math::optimize::linearProgramMaximum {objective result} {

    set value    0.0

    foreach coeff $objective coord $result {
        set value [expr {$value+$coeff*$coord}]
    }

    return $value
}

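# Example (illustrative sketch): maximise 3x + 2y subject to
#
#     x +  y <= 4
#     x + 3y <= 6,   x >= 0, y >= 0
#
# Each constraint row lists the coefficients followed by the upper bound:
#
#     set solution [::math::optimize::solveLinearProgram \
#         {3.0 2.0} \
#         {{1.0 1.0 4.0}
#          {1.0 3.0 6.0}}]
#     # solution should be {4.0 0.0}
#     ::math::optimize::linearProgramMaximum {3.0 2.0} $solution
#     # returns the value at the optimum, 12.0
#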
# SimplexPrintMatrix
#    Debugging routine: print the matrix in easy-to-read form
#
# Arguments:
#    matrix        Matrix to be printed
#
# Return value:
#    None
#
# Note:
#    The tableau should be transposed ...
#
proc ::math::optimize::SimplexPrintMatrix {matrix} {
    puts "\nBasis:\t[join [lindex $matrix 0] \t]"
    foreach col [lrange $matrix 1 end] {
        puts "      \t[join $col \t]"
    }
}

# SimplexPrepareMatrix
#    Prepare the standard tableau from all program data
#
# Arguments:
#    objective     Vector defining the objective function
#    constraints   Matrix of constraints (as a list of lists)
#
# Return value:
#    List containing the number of constraints, the number of
#    variables and the standard tableau (as a list of columns)
#
proc ::math::optimize::SimplexPrepareMatrix {objective constraints} {

    #
    # Check the arguments first
    #
    set nconst [llength $constraints]
    set ncols {}
    foreach row $constraints {
        if { $ncols == {} } {
            set ncols [llength $row]
        } else {
            if { $ncols != [llength $row] } {
                return -code error -errorcode ARGS "Incorrectly formed constraints matrix"
            }
        }
    }

    set nvars [expr {$ncols-1}]

    if { [llength $objective] != $nvars } {
        return -code error -errorcode ARGS "Incorrect length for objective vector"
    }

    #
    # Set up the tableau:
    # Manipulations are easiest if we store the columns first
    # So:
    # - First column is the list of variable indices in the basis
    # - Second column is the list of maximum values (the right-hand sides)
    # - "nvars" columns that follow: the coefficients for the actual
    #   variables
    # - last "nconst" columns: the slack variables
    #
    set matrix   [list]
    set lastrow  [concat $objective [list 0.0]]

    set newcol   [list]
    for {set idx 0} {$idx < $nconst} {incr idx} {
        lappend newcol [expr {$nvars+$idx}]
    }
    lappend newcol "?"
    lappend matrix $newcol

    set zvector [list]
    foreach row $constraints {
        lappend zvector [lindex $row end]
    }
    lappend zvector 0.0
    lappend matrix $zvector

    for {set idx 0} {$idx < $nvars} {incr idx} {
        set newcol [list]
        foreach row $constraints {
            lappend newcol [expr {double([lindex $row $idx])}]
        }
        lappend newcol [expr {-double([lindex $lastrow $idx])}]
        lappend matrix $newcol
    }

    #
    # Add the columns for the slack variables
    #
    set zeros {}
    for {set idx 0} {$idx <= $nconst} {incr idx} {
        lappend zeros 0.0
    }
    for {set idx 0} {$idx < $nconst} {incr idx} {
        lappend matrix [lreplace $zeros $idx $idx 1.0]
    }

    return [list $nconst $nvars $matrix]
}


# SimplexSolve --
#    Solve the given linear program using the simplex method
#
# Arguments:
#    nconst        Number of constraints
#    nvars         Number of actual variables
#    tableau       Standard tableau (as a list of columns)
#
# Return value:
#    List of values for the variables (including the slack variables)
#    or "unbounded" or "infeasible"
#
proc ::math::optimize::SimplexSolve {nconst nvars tableau} {
    set end 0
    while { !$end } {

        #
        # Find the new variable to put in the basis
        #
        set nextcol [SimplexFindNextColumn $tableau]
        if { $nextcol == -1 } {
            set end 1
            continue
        }

        #
        # Now determine which one should leave the basis.
        # If no suitable row can be found, the objective function
        # is unbounded in the direction of the entering variable.
        #
        set nextrow [SimplexFindNextRow $tableau $nextcol]
        if { $nextrow == -1 } {
            return "unbounded"
        }

        #
        # Make the vector for sweeping through the tableau
        #
        set vector [SimplexMakeVector $tableau $nextcol $nextrow]

        #
        # Sweep through the tableau
        #
        set tableau [SimplexNewTableau $tableau $nextcol $nextrow $vector]
    }

    #
    # Now we can return the result
    #
    SimplexResult $tableau
}

# SimplexResult --
#    Reconstruct the result vector
#
# Arguments:
#    tableau       Standard tableau (as a list of columns)
#
# Return value:
#    Vector of values representing the maximum point
#
proc ::math::optimize::SimplexResult {tableau} {

    set firstcol  [lindex $tableau 0]
    set secondcol [lindex $tableau 1]
    set result    {}

    set nvars     [expr {[llength $tableau]-2}]
    for {set i 0} {$i < $nvars } { incr i } {
        lappend result 0.0
    }

    set idx 0
    foreach col [lrange $firstcol 0 end-1] {
        set value [lindex $secondcol $idx]
        if { $value >= 0.0 } {
            set result [lreplace $result $col $col $value]
            incr idx
        } else {
            # If there is a negative component, the problem was not feasible
            return "infeasible"
        }
    }

    return $result
}

# SimplexFindNextColumn --
#    Find the next column - the one with the most negative
#    coefficient in the objective row
#
# Arguments:
#    tableau       Standard tableau (as a list of columns)
#
# Return value:
#    Index of the column (or -1 if there is none)
#
proc ::math::optimize::SimplexFindNextColumn {tableau} {
    set idx        0
    set minidx    -1
    set mincoeff   0.0

    foreach col [lrange $tableau 2 end] {
        set coeff [lindex $col end]
        if { $coeff < 0.0 } {
            if { $coeff < $mincoeff } {
                set minidx $idx
                set mincoeff $coeff
            }
        }
        incr idx
    }

    return $minidx
}

# SimplexFindNextRow --
#    Find the next row - the one with the smallest nonnegative ratio
#    of right-hand side to (positive) coefficient in the pivot column
#
# Arguments:
#    tableau       Standard tableau (as a list of columns)
#    nextcol       Index of the variable that will enter the basis
#
# Return value:
#    Index of the row (or -1 if there is none)
#
proc ::math::optimize::SimplexFindNextRow {tableau nextcol} {
    set idx        0
    set minidx    -1
    set mincoeff   {}

    set bvalues [lrange [lindex $tableau 1] 0 end-1]
    set yvalues [lrange [lindex $tableau [expr {2+$nextcol}]] 0 end-1]

    foreach rowcoeff $bvalues divcoeff $yvalues {
        if { $divcoeff > 0.0 } {
            set coeff [expr {$rowcoeff/$divcoeff}]

            if { $mincoeff == {} || $coeff < $mincoeff } {
                set minidx $idx
                set mincoeff $coeff
            }
        }
        incr idx
    }

    return $minidx
}

# SimplexMakeVector --
#    Make the "sweep" vector
#
# Arguments:
#    tableau       Standard tableau (as a list of columns)
#    nextcol       Index of the variable that will enter the basis
#    nextrow       Index of the variable in the basis that will be replaced
#
# Return value:
#    Vector to be used to update the coefficients of the tableau
#
proc ::math::optimize::SimplexMakeVector {tableau nextcol nextrow} {

    set idx      0
    set vector   {}
    set column   [lindex $tableau [expr {2+$nextcol}]]
    set divcoeff [lindex $column $nextrow]

    foreach colcoeff $column {
        if { $idx != $nextrow } {
            set coeff [expr {-$colcoeff/$divcoeff}]
        } else {
            set coeff [expr {1.0/$divcoeff-1.0}]
        }
        lappend vector $coeff
        incr idx
    }

    return $vector
}

# SimplexNewTableau --
#    Sweep through the tableau and create the new one
#
# Arguments:
#    tableau       Standard tableau (as a list of columns)
#    nextcol       Index of the variable that will enter the basis
#    nextrow       Index of the variable in the basis that will be replaced
#    vector        Vector to sweep with
#
# Return value:
#    New tableau
#
proc ::math::optimize::SimplexNewTableau {tableau nextcol nextrow vector} {

    #
    # The first column: replace the nextrow-th element
    # The second column: replace the value at the nextrow-th element
    # For all the others: the same recipe
    #
    set firstcol   [lreplace [lindex $tableau 0] $nextrow $nextrow $nextcol]
    set newtableau [list $firstcol]

    #
    # The rest of the matrix
    #
    foreach column [lrange $tableau 1 end] {
        set yval   [lindex $column $nextrow]
        set newcol {}
        foreach c $column vcoeff $vector {
            set newval [expr {$c+$yval*$vcoeff}]
            lappend newcol $newval
        }
        lappend newtableau $newcol
    }

    return $newtableau
}


# Now we can announce our presence
package provide math::optimize 1.0.1

if { ![info exists ::argv0] || [string compare $::argv0 [info script]] } {
    return
}

namespace import math::optimize::min_bound_1d
namespace import math::optimize::maximum
namespace import math::optimize::nelderMead

proc f {x y} {
    set xx [expr { $x - 3.1415926535897932 / 2. }]
    set v1 [expr { 0.3 * exp( -$xx*$xx / 2. ) }]
    set d [expr { 10. * $y - sin(9. * $x) }]
    set v2 [expr { exp(-10.*$d*$d)}]
    set rv [expr { -$v1 - $v2 }]
    return $rv
}

proc g {a b} {
    set x1 [expr {0.1 - $a + $b}]
    set x2 [expr {$a + $b - 1.}]
    set x3 [expr {3.-8.*$a+8.*$a*$a-8.*$b+8.*$b*$b}]
    set x4 [expr {$a/10. + $b/10. + $x1*$x1/3. + $x2*$x2 - $x2 * exp(1-$x3*$x3)}]
    return $x4
}

set prec $::tcl_precision
if {![package vsatisfies [package provide Tcl] 8.5]} {
    set ::tcl_precision 17
} else {
    set ::tcl_precision 0
}

puts "f"
puts [math::optimize::nelderMead f {1. 0.} -scale {0.1 0.01} -trace 1]
puts "g"
puts [math::optimize::nelderMead g {0. 0.} -scale {1. 1.} -trace 1]

set ::tcl_precision $prec
