131914882SAlex Richardson#!/usr/bin/env julia
231914882SAlex Richardson# -*- julia -*-
331914882SAlex Richardson
431914882SAlex Richardson# remez.jl - implementation of the Remez algorithm for polynomial approximation
531914882SAlex Richardson#
631914882SAlex Richardson# Copyright (c) 2015-2019, Arm Limited.
7*072a4ba8SAndrew Turner# SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
831914882SAlex Richardson
931914882SAlex Richardsonimport Base.\
1031914882SAlex Richardson
1131914882SAlex Richardson# ----------------------------------------------------------------------
1231914882SAlex Richardson# Helper functions to cope with different Julia versions.
1331914882SAlex Richardsonif VERSION >= v"0.7.0"
1431914882SAlex Richardson    array1d(T, d) = Array{T, 1}(undef, d)
1531914882SAlex Richardson    array2d(T, d1, d2) = Array{T, 2}(undef, d1, d2)
1631914882SAlex Richardsonelse
1731914882SAlex Richardson    array1d(T, d) = Array(T, d)
1831914882SAlex Richardson    array2d(T, d1, d2) = Array(T, d1, d2)
1931914882SAlex Richardsonend
2031914882SAlex Richardsonif VERSION < v"0.5.0"
2131914882SAlex Richardson    String = ASCIIString
2231914882SAlex Richardsonend
2331914882SAlex Richardsonif VERSION >= v"0.6.0"
2431914882SAlex Richardson    # Use Base.invokelatest to run functions made using eval(), to
2531914882SAlex Richardson    # avoid "world age" error
2631914882SAlex Richardson    run(f, x...) = Base.invokelatest(f, x...)
2731914882SAlex Richardsonelse
2831914882SAlex Richardson    # Prior to 0.6.0, invokelatest doesn't exist (but fortunately the
2931914882SAlex Richardson    # world age problem also doesn't seem to exist)
3031914882SAlex Richardson    run(f, x...) = f(x...)
3131914882SAlex Richardsonend
3231914882SAlex Richardson
3331914882SAlex Richardson# ----------------------------------------------------------------------
3431914882SAlex Richardson# Global variables configured by command-line options.
3531914882SAlex Richardsonfloatsuffix = "" # adjusted by --floatsuffix
3631914882SAlex Richardsonxvarname = "x" # adjusted by --variable
3731914882SAlex Richardsonepsbits = 256 # adjusted by --bits
3831914882SAlex Richardsondebug_facilities = Set() # adjusted by --debug
3931914882SAlex Richardsonfull_output = false # adjusted by --full
4031914882SAlex Richardsonarray_format = false # adjusted by --array
4131914882SAlex Richardsonpreliminary_commands = array1d(String, 0) # adjusted by --pre
4231914882SAlex Richardson
4331914882SAlex Richardson# ----------------------------------------------------------------------
4431914882SAlex Richardson# Diagnostic and utility functions.
4531914882SAlex Richardson
4631914882SAlex Richardson# Enable debugging printouts from a particular subpart of this
4731914882SAlex Richardson# program.
4831914882SAlex Richardson#
4931914882SAlex Richardson# Arguments:
5031914882SAlex Richardson#    facility   Name of the facility to debug. For a list of facility names,
5131914882SAlex Richardson#               look through the code for calls to debug().
5231914882SAlex Richardson#
5331914882SAlex Richardson# Return value is a BigFloat.
5431914882SAlex Richardsonfunction enable_debug(facility)
5531914882SAlex Richardson    push!(debug_facilities, facility)
5631914882SAlex Richardsonend
5731914882SAlex Richardson
5831914882SAlex Richardson# Print a diagnostic.
5931914882SAlex Richardson#
6031914882SAlex Richardson# Arguments:
6131914882SAlex Richardson#    facility   Name of the facility for which this is a debug message.
6231914882SAlex Richardson#    printargs  Arguments to println() if debugging of that facility is
6331914882SAlex Richardson#               enabled.
6431914882SAlex Richardsonmacro debug(facility, printargs...)
6531914882SAlex Richardson    printit = quote
6631914882SAlex Richardson        print("[", $facility, "] ")
6731914882SAlex Richardson    end
6831914882SAlex Richardson    for arg in printargs
6931914882SAlex Richardson        printit = quote
7031914882SAlex Richardson            $printit
7131914882SAlex Richardson            print($(esc(arg)))
7231914882SAlex Richardson        end
7331914882SAlex Richardson    end
7431914882SAlex Richardson    return quote
7531914882SAlex Richardson        if $facility in debug_facilities
7631914882SAlex Richardson            $printit
7731914882SAlex Richardson            println()
7831914882SAlex Richardson        end
7931914882SAlex Richardson    end
8031914882SAlex Richardsonend
8131914882SAlex Richardson
8231914882SAlex Richardson# Evaluate a polynomial.
8331914882SAlex Richardson
8431914882SAlex Richardson# Arguments:
8531914882SAlex Richardson#    coeffs   Array of BigFloats giving the coefficients of the polynomial.
8631914882SAlex Richardson#             Starts with the constant term, i.e. coeffs[i] is the
8731914882SAlex Richardson#             coefficient of x^(i-1) (because Julia arrays are 1-based).
8831914882SAlex Richardson#    x        Point at which to evaluate the polynomial.
8931914882SAlex Richardson#
9031914882SAlex Richardson# Return value is a BigFloat.
9131914882SAlex Richardsonfunction poly_eval(coeffs::Array{BigFloat}, x::BigFloat)
9231914882SAlex Richardson    n = length(coeffs)
9331914882SAlex Richardson    if n == 0
9431914882SAlex Richardson        return BigFloat(0)
9531914882SAlex Richardson    elseif n == 1
9631914882SAlex Richardson        return coeffs[1]
9731914882SAlex Richardson    else
9831914882SAlex Richardson        return coeffs[1] + x * poly_eval(coeffs[2:n], x)
9931914882SAlex Richardson    end
10031914882SAlex Richardsonend
10131914882SAlex Richardson
10231914882SAlex Richardson# Evaluate a rational function.
10331914882SAlex Richardson
10431914882SAlex Richardson# Arguments:
10531914882SAlex Richardson#    ncoeffs  Array of BigFloats giving the coefficients of the numerator.
10631914882SAlex Richardson#             Starts with the constant term, and 1-based, as above.
10731914882SAlex Richardson#    dcoeffs  Array of BigFloats giving the coefficients of the denominator.
10831914882SAlex Richardson#             Starts with the constant term, and 1-based, as above.
10931914882SAlex Richardson#    x        Point at which to evaluate the function.
11031914882SAlex Richardson#
11131914882SAlex Richardson# Return value is a BigFloat.
11231914882SAlex Richardsonfunction ratfn_eval(ncoeffs::Array{BigFloat}, dcoeffs::Array{BigFloat},
11331914882SAlex Richardson                    x::BigFloat)
11431914882SAlex Richardson    return poly_eval(ncoeffs, x) / poly_eval(dcoeffs, x)
11531914882SAlex Richardsonend
11631914882SAlex Richardson
11731914882SAlex Richardson# Format a BigFloat into an appropriate output format.
11831914882SAlex Richardson# Arguments:
11931914882SAlex Richardson#    x        BigFloat to format.
12031914882SAlex Richardson#
12131914882SAlex Richardson# Return value is a string.
12231914882SAlex Richardsonfunction float_to_str(x)
12331914882SAlex Richardson    return string(x) * floatsuffix
12431914882SAlex Richardsonend
12531914882SAlex Richardson
12631914882SAlex Richardson# Format a polynomial into an arithmetic expression, for pasting into
12731914882SAlex Richardson# other tools such as gnuplot.
12831914882SAlex Richardson
12931914882SAlex Richardson# Arguments:
13031914882SAlex Richardson#    coeffs   Array of BigFloats giving the coefficients of the polynomial.
13131914882SAlex Richardson#             Starts with the constant term, and 1-based, as above.
13231914882SAlex Richardson#
13331914882SAlex Richardson# Return value is a string.
13431914882SAlex Richardsonfunction poly_to_string(coeffs::Array{BigFloat})
13531914882SAlex Richardson    n = length(coeffs)
13631914882SAlex Richardson    if n == 0
13731914882SAlex Richardson        return "0"
13831914882SAlex Richardson    elseif n == 1
13931914882SAlex Richardson        return float_to_str(coeffs[1])
14031914882SAlex Richardson    else
14131914882SAlex Richardson        return string(float_to_str(coeffs[1]), "+", xvarname, "*(",
14231914882SAlex Richardson                      poly_to_string(coeffs[2:n]), ")")
14331914882SAlex Richardson    end
14431914882SAlex Richardsonend
14531914882SAlex Richardson
14631914882SAlex Richardson# Format a rational function into a string.
14731914882SAlex Richardson
14831914882SAlex Richardson# Arguments:
14931914882SAlex Richardson#    ncoeffs  Array of BigFloats giving the coefficients of the numerator.
15031914882SAlex Richardson#             Starts with the constant term, and 1-based, as above.
15131914882SAlex Richardson#    dcoeffs  Array of BigFloats giving the coefficients of the denominator.
15231914882SAlex Richardson#             Starts with the constant term, and 1-based, as above.
15331914882SAlex Richardson#
15431914882SAlex Richardson# Return value is a string.
15531914882SAlex Richardsonfunction ratfn_to_string(ncoeffs::Array{BigFloat}, dcoeffs::Array{BigFloat})
15631914882SAlex Richardson    if length(dcoeffs) == 1 && dcoeffs[1] == 1
15731914882SAlex Richardson        # Special case: if the denominator is just 1, leave it out.
15831914882SAlex Richardson        return poly_to_string(ncoeffs)
15931914882SAlex Richardson    else
16031914882SAlex Richardson        return string("(", poly_to_string(ncoeffs), ")/(",
16131914882SAlex Richardson                      poly_to_string(dcoeffs), ")")
16231914882SAlex Richardson    end
16331914882SAlex Richardsonend
16431914882SAlex Richardson
16531914882SAlex Richardson# Format a list of x,y pairs into a string.
16631914882SAlex Richardson
16731914882SAlex Richardson# Arguments:
16831914882SAlex Richardson#    xys      Array of (x,y) pairs of BigFloats.
16931914882SAlex Richardson#
17031914882SAlex Richardson# Return value is a string.
17131914882SAlex Richardsonfunction format_xylist(xys::Array{Tuple{BigFloat,BigFloat}})
17231914882SAlex Richardson    return ("[\n" *
17331914882SAlex Richardson            join(["  "*string(x)*" -> "*string(y) for (x,y) in xys], "\n") *
17431914882SAlex Richardson            "\n]")
17531914882SAlex Richardsonend
17631914882SAlex Richardson
17731914882SAlex Richardson# ----------------------------------------------------------------------
17831914882SAlex Richardson# Matrix-equation solver for matrices of BigFloat.
17931914882SAlex Richardson#
18031914882SAlex Richardson# I had hoped that Julia's type-genericity would allow me to solve the
18131914882SAlex Richardson# matrix equation Mx=V by just writing 'M \ V'. Unfortunately, that
18231914882SAlex Richardson# works by translating the inputs into double precision and handing
18331914882SAlex Richardson# off to an optimised library, which misses the point when I have a
18431914882SAlex Richardson# matrix and vector of BigFloat and want my result in _better_ than
18531914882SAlex Richardson# double precision. So I have to implement my own specialisation of
18631914882SAlex Richardson# the \ operator for that case.
18731914882SAlex Richardson#
18831914882SAlex Richardson# Fortunately, the point of using BigFloats is that we have precision
18931914882SAlex Richardson# to burn, so I can do completely naïve Gaussian elimination without
19031914882SAlex Richardson# worrying about instability.
19131914882SAlex Richardson
19231914882SAlex Richardson# Arguments:
19331914882SAlex Richardson#    matrix_in    2-dimensional array of BigFloats, representing a matrix M
19431914882SAlex Richardson#                 in row-first order, i.e. matrix_in[r,c] represents the
19531914882SAlex Richardson#                 entry in row r col c.
19631914882SAlex Richardson#    vector_in    1-dimensional array of BigFloats, representing a vector V.
19731914882SAlex Richardson#
19831914882SAlex Richardson# Return value: a 1-dimensional array X of BigFloats, satisfying M X = V.
19931914882SAlex Richardson#
20031914882SAlex Richardson# Expects the input to be an invertible square matrix and a vector of
20131914882SAlex Richardson# the corresponding size, on pain of failing an assertion.
20231914882SAlex Richardsonfunction \(matrix_in :: Array{BigFloat,2},
20331914882SAlex Richardson           vector_in :: Array{BigFloat,1})
20431914882SAlex Richardson    # Copy the inputs, because we'll be mutating them as we go.
20531914882SAlex Richardson    M = copy(matrix_in)
20631914882SAlex Richardson    V = copy(vector_in)
20731914882SAlex Richardson
20831914882SAlex Richardson    # Input consistency criteria: matrix is square, and vector has
20931914882SAlex Richardson    # length to match.
21031914882SAlex Richardson    n = length(V)
21131914882SAlex Richardson    @assert(n > 0)
21231914882SAlex Richardson    @assert(size(M) == (n,n))
21331914882SAlex Richardson
21431914882SAlex Richardson    @debug("gausselim", "starting, n=", n)
21531914882SAlex Richardson
21631914882SAlex Richardson    for i = 1:1:n
21731914882SAlex Richardson        # Straightforward Gaussian elimination: find the largest
21831914882SAlex Richardson        # non-zero entry in column i (and in a row we haven't sorted
21931914882SAlex Richardson        # out already), swap it into row i, scale that row to
22031914882SAlex Richardson        # normalise it to 1, then zero out the rest of the column by
22131914882SAlex Richardson        # subtracting a multiple of that row from each other row.
22231914882SAlex Richardson
22331914882SAlex Richardson        @debug("gausselim", "matrix=", repr(M))
22431914882SAlex Richardson        @debug("gausselim", "vector=", repr(V))
22531914882SAlex Richardson
22631914882SAlex Richardson        # Find the best pivot.
22731914882SAlex Richardson        bestrow = 0
22831914882SAlex Richardson        bestval = 0
22931914882SAlex Richardson        for j = i:1:n
23031914882SAlex Richardson            if abs(M[j,i]) > bestval
23131914882SAlex Richardson                bestrow = j
23231914882SAlex Richardson                bestval = M[j,i]
23331914882SAlex Richardson            end
23431914882SAlex Richardson        end
23531914882SAlex Richardson        @assert(bestrow > 0) # make sure we did actually find one
23631914882SAlex Richardson
23731914882SAlex Richardson        @debug("gausselim", "bestrow=", bestrow)
23831914882SAlex Richardson
23931914882SAlex Richardson        # Swap it into row i.
24031914882SAlex Richardson        if bestrow != i
24131914882SAlex Richardson            for k = 1:1:n
24231914882SAlex Richardson                M[bestrow,k],M[i,k] = M[i,k],M[bestrow,k]
24331914882SAlex Richardson            end
24431914882SAlex Richardson            V[bestrow],V[i] = V[i],V[bestrow]
24531914882SAlex Richardson        end
24631914882SAlex Richardson
24731914882SAlex Richardson        # Scale that row so that M[i,i] becomes 1.
24831914882SAlex Richardson        divisor = M[i,i]
24931914882SAlex Richardson        for k = 1:1:n
25031914882SAlex Richardson            M[i,k] = M[i,k] / divisor
25131914882SAlex Richardson        end
25231914882SAlex Richardson        V[i] = V[i] / divisor
25331914882SAlex Richardson        @assert(M[i,i] == 1)
25431914882SAlex Richardson
25531914882SAlex Richardson        # Zero out all other entries in column i, by subtracting
25631914882SAlex Richardson        # multiples of this row.
25731914882SAlex Richardson        for j = 1:1:n
25831914882SAlex Richardson            if j != i
25931914882SAlex Richardson                factor = M[j,i]
26031914882SAlex Richardson                for k = 1:1:n
26131914882SAlex Richardson                    M[j,k] = M[j,k] - M[i,k] * factor
26231914882SAlex Richardson                end
26331914882SAlex Richardson                V[j] = V[j] - V[i] * factor
26431914882SAlex Richardson                @assert(M[j,i] == 0)
26531914882SAlex Richardson            end
26631914882SAlex Richardson        end
26731914882SAlex Richardson    end
26831914882SAlex Richardson
26931914882SAlex Richardson    @debug("gausselim", "matrix=", repr(M))
27031914882SAlex Richardson    @debug("gausselim", "vector=", repr(V))
27131914882SAlex Richardson    @debug("gausselim", "done!")
27231914882SAlex Richardson
27331914882SAlex Richardson    # Now we're done: M is the identity matrix, so the equation Mx=V
27431914882SAlex Richardson    # becomes just x=V, i.e. V is already exactly the vector we want
27531914882SAlex Richardson    # to return.
27631914882SAlex Richardson    return V
27731914882SAlex Richardsonend
27831914882SAlex Richardson
27931914882SAlex Richardson# ----------------------------------------------------------------------
28031914882SAlex Richardson# Least-squares fitting of a rational function to a set of (x,y)
28131914882SAlex Richardson# points.
28231914882SAlex Richardson#
28331914882SAlex Richardson# We use this to get an initial starting point for the Remez
28431914882SAlex Richardson# iteration. Therefore, it doesn't really need to be particularly
28531914882SAlex Richardson# accurate; it only needs to be good enough to wiggle back and forth
28631914882SAlex Richardson# across the target function the right number of times (so as to give
28731914882SAlex Richardson# enough error extrema to start optimising from) and not have any
28831914882SAlex Richardson# poles in the target interval.
28931914882SAlex Richardson#
29031914882SAlex Richardson# Least-squares fitting of a _polynomial_ is actually a sensible thing
29131914882SAlex Richardson# to do, and minimises the rms error. Doing the following trick with a
29231914882SAlex Richardson# rational function P/Q is less sensible, because it cannot be made to
29331914882SAlex Richardson# minimise the error function (P/Q-f)^2 that you actually wanted;
29431914882SAlex Richardson# instead it minimises (P-fQ)^2. But that should be good enough to
29531914882SAlex Richardson# have the properties described above.
29631914882SAlex Richardson#
29731914882SAlex Richardson# Some theory: suppose you're trying to choose a set of parameters a_i
29831914882SAlex Richardson# so as to minimise the sum of squares of some error function E_i.
29931914882SAlex Richardson# Basic calculus says, if you do this in one variable, just
30031914882SAlex Richardson# differentiate and solve for zero. In this case, that works fine even
30131914882SAlex Richardson# with multiple variables, because you _partially_ differentiate with
30231914882SAlex Richardson# respect to each a_i, giving a system of equations, and that system
30331914882SAlex Richardson# turns out to be linear so we just solve it as a matrix.
30431914882SAlex Richardson#
30531914882SAlex Richardson# In this case, our parameters are the coefficients of P and Q; to
30631914882SAlex Richardson# avoid underdetermining the system we'll fix Q's constant term at 1,
30731914882SAlex Richardson# so that our error function (as described above) is
30831914882SAlex Richardson#
30931914882SAlex Richardson# E = \sum (p_0 + p_1 x + ... + p_n x^n - y - y q_1 x - ... - y q_d x^d)^2
31031914882SAlex Richardson#
31131914882SAlex Richardson# where the sum is over all (x,y) coordinate pairs. Setting dE/dp_j=0
31231914882SAlex Richardson# (for each j) gives an equation of the form
31331914882SAlex Richardson#
31431914882SAlex Richardson# 0 = \sum 2(p_0 + p_1 x + ... + p_n x^n - y - y q_1 x - ... - y q_d x^d) x^j
31531914882SAlex Richardson#
31631914882SAlex Richardson# and setting dE/dq_j=0 gives one of the form
31731914882SAlex Richardson#
31831914882SAlex Richardson# 0 = \sum 2(p_0 + p_1 x + ... + p_n x^n - y - y q_1 x - ... - y q_d x^d) y x^j
31931914882SAlex Richardson#
32031914882SAlex Richardson# And both of those row types, treated as multivariate linear
32131914882SAlex Richardson# equations in the p,q values, have each coefficient being a value of
32231914882SAlex Richardson# the form \sum x^i, \sum y x^i or \sum y^2 x^i, for various i. (Times
32331914882SAlex Richardson# a factor of 2, but we can throw that away.) So we can go through the
32431914882SAlex Richardson# list of input coordinates summing all of those things, and then we
32531914882SAlex Richardson# have enough information to construct our matrix and solve it
32631914882SAlex Richardson# straight off for the rational function coefficients.
32731914882SAlex Richardson
32831914882SAlex Richardson# Arguments:
32931914882SAlex Richardson#    f        The function to be approximated. Maps BigFloat -> BigFloat.
33031914882SAlex Richardson#    xvals    Array of BigFloats, giving the list of x-coordinates at which
33131914882SAlex Richardson#             to evaluate f.
33231914882SAlex Richardson#    n        Degree of the numerator polynomial of the desired rational
33331914882SAlex Richardson#             function.
33431914882SAlex Richardson#    d        Degree of the denominator polynomial of the desired rational
33531914882SAlex Richardson#             function.
33631914882SAlex Richardson#    w        Error-weighting function. Takes two BigFloat arguments x,y
33731914882SAlex Richardson#             and returns a scaling factor for the error at that location.
33831914882SAlex Richardson#             A larger value indicates that the error should be given
33931914882SAlex Richardson#             greater weight in the square sum we try to minimise.
34031914882SAlex Richardson#             If unspecified, defaults to giving everything the same weight.
34131914882SAlex Richardson#
34231914882SAlex Richardson# Return values: a pair of arrays of BigFloats (N,D) giving the
34331914882SAlex Richardson# coefficients of the returned rational function. N has size n+1; D
34431914882SAlex Richardson# has size d+1. Both start with the constant term, i.e. N[i] is the
34531914882SAlex Richardson# coefficient of x^(i-1) (because Julia arrays are 1-based). D[1] will
34631914882SAlex Richardson# be 1.
34731914882SAlex Richardsonfunction ratfn_leastsquares(f::Function, xvals::Array{BigFloat}, n, d,
34831914882SAlex Richardson                            w = (x,y)->BigFloat(1))
34931914882SAlex Richardson    # Accumulate sums of x^i y^j, for j={0,1,2} and a range of x.
35031914882SAlex Richardson    # Again because Julia arrays are 1-based, we'll have sums[i,j]
35131914882SAlex Richardson    # being the sum of x^(i-1) y^(j-1).
35231914882SAlex Richardson    maxpow = max(n,d) * 2 + 1
35331914882SAlex Richardson    sums = zeros(BigFloat, maxpow, 3)
35431914882SAlex Richardson    for x = xvals
35531914882SAlex Richardson        y = f(x)
35631914882SAlex Richardson        weight = w(x,y)
35731914882SAlex Richardson        for i = 1:1:maxpow
35831914882SAlex Richardson            for j = 1:1:3
35931914882SAlex Richardson                sums[i,j] += x^(i-1) * y^(j-1) * weight
36031914882SAlex Richardson            end
36131914882SAlex Richardson        end
36231914882SAlex Richardson    end
36331914882SAlex Richardson
36431914882SAlex Richardson    @debug("leastsquares", "sums=", repr(sums))
36531914882SAlex Richardson
36631914882SAlex Richardson    # Build the matrix. We're solving n+d+1 equations in n+d+1
36731914882SAlex Richardson    # unknowns. (We actually have to return n+d+2 coefficients, but
36831914882SAlex Richardson    # one of them is hardwired to 1.)
36931914882SAlex Richardson    matrix = array2d(BigFloat, n+d+1, n+d+1)
37031914882SAlex Richardson    vector = array1d(BigFloat, n+d+1)
37131914882SAlex Richardson    for i = 0:1:n
37231914882SAlex Richardson        # Equation obtained by differentiating with respect to p_i,
37331914882SAlex Richardson        # i.e. the numerator coefficient of x^i.
37431914882SAlex Richardson        row = 1+i
37531914882SAlex Richardson        for j = 0:1:n
37631914882SAlex Richardson            matrix[row, 1+j] = sums[1+i+j, 1]
37731914882SAlex Richardson        end
37831914882SAlex Richardson        for j = 1:1:d
37931914882SAlex Richardson            matrix[row, 1+n+j] = -sums[1+i+j, 2]
38031914882SAlex Richardson        end
38131914882SAlex Richardson        vector[row] = sums[1+i, 2]
38231914882SAlex Richardson    end
38331914882SAlex Richardson    for i = 1:1:d
38431914882SAlex Richardson        # Equation obtained by differentiating with respect to q_i,
38531914882SAlex Richardson        # i.e. the denominator coefficient of x^i.
38631914882SAlex Richardson        row = 1+n+i
38731914882SAlex Richardson        for j = 0:1:n
38831914882SAlex Richardson            matrix[row, 1+j] = sums[1+i+j, 2]
38931914882SAlex Richardson        end
39031914882SAlex Richardson        for j = 1:1:d
39131914882SAlex Richardson            matrix[row, 1+n+j] = -sums[1+i+j, 3]
39231914882SAlex Richardson        end
39331914882SAlex Richardson        vector[row] = sums[1+i, 3]
39431914882SAlex Richardson    end
39531914882SAlex Richardson
39631914882SAlex Richardson    @debug("leastsquares", "matrix=", repr(matrix))
39731914882SAlex Richardson    @debug("leastsquares", "vector=", repr(vector))
39831914882SAlex Richardson
39931914882SAlex Richardson    # Solve the matrix equation.
40031914882SAlex Richardson    all_coeffs = matrix \ vector
40131914882SAlex Richardson
40231914882SAlex Richardson    @debug("leastsquares", "all_coeffs=", repr(all_coeffs))
40331914882SAlex Richardson
40431914882SAlex Richardson    # And marshal the results into two separate polynomial vectors to
40531914882SAlex Richardson    # return.
40631914882SAlex Richardson    ncoeffs = all_coeffs[1:n+1]
40731914882SAlex Richardson    dcoeffs = vcat([1], all_coeffs[n+2:n+d+1])
40831914882SAlex Richardson    return (ncoeffs, dcoeffs)
40931914882SAlex Richardsonend
41031914882SAlex Richardson
41131914882SAlex Richardson# ----------------------------------------------------------------------
41231914882SAlex Richardson# Golden-section search to find a maximum of a function.
41331914882SAlex Richardson
41431914882SAlex Richardson# Arguments:
41531914882SAlex Richardson#    f        Function to be maximised/minimised. Maps BigFloat -> BigFloat.
41631914882SAlex Richardson#    a,b,c    BigFloats bracketing a maximum of the function.
41731914882SAlex Richardson#
41831914882SAlex Richardson# Expects:
41931914882SAlex Richardson#    a,b,c are in order (either a<=b<=c or c<=b<=a)
42031914882SAlex Richardson#    a != c             (but b can equal one or the other if it wants to)
42131914882SAlex Richardson#    f(a) <= f(b) >= f(c)
42231914882SAlex Richardson#
42331914882SAlex Richardson# Return value is an (x,y) pair of BigFloats giving the extremal input
42431914882SAlex Richardson# and output. (That is, y=f(x).)
42531914882SAlex Richardsonfunction goldensection(f::Function, a::BigFloat, b::BigFloat, c::BigFloat)
42631914882SAlex Richardson    # Decide on a 'good enough' threshold.
42731914882SAlex Richardson    threshold = abs(c-a) * 2^(-epsbits/2)
42831914882SAlex Richardson
42931914882SAlex Richardson    # We'll need the golden ratio phi, of course. Or rather, in this
43031914882SAlex Richardson    # case, we need 1/phi = 0.618...
43131914882SAlex Richardson    one_over_phi = 2 / (1 + sqrt(BigFloat(5)))
43231914882SAlex Richardson
43331914882SAlex Richardson    # Flip round the interval endpoints so that the interval [a,b] is
43431914882SAlex Richardson    # at least as large as [b,c]. (Then we can always pick our new
43531914882SAlex Richardson    # point in [a,b] without having to handle lots of special cases.)
43631914882SAlex Richardson    if abs(b-a) < abs(c-a)
43731914882SAlex Richardson        a,  c  = c,  a
43831914882SAlex Richardson    end
43931914882SAlex Richardson
44031914882SAlex Richardson    # Evaluate the function at the initial points.
44131914882SAlex Richardson    fa = f(a)
44231914882SAlex Richardson    fb = f(b)
44331914882SAlex Richardson    fc = f(c)
44431914882SAlex Richardson
44531914882SAlex Richardson    @debug("goldensection", "starting")
44631914882SAlex Richardson
44731914882SAlex Richardson    while abs(c-a) > threshold
44831914882SAlex Richardson        @debug("goldensection", "a: ", a, " -> ", fa)
44931914882SAlex Richardson        @debug("goldensection", "b: ", b, " -> ", fb)
45031914882SAlex Richardson        @debug("goldensection", "c: ", c, " -> ", fc)
45131914882SAlex Richardson
45231914882SAlex Richardson        # Check invariants.
45331914882SAlex Richardson        @assert(a <= b <= c || c <= b <= a)
45431914882SAlex Richardson        @assert(fa <= fb >= fc)
45531914882SAlex Richardson
45631914882SAlex Richardson        # Subdivide the larger of the intervals [a,b] and [b,c]. We've
45731914882SAlex Richardson        # arranged that this is always [a,b], for simplicity.
45831914882SAlex Richardson        d = a + (b-a) * one_over_phi
45931914882SAlex Richardson
46031914882SAlex Richardson        # Now we have an interval looking like this (possibly
46131914882SAlex Richardson        # reversed):
46231914882SAlex Richardson        #
46331914882SAlex Richardson        #    a            d       b            c
46431914882SAlex Richardson        #
46531914882SAlex Richardson        # and we know f(b) is bigger than either f(a) or f(c). We have
46631914882SAlex Richardson        # two cases: either f(d) > f(b), or vice versa. In either
46731914882SAlex Richardson        # case, we can narrow to an interval of 1/phi the size, and
46831914882SAlex Richardson        # still satisfy all our invariants (three ordered points,
46931914882SAlex Richardson        # [a,b] at least the width of [b,c], f(a)<=f(b)>=f(c)).
47031914882SAlex Richardson        fd = f(d)
47131914882SAlex Richardson        @debug("goldensection", "d: ", d, " -> ", fd)
47231914882SAlex Richardson        if fd > fb
47331914882SAlex Richardson            a,  b,  c  = a,  d,  b
47431914882SAlex Richardson            fa, fb, fc = fa, fd, fb
47531914882SAlex Richardson            @debug("goldensection", "adb case")
47631914882SAlex Richardson        else
47731914882SAlex Richardson            a,  b,  c  = c,  b,  d
47831914882SAlex Richardson            fa, fb, fc = fc, fb, fd
47931914882SAlex Richardson            @debug("goldensection", "cbd case")
48031914882SAlex Richardson        end
48131914882SAlex Richardson    end
48231914882SAlex Richardson
48331914882SAlex Richardson    @debug("goldensection", "done: ", b, " -> ", fb)
48431914882SAlex Richardson    return (b, fb)
48531914882SAlex Richardsonend
48631914882SAlex Richardson
48731914882SAlex Richardson# ----------------------------------------------------------------------
48831914882SAlex Richardson# Find the extrema of a function within a given interval.
48931914882SAlex Richardson
49031914882SAlex Richardson# Arguments:
49131914882SAlex Richardson#    f         The function to be approximated. Maps BigFloat -> BigFloat.
49231914882SAlex Richardson#    grid      A set of points at which to evaluate f. Must be high enough
49331914882SAlex Richardson#              resolution to make extrema obvious.
49431914882SAlex Richardson#
49531914882SAlex Richardson# Returns an array of (x,y) pairs of BigFloats, with each x,y giving
49631914882SAlex Richardson# the extremum location and its value (i.e. y=f(x)).
49731914882SAlex Richardsonfunction find_extrema(f::Function, grid::Array{BigFloat})
49831914882SAlex Richardson    len = length(grid)
49931914882SAlex Richardson    extrema = array1d(Tuple{BigFloat, BigFloat}, 0)
50031914882SAlex Richardson    for i = 1:1:len
50131914882SAlex Richardson        # We have to provide goldensection() with three points
50231914882SAlex Richardson        # bracketing the extremum. If the extremum is at one end of
50331914882SAlex Richardson        # the interval, then the only way we can do that is to set two
50431914882SAlex Richardson        # of the points equal (which goldensection() will cope with).
50531914882SAlex Richardson        prev = max(1, i-1)
50631914882SAlex Richardson        next = min(i+1, len)
50731914882SAlex Richardson
50831914882SAlex Richardson        # Find our three pairs of (x,y) coordinates.
50931914882SAlex Richardson        xp, xi, xn = grid[prev], grid[i], grid[next]
51031914882SAlex Richardson        yp, yi, yn = f(xp), f(xi), f(xn)
51131914882SAlex Richardson
51231914882SAlex Richardson        # See if they look like an extremum, and if so, ask
51331914882SAlex Richardson        # goldensection() to give a more exact location for it.
51431914882SAlex Richardson        if yp <= yi >= yn
51531914882SAlex Richardson            push!(extrema, goldensection(f, xp, xi, xn))
51631914882SAlex Richardson        elseif yp >= yi <= yn
51731914882SAlex Richardson            x, y = goldensection(x->-f(x), xp, xi, xn)
51831914882SAlex Richardson            push!(extrema, (x, -y))
51931914882SAlex Richardson        end
52031914882SAlex Richardson    end
52131914882SAlex Richardson    return extrema
52231914882SAlex Richardsonend
52331914882SAlex Richardson
52431914882SAlex Richardson# ----------------------------------------------------------------------
52531914882SAlex Richardson# Winnow a list of a function's extrema to give a subsequence of a
52631914882SAlex Richardson# specified length, with the extrema in the subsequence alternating
52731914882SAlex Richardson# signs, and with the smallest absolute value of an extremum in the
52831914882SAlex Richardson# subsequence as large as possible.
52931914882SAlex Richardson#
53031914882SAlex Richardson# We do this using a dynamic-programming approach. We work along the
53131914882SAlex Richardson# provided array of extrema, and at all times, we track the best set
53231914882SAlex Richardson# of extrema we have so far seen for each possible (length, sign of
53331914882SAlex Richardson# last extremum) pair. Each new extremum is evaluated to see whether
53431914882SAlex Richardson# it can be added to any previously seen best subsequence to make a
53531914882SAlex Richardson# new subsequence that beats the previous record holder in its slot.
53631914882SAlex Richardson
53731914882SAlex Richardson# Arguments:
53831914882SAlex Richardson#    extrema   An array of (x,y) pairs of BigFloats giving the input extrema.
53931914882SAlex Richardson#    n         Number of extrema required as output.
54031914882SAlex Richardson#
54131914882SAlex Richardson# Returns a new array of (x,y) pairs which is a subsequence of the
54231914882SAlex Richardson# original sequence. (So, in particular, if the input was sorted by x
54331914882SAlex Richardson# then so will the output be.)
54431914882SAlex Richardsonfunction winnow_extrema(extrema::Array{Tuple{BigFloat,BigFloat}}, n)
54531914882SAlex Richardson    # best[i,j] gives the best sequence so far of length i and with
54631914882SAlex Richardson    # sign j (where signs are coded as 1=positive, 2=negative), in the
54731914882SAlex Richardson    # form of a tuple (cost, actual array of x,y pairs).
54831914882SAlex Richardson    best = fill((BigFloat(0), array1d(Tuple{BigFloat,BigFloat}, 0)), n, 2)
54931914882SAlex Richardson
55031914882SAlex Richardson    for (x,y) = extrema
55131914882SAlex Richardson        if y > 0
55231914882SAlex Richardson            sign = 1
55331914882SAlex Richardson        elseif y < 0
55431914882SAlex Richardson            sign = 2
55531914882SAlex Richardson        else
55631914882SAlex Richardson            # A zero-valued extremum cannot possibly contribute to any
55731914882SAlex Richardson            # optimal sequence, so we simply ignore it!
55831914882SAlex Richardson            continue
55931914882SAlex Richardson        end
56031914882SAlex Richardson
56131914882SAlex Richardson        for i = 1:1:n
56231914882SAlex Richardson            # See if we can create a new entry for best[i,sign] by
56331914882SAlex Richardson            # appending our current (x,y) to some previous thing.
56431914882SAlex Richardson            if i == 1
56531914882SAlex Richardson                # Special case: we don't store a best zero-length
56631914882SAlex Richardson                # sequence :-)
56731914882SAlex Richardson                candidate = (abs(y), [(x,y)])
56831914882SAlex Richardson            else
56931914882SAlex Richardson                othersign = 3-sign # map 1->2 and 2->1
57031914882SAlex Richardson                oldscore, oldlist = best[i-1, othersign]
57131914882SAlex Richardson                newscore = min(abs(y), oldscore)
57231914882SAlex Richardson                newlist = vcat(oldlist, [(x,y)])
57331914882SAlex Richardson                candidate = (newscore, newlist)
57431914882SAlex Richardson            end
57531914882SAlex Richardson            # If our new candidate improves on the previous value of
57631914882SAlex Richardson            # best[i,sign], then replace it.
57731914882SAlex Richardson            if candidate[1] > best[i,sign][1]
57831914882SAlex Richardson                best[i,sign] = candidate
57931914882SAlex Richardson            end
58031914882SAlex Richardson        end
58131914882SAlex Richardson    end
58231914882SAlex Richardson
58331914882SAlex Richardson    # Our ultimate return value has to be either best[n,1] or
58431914882SAlex Richardson    # best[n,2], but it could be either. See which one has the higher
58531914882SAlex Richardson    # score.
58631914882SAlex Richardson    if best[n,1][1] > best[n,2][1]
58731914882SAlex Richardson        ret = best[n,1][2]
58831914882SAlex Richardson    else
58931914882SAlex Richardson        ret = best[n,2][2]
59031914882SAlex Richardson    end
59131914882SAlex Richardson    # Make sure we did actually _find_ a good answer.
59231914882SAlex Richardson    @assert(length(ret) == n)
59331914882SAlex Richardson    return ret
59431914882SAlex Richardsonend
59531914882SAlex Richardson
59631914882SAlex Richardson# ----------------------------------------------------------------------
59731914882SAlex Richardson# Construct a rational-function approximation with equal and
59831914882SAlex Richardson# alternating weighted deviation at a specific set of x-coordinates.
59931914882SAlex Richardson
60031914882SAlex Richardson# Arguments:
60131914882SAlex Richardson#    f         The function to be approximated. Maps BigFloat -> BigFloat.
60231914882SAlex Richardson#    coords    An array of BigFloats giving the x-coordinates. There should
60331914882SAlex Richardson#              be n+d+2 of them.
60431914882SAlex Richardson#    n, d      The degrees of the numerator and denominator of the desired
60531914882SAlex Richardson#              approximation.
60631914882SAlex Richardson#    prev_err  A plausible value for the alternating weighted deviation.
60731914882SAlex Richardson#              (Required to kickstart a binary search in the nonlinear case;
60831914882SAlex Richardson#              see comments below.)
60931914882SAlex Richardson#    w         Error-weighting function. Takes two BigFloat arguments x,y
61031914882SAlex Richardson#              and returns a scaling factor for the error at that location.
61131914882SAlex Richardson#              The returned approximation R should have the minimum possible
61231914882SAlex Richardson#              maximum value of abs((f(x)-R(x)) * w(x,f(x))). Optional
61331914882SAlex Richardson#              parameter, defaulting to the always-return-1 function.
61431914882SAlex Richardson#
61531914882SAlex Richardson# Return values: a pair of arrays of BigFloats (N,D) giving the
61631914882SAlex Richardson# coefficients of the returned rational function. N has size n+1; D
61731914882SAlex Richardson# has size d+1. Both start with the constant term, i.e. N[i] is the
61831914882SAlex Richardson# coefficient of x^(i-1) (because Julia arrays are 1-based). D[1] will
61931914882SAlex Richardson# be 1.
62031914882SAlex Richardsonfunction ratfn_equal_deviation(f::Function, coords::Array{BigFloat},
62131914882SAlex Richardson                               n, d, prev_err::BigFloat,
62231914882SAlex Richardson                               w = (x,y)->BigFloat(1))
62331914882SAlex Richardson    @debug("equaldev", "n=", n, " d=", d, " coords=", repr(coords))
62431914882SAlex Richardson    @assert(length(coords) == n+d+2)
62531914882SAlex Richardson
62631914882SAlex Richardson    if d == 0
62731914882SAlex Richardson        # Special case: we're after a polynomial. In this case, we
62831914882SAlex Richardson        # have the particularly easy job of just constructing and
62931914882SAlex Richardson        # solving a system of n+2 linear equations, to find the n+1
63031914882SAlex Richardson        # coefficients of the polynomial and also the amount of
63131914882SAlex Richardson        # deviation at the specified coordinates. Each equation is of
63231914882SAlex Richardson        # the form
63331914882SAlex Richardson        #
63431914882SAlex Richardson        #   p_0 x^0 + p_1 x^1 + ... + p_n x^n ± e/w(x) = f(x)
63531914882SAlex Richardson        #
63631914882SAlex Richardson        # in which the p_i and e are the variables, and the powers of
63731914882SAlex Richardson        # x and calls to w and f are the coefficients.
63831914882SAlex Richardson
63931914882SAlex Richardson        matrix = array2d(BigFloat, n+2, n+2)
64031914882SAlex Richardson        vector = array1d(BigFloat, n+2)
64131914882SAlex Richardson        currsign = +1
64231914882SAlex Richardson        for i = 1:1:n+2
64331914882SAlex Richardson            x = coords[i]
64431914882SAlex Richardson            for j = 0:1:n
64531914882SAlex Richardson                matrix[i,1+j] = x^j
64631914882SAlex Richardson            end
64731914882SAlex Richardson            y = f(x)
64831914882SAlex Richardson            vector[i] = y
64931914882SAlex Richardson            matrix[i, n+2] = currsign / w(x,y)
65031914882SAlex Richardson            currsign = -currsign
65131914882SAlex Richardson        end
65231914882SAlex Richardson
65331914882SAlex Richardson        @debug("equaldev", "matrix=", repr(matrix))
65431914882SAlex Richardson        @debug("equaldev", "vector=", repr(vector))
65531914882SAlex Richardson
65631914882SAlex Richardson        outvector = matrix \ vector
65731914882SAlex Richardson
65831914882SAlex Richardson        @debug("equaldev", "outvector=", repr(outvector))
65931914882SAlex Richardson
66031914882SAlex Richardson        ncoeffs = outvector[1:n+1]
66131914882SAlex Richardson        dcoeffs = [BigFloat(1)]
66231914882SAlex Richardson        return ncoeffs, dcoeffs
66331914882SAlex Richardson    else
66431914882SAlex Richardson        # For a nontrivial rational function, the system of equations
66531914882SAlex Richardson        # we need to solve becomes nonlinear, because each equation
66631914882SAlex Richardson        # now takes the form
66731914882SAlex Richardson        #
66831914882SAlex Richardson        #   p_0 x^0 + p_1 x^1 + ... + p_n x^n
66931914882SAlex Richardson        #   --------------------------------- ± e/w(x) = f(x)
67031914882SAlex Richardson        #     x^0 + q_1 x^1 + ... + q_d x^d
67131914882SAlex Richardson        #
67231914882SAlex Richardson        # and multiplying up by the denominator gives you a lot of
67331914882SAlex Richardson        # terms containing e × q_i. So we can't do this the really
67431914882SAlex Richardson        # easy way using a matrix equation as above.
67531914882SAlex Richardson        #
67631914882SAlex Richardson        # Fortunately, this is a fairly easy kind of nonlinear system.
67731914882SAlex Richardson        # The equations all become linear if you switch to treating e
67831914882SAlex Richardson        # as a constant, so a reasonably sensible approach is to pick
67931914882SAlex Richardson        # a candidate value of e, solve all but one of the equations
68031914882SAlex Richardson        # for the remaining unknowns, and then see what the error
68131914882SAlex Richardson        # turns out to be in the final equation. The Chebyshev
68231914882SAlex Richardson        # alternation theorem guarantees that that error in the last
68331914882SAlex Richardson        # equation will be anti-monotonic in the input e, so we can
68431914882SAlex Richardson        # just binary-search until we get the two as close to equal as
68531914882SAlex Richardson        # we need them.
68631914882SAlex Richardson
68731914882SAlex Richardson        function try_e(e)
68831914882SAlex Richardson            # Try a given value of e, derive the coefficients of the
68931914882SAlex Richardson            # resulting rational function by setting up equations
69031914882SAlex Richardson            # based on the first n+d+1 of the n+d+2 coordinates, and
69131914882SAlex Richardson            # see what the error turns out to be at the final
69231914882SAlex Richardson            # coordinate.
69331914882SAlex Richardson            matrix = array2d(BigFloat, n+d+1, n+d+1)
69431914882SAlex Richardson            vector = array1d(BigFloat, n+d+1)
69531914882SAlex Richardson            currsign = +1
69631914882SAlex Richardson            for i = 1:1:n+d+1
69731914882SAlex Richardson                x = coords[i]
69831914882SAlex Richardson                y = f(x)
69931914882SAlex Richardson                y_adj = y - currsign * e / w(x,y)
70031914882SAlex Richardson                for j = 0:1:n
70131914882SAlex Richardson                    matrix[i,1+j] = x^j
70231914882SAlex Richardson                end
70331914882SAlex Richardson                for j = 1:1:d
70431914882SAlex Richardson                    matrix[i,1+n+j] = -x^j * y_adj
70531914882SAlex Richardson                end
70631914882SAlex Richardson                vector[i] = y_adj
70731914882SAlex Richardson                currsign = -currsign
70831914882SAlex Richardson            end
70931914882SAlex Richardson
71031914882SAlex Richardson            @debug("equaldev", "trying e=", e)
71131914882SAlex Richardson            @debug("equaldev", "matrix=", repr(matrix))
71231914882SAlex Richardson            @debug("equaldev", "vector=", repr(vector))
71331914882SAlex Richardson
71431914882SAlex Richardson            outvector = matrix \ vector
71531914882SAlex Richardson
71631914882SAlex Richardson            @debug("equaldev", "outvector=", repr(outvector))
71731914882SAlex Richardson
71831914882SAlex Richardson            ncoeffs = outvector[1:n+1]
71931914882SAlex Richardson            dcoeffs = vcat([BigFloat(1)], outvector[n+2:n+d+1])
72031914882SAlex Richardson
72131914882SAlex Richardson            x = coords[n+d+2]
72231914882SAlex Richardson            y = f(x)
72331914882SAlex Richardson            last_e = (ratfn_eval(ncoeffs, dcoeffs, x) - y) * w(x,y) * -currsign
72431914882SAlex Richardson
72531914882SAlex Richardson            @debug("equaldev", "last e=", last_e)
72631914882SAlex Richardson
72731914882SAlex Richardson            return ncoeffs, dcoeffs, last_e
72831914882SAlex Richardson        end
72931914882SAlex Richardson
73031914882SAlex Richardson        threshold = 2^(-epsbits/2) # convergence threshold
73131914882SAlex Richardson
73231914882SAlex Richardson        # Start by trying our previous iteration's error value. This
73331914882SAlex Richardson        # value (e0) will be one end of our binary-search interval,
73431914882SAlex Richardson        # and whatever it caused the last point's error to be, that
73531914882SAlex Richardson        # (e1) will be the other end.
73631914882SAlex Richardson        e0 = prev_err
73731914882SAlex Richardson        @debug("equaldev", "e0 = ", e0)
73831914882SAlex Richardson        nc, dc, e1 = try_e(e0)
73931914882SAlex Richardson        @debug("equaldev", "e1 = ", e1)
74031914882SAlex Richardson        if abs(e1-e0) <= threshold
74131914882SAlex Richardson            # If we're _really_ lucky, we hit the error right on the
74231914882SAlex Richardson            # nose just by doing that!
74331914882SAlex Richardson            return nc, dc
74431914882SAlex Richardson        end
74531914882SAlex Richardson        s = sign(e1-e0)
74631914882SAlex Richardson        @debug("equaldev", "s = ", s)
74731914882SAlex Richardson
74831914882SAlex Richardson        # Verify by assertion that trying our other interval endpoint
74931914882SAlex Richardson        # e1 gives a value that's wrong in the other direction.
75031914882SAlex Richardson        # (Otherwise our binary search won't get a sensible answer at
75131914882SAlex Richardson        # all.)
75231914882SAlex Richardson        nc, dc, e2 = try_e(e1)
75331914882SAlex Richardson        @debug("equaldev", "e2 = ", e2)
75431914882SAlex Richardson        @assert(sign(e2-e1) == -s)
75531914882SAlex Richardson
75631914882SAlex Richardson        # Now binary-search until our two endpoints narrow enough.
75731914882SAlex Richardson        local emid
75831914882SAlex Richardson        while abs(e1-e0) > threshold
75931914882SAlex Richardson            emid = (e1+e0)/2
76031914882SAlex Richardson            nc, dc, enew = try_e(emid)
76131914882SAlex Richardson            if sign(enew-emid) == s
76231914882SAlex Richardson                e0 = emid
76331914882SAlex Richardson            else
76431914882SAlex Richardson                e1 = emid
76531914882SAlex Richardson            end
76631914882SAlex Richardson        end
76731914882SAlex Richardson
76831914882SAlex Richardson        @debug("equaldev", "final e=", emid)
76931914882SAlex Richardson        return nc, dc
77031914882SAlex Richardson    end
77131914882SAlex Richardsonend
77231914882SAlex Richardson
77331914882SAlex Richardson# ----------------------------------------------------------------------
77431914882SAlex Richardson# Top-level function to find a minimax rational-function approximation.
77531914882SAlex Richardson
77631914882SAlex Richardson# Arguments:
77731914882SAlex Richardson#    f         The function to be approximated. Maps BigFloat -> BigFloat.
77831914882SAlex Richardson#    interval  A pair of BigFloats giving the endpoints of the interval
77931914882SAlex Richardson#              (in either order) on which to approximate f.
78031914882SAlex Richardson#    n, d      The degrees of the numerator and denominator of the desired
78131914882SAlex Richardson#              approximation.
78231914882SAlex Richardson#    w         Error-weighting function. Takes two BigFloat arguments x,y
78331914882SAlex Richardson#              and returns a scaling factor for the error at that location.
78431914882SAlex Richardson#              The returned approximation R should have the minimum possible
78531914882SAlex Richardson#              maximum value of abs((f(x)-R(x)) * w(x,f(x))). Optional
78631914882SAlex Richardson#              parameter, defaulting to the always-return-1 function.
78731914882SAlex Richardson#
78831914882SAlex Richardson# Return values: a tuple (N,D,E,X), where
78931914882SAlex Richardson
79031914882SAlex Richardson#    N,D       A pair of arrays of BigFloats giving the coefficients
79131914882SAlex Richardson#              of the returned rational function. N has size n+1; D
79231914882SAlex Richardson#              has size d+1. Both start with the constant term, i.e.
79331914882SAlex Richardson#              N[i] is the coefficient of x^(i-1) (because Julia
79431914882SAlex Richardson#              arrays are 1-based). D[1] will be 1.
79531914882SAlex Richardson#    E         The maximum weighted error (BigFloat).
79631914882SAlex Richardson#    X         An array of pairs of BigFloats giving the locations of n+2
79731914882SAlex Richardson#              points and the weighted error at each of those points. The
79831914882SAlex Richardson#              weighted error values will have alternating signs, which
79931914882SAlex Richardson#              means that the Chebyshev alternation theorem guarantees
80031914882SAlex Richardson#              that any other function of the same degree must exceed
80131914882SAlex Richardson#              the error of this one at at least one of those points.
80231914882SAlex Richardsonfunction ratfn_minimax(f::Function, interval::Tuple{BigFloat,BigFloat}, n, d,
80331914882SAlex Richardson                       w = (x,y)->BigFloat(1))
80431914882SAlex Richardson    # We start off by finding a least-squares approximation. This
80531914882SAlex Richardson    # doesn't need to be perfect, but if we can get it reasonably good
80631914882SAlex Richardson    # then it'll save iterations in the refining stage.
80731914882SAlex Richardson    #
80831914882SAlex Richardson    # Least-squares approximations tend to look nicer in a minimax
80931914882SAlex Richardson    # sense if you evaluate the function at a big pile of Chebyshev
81031914882SAlex Richardson    # nodes rather than uniformly spaced points. These values will
81131914882SAlex Richardson    # also make a good grid to use for the initial search for error
81231914882SAlex Richardson    # extrema, so we'll keep them around for that reason too.
81331914882SAlex Richardson
81431914882SAlex Richardson    # Construct the grid.
81531914882SAlex Richardson    lo, hi = minimum(interval), maximum(interval)
81631914882SAlex Richardson    local grid
81731914882SAlex Richardson    let
81831914882SAlex Richardson        mid = (hi+lo)/2
81931914882SAlex Richardson        halfwid = (hi-lo)/2
82031914882SAlex Richardson        nnodes = 16 * (n+d+1)
82131914882SAlex Richardson        pi = 2*asin(BigFloat(1))
82231914882SAlex Richardson        grid = [ mid - halfwid * cos(pi*i/nnodes) for i=0:1:nnodes ]
82331914882SAlex Richardson    end
82431914882SAlex Richardson
82531914882SAlex Richardson    # Find the initial least-squares approximation.
82631914882SAlex Richardson    (nc, dc) = ratfn_leastsquares(f, grid, n, d, w)
82731914882SAlex Richardson    @debug("minimax", "initial leastsquares approx = ",
82831914882SAlex Richardson           ratfn_to_string(nc, dc))
82931914882SAlex Richardson
83031914882SAlex Richardson    # Threshold of convergence. We stop when the relative difference
83131914882SAlex Richardson    # between the min and max (winnowed) error extrema is less than
83231914882SAlex Richardson    # this.
83331914882SAlex Richardson    #
83431914882SAlex Richardson    # This is set to the cube root of machine epsilon on a more or
83531914882SAlex Richardson    # less empirical basis, because the rational-function case will
83631914882SAlex Richardson    # not converge reliably if you set it to only the square root.
83731914882SAlex Richardson    # (Repeatable by using the --test mode.) On the assumption that
83831914882SAlex Richardson    # input and output error in each iteration can be expected to be
83931914882SAlex Richardson    # related by a simple power law (because it'll just be down to how
84031914882SAlex Richardson    # many leading terms of a Taylor series are zero), the cube root
84131914882SAlex Richardson    # was the next thing to try.
84231914882SAlex Richardson    threshold = 2^(-epsbits/3)
84331914882SAlex Richardson
84431914882SAlex Richardson    # Main loop.
84531914882SAlex Richardson    while true
84631914882SAlex Richardson        # Find all the error extrema we can.
84731914882SAlex Richardson        function compute_error(x)
84831914882SAlex Richardson            real_y = f(x)
84931914882SAlex Richardson            approx_y = ratfn_eval(nc, dc, x)
85031914882SAlex Richardson            return (approx_y - real_y) * w(x, real_y)
85131914882SAlex Richardson        end
85231914882SAlex Richardson        extrema = find_extrema(compute_error, grid)
85331914882SAlex Richardson        @debug("minimax", "all extrema = ", format_xylist(extrema))
85431914882SAlex Richardson
85531914882SAlex Richardson        # Winnow the extrema down to the right number, and ensure they
85631914882SAlex Richardson        # have alternating sign.
85731914882SAlex Richardson        extrema = winnow_extrema(extrema, n+d+2)
85831914882SAlex Richardson        @debug("minimax", "winnowed extrema = ", format_xylist(extrema))
85931914882SAlex Richardson
86031914882SAlex Richardson        # See if we've finished.
86131914882SAlex Richardson        min_err = minimum([abs(y) for (x,y) = extrema])
86231914882SAlex Richardson        max_err = maximum([abs(y) for (x,y) = extrema])
86331914882SAlex Richardson        variation = (max_err - min_err) / max_err
86431914882SAlex Richardson        @debug("minimax", "extremum variation = ", variation)
86531914882SAlex Richardson        if variation < threshold
86631914882SAlex Richardson            @debug("minimax", "done!")
86731914882SAlex Richardson            return nc, dc, max_err, extrema
86831914882SAlex Richardson        end
86931914882SAlex Richardson
87031914882SAlex Richardson        # If not, refine our function by equalising the error at the
87131914882SAlex Richardson        # extrema points, and go round again.
87231914882SAlex Richardson        (nc, dc) = ratfn_equal_deviation(f, map(x->x[1], extrema),
87331914882SAlex Richardson                                         n, d, max_err, w)
87431914882SAlex Richardson        @debug("minimax", "refined approx = ", ratfn_to_string(nc, dc))
87531914882SAlex Richardson    end
87631914882SAlex Richardsonend
87731914882SAlex Richardson
87831914882SAlex Richardson# ----------------------------------------------------------------------
87931914882SAlex Richardson# Check if a polynomial is well-conditioned for accurate evaluation in
88031914882SAlex Richardson# a given interval by Horner's rule.
88131914882SAlex Richardson#
88231914882SAlex Richardson# This is true if at every step where Horner's rule computes
88331914882SAlex Richardson# (coefficient + x*value_so_far), the constant coefficient you're
88431914882SAlex Richardson# adding on is of larger magnitude than the x*value_so_far operand.
88531914882SAlex Richardson# And this has to be true for every x in the interval.
88631914882SAlex Richardson#
88731914882SAlex Richardson# Arguments:
88831914882SAlex Richardson#    coeffs    The coefficients of the polynomial under test. Starts with
88931914882SAlex Richardson#              the constant term, i.e. coeffs[i] is the coefficient of
89031914882SAlex Richardson#              x^(i-1) (because Julia arrays are 1-based).
89131914882SAlex Richardson#    lo, hi    The bounds of the interval.
89231914882SAlex Richardson#
89331914882SAlex Richardson# Return value: the largest ratio (x*value_so_far / coefficient), at
89431914882SAlex Richardson# any step of evaluation, for any x in the interval. If this is less
89531914882SAlex Richardson# than 1, the polynomial is at least somewhat well-conditioned;
89631914882SAlex Richardson# ideally you want it to be more like 1/8 or 1/16 or so, so that the
89731914882SAlex Richardson# relative rounding error accumulated at each step are reduced by
89831914882SAlex Richardson# several factors of 2 when the next coefficient is added on.
89931914882SAlex Richardson
90031914882SAlex Richardsonfunction wellcond(coeffs, lo, hi)
90131914882SAlex Richardson    x = max(abs(lo), abs(hi))
90231914882SAlex Richardson    worst = 0
90331914882SAlex Richardson    so_far = 0
90431914882SAlex Richardson    for i = length(coeffs):-1:1
90531914882SAlex Richardson        coeff = abs(coeffs[i])
90631914882SAlex Richardson        so_far *= x
90731914882SAlex Richardson        if coeff != 0
90831914882SAlex Richardson            thisval = so_far / coeff
90931914882SAlex Richardson            worst = max(worst, thisval)
91031914882SAlex Richardson            so_far += coeff
91131914882SAlex Richardson        end
91231914882SAlex Richardson    end
91331914882SAlex Richardson    return worst
91431914882SAlex Richardsonend
91531914882SAlex Richardson
91631914882SAlex Richardson# ----------------------------------------------------------------------
91731914882SAlex Richardson# Small set of unit tests.
91831914882SAlex Richardson
91931914882SAlex Richardsonfunction test()
92031914882SAlex Richardson    passes = 0
92131914882SAlex Richardson    fails = 0
92231914882SAlex Richardson
92331914882SAlex Richardson    function approx_eq(x, y, limit=1e-6)
92431914882SAlex Richardson        return abs(x - y) < limit
92531914882SAlex Richardson    end
92631914882SAlex Richardson
92731914882SAlex Richardson    function test(condition)
92831914882SAlex Richardson        if condition
92931914882SAlex Richardson            passes += 1
93031914882SAlex Richardson        else
93131914882SAlex Richardson            println("fail")
93231914882SAlex Richardson            fails += 1
93331914882SAlex Richardson        end
93431914882SAlex Richardson    end
93531914882SAlex Richardson
93631914882SAlex Richardson    # Test Gaussian elimination.
93731914882SAlex Richardson    println("Gaussian test 1:")
93831914882SAlex Richardson    m = BigFloat[1 1 2; 3 5 8; 13 34 21]
93931914882SAlex Richardson    v = BigFloat[1, -1, 2]
94031914882SAlex Richardson    ret = m \ v
94131914882SAlex Richardson    println("  ",repr(ret))
94231914882SAlex Richardson    test(approx_eq(ret[1], 109/26))
94331914882SAlex Richardson    test(approx_eq(ret[2], -105/130))
94431914882SAlex Richardson    test(approx_eq(ret[3], -31/26))
94531914882SAlex Richardson
94631914882SAlex Richardson    # Test leastsquares rational functions.
94731914882SAlex Richardson    println("Leastsquares test 1:")
94831914882SAlex Richardson    n = 10000
94931914882SAlex Richardson    a = array1d(BigFloat, n+1)
95031914882SAlex Richardson    for i = 0:1:n
95131914882SAlex Richardson        a[1+i] = i/BigFloat(n)
95231914882SAlex Richardson    end
95331914882SAlex Richardson    (nc, dc) = ratfn_leastsquares(x->exp(x), a, 2, 2)
95431914882SAlex Richardson    println("  ",ratfn_to_string(nc, dc))
95531914882SAlex Richardson    for x = a
95631914882SAlex Richardson        test(approx_eq(exp(x), ratfn_eval(nc, dc, x), 1e-4))
95731914882SAlex Richardson    end
95831914882SAlex Richardson
95931914882SAlex Richardson    # Test golden section search.
96031914882SAlex Richardson    println("Golden section test 1:")
96131914882SAlex Richardson    x, y = goldensection(x->sin(x),
96231914882SAlex Richardson                              BigFloat(0), BigFloat(1)/10, BigFloat(4))
96331914882SAlex Richardson    println("  ", x, " -> ", y)
96431914882SAlex Richardson    test(approx_eq(x, asin(BigFloat(1))))
96531914882SAlex Richardson    test(approx_eq(y, 1))
96631914882SAlex Richardson
96731914882SAlex Richardson    # Test extrema-winnowing algorithm.
96831914882SAlex Richardson    println("Winnow test 1:")
96931914882SAlex Richardson    extrema = [(x, sin(20*x)*sin(197*x))
97031914882SAlex Richardson               for x in BigFloat(0):BigFloat(1)/1000:BigFloat(1)]
97131914882SAlex Richardson    winnowed = winnow_extrema(extrema, 12)
97231914882SAlex Richardson    println("  ret = ", format_xylist(winnowed))
97331914882SAlex Richardson    prevx, prevy = -1, 0
97431914882SAlex Richardson    for (x,y) = winnowed
97531914882SAlex Richardson        test(x > prevx)
97631914882SAlex Richardson        test(y != 0)
97731914882SAlex Richardson        test(prevy * y <= 0) # tolerates initial prevx having no sign
97831914882SAlex Richardson        test(abs(y) > 0.9)
97931914882SAlex Richardson        prevx, prevy = x, y
98031914882SAlex Richardson    end
98131914882SAlex Richardson
98231914882SAlex Richardson    # Test actual minimax approximation.
98331914882SAlex Richardson    println("Minimax test 1 (polynomial):")
98431914882SAlex Richardson    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 4, 0)
98531914882SAlex Richardson    println("  ",e)
98631914882SAlex Richardson    println("  ",ratfn_to_string(nc, dc))
98731914882SAlex Richardson    test(0 < e < 1e-3)
98831914882SAlex Richardson    for x = 0:BigFloat(1)/1000:1
98931914882SAlex Richardson        test(abs(ratfn_eval(nc, dc, x) - exp(x)) <= e * 1.0000001)
99031914882SAlex Richardson    end
99131914882SAlex Richardson
99231914882SAlex Richardson    println("Minimax test 2 (rational):")
99331914882SAlex Richardson    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 2, 2)
99431914882SAlex Richardson    println("  ",e)
99531914882SAlex Richardson    println("  ",ratfn_to_string(nc, dc))
99631914882SAlex Richardson    test(0 < e < 1e-3)
99731914882SAlex Richardson    for x = 0:BigFloat(1)/1000:1
99831914882SAlex Richardson        test(abs(ratfn_eval(nc, dc, x) - exp(x)) <= e * 1.0000001)
99931914882SAlex Richardson    end
100031914882SAlex Richardson
100131914882SAlex Richardson    println("Minimax test 3 (polynomial, weighted):")
100231914882SAlex Richardson    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 4, 0,
100331914882SAlex Richardson                                   (x,y)->1/y)
100431914882SAlex Richardson    println("  ",e)
100531914882SAlex Richardson    println("  ",ratfn_to_string(nc, dc))
100631914882SAlex Richardson    test(0 < e < 1e-3)
100731914882SAlex Richardson    for x = 0:BigFloat(1)/1000:1
100831914882SAlex Richardson        test(abs(ratfn_eval(nc, dc, x) - exp(x))/exp(x) <= e * 1.0000001)
100931914882SAlex Richardson    end
101031914882SAlex Richardson
101131914882SAlex Richardson    println("Minimax test 4 (rational, weighted):")
101231914882SAlex Richardson    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 2, 2,
101331914882SAlex Richardson                                   (x,y)->1/y)
101431914882SAlex Richardson    println("  ",e)
101531914882SAlex Richardson    println("  ",ratfn_to_string(nc, dc))
101631914882SAlex Richardson    test(0 < e < 1e-3)
101731914882SAlex Richardson    for x = 0:BigFloat(1)/1000:1
101831914882SAlex Richardson        test(abs(ratfn_eval(nc, dc, x) - exp(x))/exp(x) <= e * 1.0000001)
101931914882SAlex Richardson    end
102031914882SAlex Richardson
102131914882SAlex Richardson    println("Minimax test 5 (rational, weighted, odd degree):")
102231914882SAlex Richardson    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 2, 1,
102331914882SAlex Richardson                                   (x,y)->1/y)
102431914882SAlex Richardson    println("  ",e)
102531914882SAlex Richardson    println("  ",ratfn_to_string(nc, dc))
102631914882SAlex Richardson    test(0 < e < 1e-3)
102731914882SAlex Richardson    for x = 0:BigFloat(1)/1000:1
102831914882SAlex Richardson        test(abs(ratfn_eval(nc, dc, x) - exp(x))/exp(x) <= e * 1.0000001)
102931914882SAlex Richardson    end
103031914882SAlex Richardson
103131914882SAlex Richardson    total = passes + fails
103231914882SAlex Richardson    println(passes, " passes ", fails, " fails ", total, " total")
103331914882SAlex Richardsonend
103431914882SAlex Richardson
103531914882SAlex Richardson# ----------------------------------------------------------------------
103631914882SAlex Richardson# Online help.
103731914882SAlex Richardsonfunction help()
103831914882SAlex Richardson    print("""
103931914882SAlex RichardsonUsage:
104031914882SAlex Richardson
104131914882SAlex Richardson    remez.jl [options] <lo> <hi> <n> <d> <expr> [<weight>]
104231914882SAlex Richardson
104331914882SAlex RichardsonArguments:
104431914882SAlex Richardson
104531914882SAlex Richardson    <lo>, <hi>
104631914882SAlex Richardson
104731914882SAlex Richardson        Bounds of the interval on which to approximate the target
104831914882SAlex Richardson        function. These are parsed and evaluated as Julia expressions,
104931914882SAlex Richardson        so you can write things like '1/BigFloat(6)' to get an
105031914882SAlex Richardson        accurate representation of 1/6, or '4*atan(BigFloat(1))' to
105131914882SAlex Richardson        get pi. (Unfortunately, the obvious 'BigFloat(pi)' doesn't
105231914882SAlex Richardson        work in Julia.)
105331914882SAlex Richardson
105431914882SAlex Richardson    <n>, <d>
105531914882SAlex Richardson
105631914882SAlex Richardson        The desired degree of polynomial(s) you want for your
105731914882SAlex Richardson        approximation. These should be non-negative integers. If you
105831914882SAlex Richardson        want a rational function as output, set <n> to the degree of
105931914882SAlex Richardson        the numerator, and <d> the denominator. If you just want an
106031914882SAlex Richardson        ordinary polynomial, set <d> to 0, and <n> to the degree of
106131914882SAlex Richardson        the polynomial you want.
106231914882SAlex Richardson
106331914882SAlex Richardson    <expr>
106431914882SAlex Richardson
106531914882SAlex Richardson        A Julia expression giving the function to be approximated on
106631914882SAlex Richardson        the interval. The input value is predefined as 'x' when this
106731914882SAlex Richardson        expression is evaluated, so you should write something along
106831914882SAlex Richardson        the lines of 'sin(x)' or 'sqrt(1+tan(x)^2)' etc.
106931914882SAlex Richardson
107031914882SAlex Richardson    <weight>
107131914882SAlex Richardson
107231914882SAlex Richardson        If provided, a Julia expression giving the weighting factor
107331914882SAlex Richardson        for the approximation error. The output polynomial will
107431914882SAlex Richardson        minimise the largest absolute value of (P-f) * w at any point
107531914882SAlex Richardson        in the interval, where P is the value of the polynomial, f is
107631914882SAlex Richardson        the value of the target function given by <expr>, and w is the
107731914882SAlex Richardson        weight given by this function.
107831914882SAlex Richardson
107931914882SAlex Richardson        When this expression is evaluated, the input value to P and f
108031914882SAlex Richardson        is predefined as 'x', and also the true output value f(x) is
108131914882SAlex Richardson        predefined as 'y'. So you can minimise the relative error by
108231914882SAlex Richardson        simply writing '1/y'.
108331914882SAlex Richardson
108431914882SAlex Richardson        If the <weight> argument is not provided, the default
108531914882SAlex Richardson        weighting function always returns 1, so that the polynomial
108631914882SAlex Richardson        will minimise the maximum absolute error |P-f|.
108731914882SAlex Richardson
108831914882SAlex RichardsonComputation options:
108931914882SAlex Richardson
109031914882SAlex Richardson    --pre=<predef_expr>
109131914882SAlex Richardson
109231914882SAlex Richardson        Evaluate the Julia expression <predef_expr> before starting
109331914882SAlex Richardson        the computation. This permits you to pre-define variables or
109431914882SAlex Richardson        functions which the Julia expressions in your main arguments
109531914882SAlex Richardson        can refer to. All of <lo>, <hi>, <expr> and <weight> can make
109631914882SAlex Richardson        use of things defined by <predef_expr>.
109731914882SAlex Richardson
109831914882SAlex Richardson        One internal remez.jl function that you might sometimes find
109931914882SAlex Richardson        useful in this expression is 'goldensection', which finds the
110031914882SAlex Richardson        location and value of a maximum of a function. For example,
110131914882SAlex Richardson        one implementation strategy for the gamma function involves
110231914882SAlex Richardson        translating it to put its unique local minimum at the origin,
110331914882SAlex Richardson        in which case you can write something like this
110431914882SAlex Richardson
110531914882SAlex Richardson            --pre='(m,my) = goldensection(x -> -gamma(x),
110631914882SAlex Richardson                  BigFloat(1), BigFloat(1.5), BigFloat(2))'
110731914882SAlex Richardson
110831914882SAlex Richardson        to predefine 'm' as the location of gamma's minimum, and 'my'
110931914882SAlex Richardson        as the (negated) value that gamma actually takes at that
111031914882SAlex Richardson        point, i.e. -gamma(m).
111131914882SAlex Richardson
111231914882SAlex Richardson        (Since 'goldensection' always finds a maximum, we had to
111331914882SAlex Richardson        negate gamma in the input function to make it find a minimum
111431914882SAlex Richardson        instead. Consult the comments in the source for more details
111531914882SAlex Richardson        on the use of this function.)
111631914882SAlex Richardson
111731914882SAlex Richardson        If you use this option more than once, all the expressions you
111831914882SAlex Richardson        provide will be run in sequence.
111931914882SAlex Richardson
112031914882SAlex Richardson    --bits=<bits>
112131914882SAlex Richardson
112231914882SAlex Richardson        Specify the accuracy to which you want the output polynomial,
112331914882SAlex Richardson        in bits. Default 256, which should be more than enough.
112431914882SAlex Richardson
112531914882SAlex Richardson    --bigfloatbits=<bits>
112631914882SAlex Richardson
112731914882SAlex Richardson        Turn up the precision used by Julia for its BigFloat
112831914882SAlex Richardson        evaluation. Default is Julia's default (also 256). You might
112931914882SAlex Richardson        want to try setting this higher than the --bits value if the
113031914882SAlex Richardson        algorithm is failing to converge for some reason.
113131914882SAlex Richardson
113231914882SAlex RichardsonOutput options:
113331914882SAlex Richardson
113431914882SAlex Richardson    --full
113531914882SAlex Richardson
113631914882SAlex Richardson        Instead of just printing the approximation function itself,
113731914882SAlex Richardson        also print auxiliary information:
113831914882SAlex Richardson         - the locations of the error extrema, and the actual
113931914882SAlex Richardson           (weighted) error at each of those locations
114031914882SAlex Richardson         - the overall maximum error of the function
114131914882SAlex Richardson         - a 'well-conditioning quotient', giving the worst-case ratio
114231914882SAlex Richardson           between any polynomial coefficient and the largest possible
114331914882SAlex Richardson           value of the higher-order terms it will be added to.
114431914882SAlex Richardson
114531914882SAlex Richardson        The well-conditioning quotient should be less than 1, ideally
114631914882SAlex Richardson        by several factors of two, for accurate evaluation in the
114731914882SAlex Richardson        target precision. If you request a rational function, a
114831914882SAlex Richardson        separate well-conditioning quotient will be printed for the
114931914882SAlex Richardson        numerator and denominator.
115031914882SAlex Richardson
115131914882SAlex Richardson        Use this option when deciding how wide an interval to
115231914882SAlex Richardson        approximate your function on, and what degree of polynomial
115331914882SAlex Richardson        you need.
115431914882SAlex Richardson
115531914882SAlex Richardson    --variable=<identifier>
115631914882SAlex Richardson
115731914882SAlex Richardson        When writing the output polynomial or rational function in its
115831914882SAlex Richardson        usual form as an arithmetic expression, use <identifier> as
115931914882SAlex Richardson        the name of the input variable. Default is 'x'.
116031914882SAlex Richardson
116131914882SAlex Richardson    --suffix=<suffix>
116231914882SAlex Richardson
116331914882SAlex Richardson        When writing the output polynomial or rational function in its
116431914882SAlex Richardson        usual form as an arithmetic expression, write <suffix> after
116531914882SAlex Richardson        every floating-point literal. For example, '--suffix=F' will
116631914882SAlex Richardson        generate a C expression in which the coefficients are literals
116731914882SAlex Richardson        of type 'float' rather than 'double'.
116831914882SAlex Richardson
116931914882SAlex Richardson    --array
117031914882SAlex Richardson
117131914882SAlex Richardson        Instead of writing the output polynomial as an arithmetic
117231914882SAlex Richardson        expression in Horner's rule form, write out just its
117331914882SAlex Richardson        coefficients, one per line, each with a trailing comma.
117431914882SAlex Richardson        Suitable for pasting into a C array declaration.
117531914882SAlex Richardson
117631914882SAlex Richardson        This option is not currently supported if the output is a
117731914882SAlex Richardson        rational function, because you'd need two separate arrays for
117831914882SAlex Richardson        the numerator and denominator coefficients and there's no
117931914882SAlex Richardson        obviously right way to provide both of those together.
118031914882SAlex Richardson
118131914882SAlex RichardsonDebug and test options:
118231914882SAlex Richardson
118331914882SAlex Richardson    --debug=<facility>
118431914882SAlex Richardson
118531914882SAlex Richardson        Enable debugging output from various parts of the Remez
118631914882SAlex Richardson        calculation. <facility> should be the name of one of the
118731914882SAlex Richardson        classes of diagnostic output implemented in the program.
118831914882SAlex Richardson        Useful values include 'gausselim', 'leastsquares',
118931914882SAlex Richardson        'goldensection', 'equaldev', 'minimax'. This is probably
119031914882SAlex Richardson        mostly useful to people debugging problems with the script, so
119131914882SAlex Richardson        consult the source code for more information about what the
119231914882SAlex Richardson        diagnostic output for each of those facilities will be.
119331914882SAlex Richardson
119431914882SAlex Richardson        If you want diagnostics from more than one facility, specify
119531914882SAlex Richardson        this option multiple times with different arguments.
119631914882SAlex Richardson
119731914882SAlex Richardson    --test
119831914882SAlex Richardson
119931914882SAlex Richardson        Run remez.jl's internal test suite. No arguments needed.
120031914882SAlex Richardson
120131914882SAlex RichardsonMiscellaneous options:
120231914882SAlex Richardson
120331914882SAlex Richardson    --help
120431914882SAlex Richardson
120531914882SAlex Richardson        Display this text and exit. No arguments needed.
120631914882SAlex Richardson
120731914882SAlex Richardson""")
120831914882SAlex Richardsonend
120931914882SAlex Richardson
121031914882SAlex Richardson# ----------------------------------------------------------------------
121131914882SAlex Richardson# Main program.
121231914882SAlex Richardson
121331914882SAlex Richardsonfunction main()
121431914882SAlex Richardson    nargs = length(argwords)
121531914882SAlex Richardson    if nargs != 5 && nargs != 6
121631914882SAlex Richardson        error("usage: remez.jl <lo> <hi> <n> <d> <expr> [<weight>]\n" *
121731914882SAlex Richardson              "       run 'remez.jl --help' for more help")
121831914882SAlex Richardson    end
121931914882SAlex Richardson
122031914882SAlex Richardson    for preliminary_command in preliminary_commands
122131914882SAlex Richardson        eval(Meta.parse(preliminary_command))
122231914882SAlex Richardson    end
122331914882SAlex Richardson
122431914882SAlex Richardson    lo = BigFloat(eval(Meta.parse(argwords[1])))
122531914882SAlex Richardson    hi = BigFloat(eval(Meta.parse(argwords[2])))
122631914882SAlex Richardson    n = parse(Int,argwords[3])
122731914882SAlex Richardson    d = parse(Int,argwords[4])
122831914882SAlex Richardson    f = eval(Meta.parse("x -> " * argwords[5]))
122931914882SAlex Richardson
123031914882SAlex Richardson    # Wrap the user-provided function with a function of our own. This
123131914882SAlex Richardson    # arranges to detect silly FP values (inf,nan) early and diagnose
123231914882SAlex Richardson    # them sensibly, and also lets us log all evaluations of the
123331914882SAlex Richardson    # function in case you suspect it's doing the wrong thing at some
123431914882SAlex Richardson    # special-case point.
123531914882SAlex Richardson    function func(x)
123631914882SAlex Richardson        y = run(f,x)
123731914882SAlex Richardson        @debug("f", x, " -> ", y)
123831914882SAlex Richardson        if !isfinite(y)
123931914882SAlex Richardson            error("f(" * string(x) * ") returned non-finite value " * string(y))
124031914882SAlex Richardson        end
124131914882SAlex Richardson        return y
124231914882SAlex Richardson    end
124331914882SAlex Richardson
124431914882SAlex Richardson    if nargs == 6
124531914882SAlex Richardson        # Wrap the user-provided weight function similarly.
124631914882SAlex Richardson        w = eval(Meta.parse("(x,y) -> " * argwords[6]))
124731914882SAlex Richardson        function wrapped_weight(x,y)
124831914882SAlex Richardson            ww = run(w,x,y)
124931914882SAlex Richardson            if !isfinite(ww)
125031914882SAlex Richardson                error("w(" * string(x) * "," * string(y) *
125131914882SAlex Richardson                      ") returned non-finite value " * string(ww))
125231914882SAlex Richardson            end
125331914882SAlex Richardson            return ww
125431914882SAlex Richardson        end
125531914882SAlex Richardson        weight = wrapped_weight
125631914882SAlex Richardson    else
125731914882SAlex Richardson        weight = (x,y)->BigFloat(1)
125831914882SAlex Richardson    end
125931914882SAlex Richardson
126031914882SAlex Richardson    (nc, dc, e, extrema) = ratfn_minimax(func, (lo, hi), n, d, weight)
126131914882SAlex Richardson    if array_format
126231914882SAlex Richardson        if d == 0
126331914882SAlex Richardson            functext = join([string(x)*",\n" for x=nc],"")
126431914882SAlex Richardson        else
126531914882SAlex Richardson            # It's unclear how you should best format an array of
126631914882SAlex Richardson            # coefficients for a rational function, so I'll leave
126731914882SAlex Richardson            # implementing this option until I have a use case.
126831914882SAlex Richardson            error("--array unsupported for rational functions")
126931914882SAlex Richardson        end
127031914882SAlex Richardson    else
127131914882SAlex Richardson        functext = ratfn_to_string(nc, dc) * "\n"
127231914882SAlex Richardson    end
127331914882SAlex Richardson    if full_output
127431914882SAlex Richardson        # Print everything you might want to know about the function
127531914882SAlex Richardson        println("extrema = ", format_xylist(extrema))
127631914882SAlex Richardson        println("maxerror = ", string(e))
127731914882SAlex Richardson        if length(dc) > 1
127831914882SAlex Richardson            println("wellconditioning_numerator = ",
127931914882SAlex Richardson                    string(wellcond(nc, lo, hi)))
128031914882SAlex Richardson            println("wellconditioning_denominator = ",
128131914882SAlex Richardson                    string(wellcond(dc, lo, hi)))
128231914882SAlex Richardson        else
128331914882SAlex Richardson            println("wellconditioning = ", string(wellcond(nc, lo, hi)))
128431914882SAlex Richardson        end
128531914882SAlex Richardson        print("function = ", functext)
128631914882SAlex Richardson    else
128731914882SAlex Richardson        # Just print the text people will want to paste into their code
128831914882SAlex Richardson        print(functext)
128931914882SAlex Richardson    end
129031914882SAlex Richardsonend
129131914882SAlex Richardson
129231914882SAlex Richardson# ----------------------------------------------------------------------
129331914882SAlex Richardson# Top-level code: parse the argument list and decide what to do.
129431914882SAlex Richardson
129531914882SAlex Richardsonwhat_to_do = main
129631914882SAlex Richardson
129731914882SAlex Richardsondoing_opts = true
129831914882SAlex Richardsonargwords = array1d(String, 0)
129931914882SAlex Richardsonfor arg = ARGS
130031914882SAlex Richardson    global doing_opts, what_to_do, argwords
130131914882SAlex Richardson    global full_output, array_format, xvarname, floatsuffix, epsbits
130231914882SAlex Richardson    if doing_opts && startswith(arg, "-")
130331914882SAlex Richardson        if arg == "--"
130431914882SAlex Richardson            doing_opts = false
130531914882SAlex Richardson        elseif arg == "--help"
130631914882SAlex Richardson            what_to_do = help
130731914882SAlex Richardson        elseif arg == "--test"
130831914882SAlex Richardson            what_to_do = test
130931914882SAlex Richardson        elseif arg == "--full"
131031914882SAlex Richardson            full_output = true
131131914882SAlex Richardson        elseif arg == "--array"
131231914882SAlex Richardson            array_format = true
131331914882SAlex Richardson        elseif startswith(arg, "--debug=")
131431914882SAlex Richardson            enable_debug(arg[length("--debug=")+1:end])
131531914882SAlex Richardson        elseif startswith(arg, "--variable=")
131631914882SAlex Richardson            xvarname = arg[length("--variable=")+1:end]
131731914882SAlex Richardson        elseif startswith(arg, "--suffix=")
131831914882SAlex Richardson            floatsuffix = arg[length("--suffix=")+1:end]
131931914882SAlex Richardson        elseif startswith(arg, "--bits=")
132031914882SAlex Richardson            epsbits = parse(Int,arg[length("--bits=")+1:end])
132131914882SAlex Richardson        elseif startswith(arg, "--bigfloatbits=")
132231914882SAlex Richardson            set_bigfloat_precision(
132331914882SAlex Richardson                parse(Int,arg[length("--bigfloatbits=")+1:end]))
132431914882SAlex Richardson        elseif startswith(arg, "--pre=")
132531914882SAlex Richardson            push!(preliminary_commands, arg[length("--pre=")+1:end])
132631914882SAlex Richardson        else
132731914882SAlex Richardson            error("unrecognised option: ", arg)
132831914882SAlex Richardson        end
132931914882SAlex Richardson    else
133031914882SAlex Richardson        push!(argwords, arg)
133131914882SAlex Richardson    end
133231914882SAlex Richardsonend
133331914882SAlex Richardson
133431914882SAlex Richardsonwhat_to_do()
1335