1"""
2Unit tests for trust-region optimization routines.
3
4To run it in its simplest form::
5  nosetests test_optimize.py
6
7"""
import itertools
import numpy as np
from numpy.testing import assert_, assert_equal, assert_allclose
from scipy.optimize import (minimize, rosen, rosen_der, rosen_hess,
                            rosen_hess_prod, BFGS)
from scipy.optimize._differentiable_functions import FD_METHODS
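# FD_METHODS is the tuple of finite-difference schemes that `hess` may be
# given as a string, i.e. ('2-point', '3-point', 'cs') at the time of writing.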
import pytest


class Accumulator:
    """This is for testing callbacks."""
    def __init__(self):
        self.count = 0
        self.accum = None

    def __call__(self, x):
        self.count += 1
        if self.accum is None:
            self.accum = np.array(x)
        else:
            self.accum += x

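# A minimal usage sketch for the callback above (illustrative only, not part
# of the test suite): `minimize` invokes the callback once per iteration with
# the current parameter vector, so after a run the accumulator holds the
# iteration count and the elementwise sum of all iterates.
#
#     acc = Accumulator()
#     minimize(rosen, [2.0, 2.0], jac=rosen_der, hess=rosen_hess,
#              method='dogleg', callback=acc)
#     acc.count  # number of iterations performed
#     acc.accum  # x_1 + x_2 + ... summed elementwise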

class TestTrustRegionSolvers:

    def setup_method(self):
        self.x_opt = [1.0, 1.0]
        self.easy_guess = [2.0, 2.0]
        self.hard_guess = [-1.2, 1.0]

    def test_dogleg_accuracy(self):
        # test the accuracy and the return_all option
        x0 = self.hard_guess
        r = minimize(rosen, x0, jac=rosen_der, hess=rosen_hess, tol=1e-8,
                     method='dogleg', options={'return_all': True})
        assert_allclose(x0, r['allvecs'][0])
        assert_allclose(r['x'], r['allvecs'][-1])
        assert_allclose(r['x'], self.x_opt)

    def test_dogleg_callback(self):
        # test the callback mechanism and the maxiter and return_all options
        accumulator = Accumulator()
        maxiter = 5
        r = minimize(rosen, self.hard_guess, jac=rosen_der, hess=rosen_hess,
                     callback=accumulator, method='dogleg',
                     options={'return_all': True, 'maxiter': maxiter})
        assert_equal(accumulator.count, maxiter)
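        # r['allvecs'] additionally records the starting point, so it has one
        # more entry than there were callback invocations, and its tail is
        # exactly the sequence of iterates summed by the callback.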
        assert_equal(len(r['allvecs']), maxiter + 1)
        assert_allclose(r['x'], r['allvecs'][-1])
        assert_allclose(sum(r['allvecs'][1:]), accumulator.accum)

    def test_solver_concordance(self):
        # Assert that dogleg uses fewer iterations than newton-cg on the
        # Rosenbrock test function. This does not necessarily mean that
        # dogleg is faster or better than newton-cg, even for this function,
        # and especially not for other test functions.
        f = rosen
        g = rosen_der
        h = rosen_hess
        for x0 in (self.easy_guess, self.hard_guess):
            r_dogleg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                                method='dogleg', options={'return_all': True})
            r_trust_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                                   method='trust-ncg',
                                   options={'return_all': True})
            r_trust_krylov = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                                      method='trust-krylov',
                                      options={'return_all': True})
            r_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                             method='newton-cg', options={'return_all': True})
            r_trust_exact = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                                     method='trust-exact',
                                     options={'return_all': True})
            assert_allclose(self.x_opt, r_dogleg['x'])
            assert_allclose(self.x_opt, r_trust_ncg['x'])
            assert_allclose(self.x_opt, r_trust_krylov['x'])
            assert_allclose(self.x_opt, r_ncg['x'])
            assert_allclose(self.x_opt, r_trust_exact['x'])
            assert_(len(r_dogleg['allvecs']) < len(r_ncg['allvecs']))

    def test_trust_ncg_hessp(self):
        for x0 in (self.easy_guess, self.hard_guess, self.x_opt):
            r = minimize(rosen, x0, jac=rosen_der, hessp=rosen_hess_prod,
                         tol=1e-8, method='trust-ncg')
            assert_allclose(self.x_opt, r['x'])

    def test_trust_ncg_start_in_optimum(self):
        r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
                     tol=1e-8, method='trust-ncg')
        assert_allclose(self.x_opt, r['x'])

    def test_trust_krylov_start_in_optimum(self):
        r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
                     tol=1e-8, method='trust-krylov')
        assert_allclose(self.x_opt, r['x'])

    def test_trust_exact_start_in_optimum(self):
        r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
                     tol=1e-8, method='trust-exact')
        assert_allclose(self.x_opt, r['x'])

    def test_finite_differences(self):
        # If the Hessian is estimated by finite differences or by a
        # HessianUpdateStrategy (and no hessp is provided), a hessp can
        # still be constructed from it.
        # GH13754
        methods = ["trust-ncg", "trust-krylov", "dogleg"]
        product = itertools.product(
            FD_METHODS + (BFGS,),
            methods
        )
        # A Hessian must be supplied for the trust-region methods.
        for method in methods:
            with pytest.raises(ValueError):
                minimize(rosen, x0=self.x_opt, jac=rosen_der, method=method)

        # Estimate the Hessian by finite differences or BFGS. In
        # _trustregion.py this creates a hessp from the
        # LinearOperator/HessianUpdateStrategy returned by ScalarFunction.
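        # Roughly the wrapping that happens internally (a sketch for
        # orientation only; the actual code lives in _trustregion.py and
        # `sf` here is an assumed name for the ScalarFunction instance):
        #
        #     def hessp(x, p, *args):
        #         return sf.hess(x).dot(p)  # matvec via LinearOperator/BFGS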
        for fd, method in product:
            hess = fd
            if fd is BFGS:
                # use a fresh BFGS instance for each run so state from one
                # minimization cannot leak into the next
                hess = fd()

            r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=hess,
                         tol=1e-8, method=method)
            assert_allclose(self.x_opt, r['x'])