# Licensed under a 3-clause BSD style license - see LICENSE.rst

"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
# pylint: disable=invalid-name, no-member
import pytest
import numpy as np
import unittest.mock as mk
import astropy.modeling.tabular as tabular_models

from numpy.testing import assert_allclose, assert_equal

from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.models import Gaussian2D
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import FittableModel
from astropy.modeling.parameters import Parameter
from astropy.modeling.polynomial import PolynomialBase
from astropy.modeling.powerlaws import SmoothlyBrokenPowerLaw1D
from astropy.modeling.parameters import InputParameterError
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY  # noqa
from .example_models import models_1D, models_2D


@pytest.mark.skipif('not HAS_SCIPY')
def test_custom_model(amplitude=4, frequency=1):

    def sine_model(x, amplitude=4, frequency=1):
        """
        Model function
        """
        return amplitude * np.sin(2 * np.pi * frequency * x)

    def sine_deriv(x, amplitude=4, frequency=1):
        """
        Jacobian of model function, i.e. derivative of the function with
        respect to the *parameters*
        """
        da = np.sin(2 * np.pi * frequency * x)
        df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
        return np.vstack((da, df))

    SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv)

    x = np.linspace(0, 4, 50)
    sin_model = SineModel()

    sin_model.evaluate(x, 5., 2.)
    sin_model.fit_deriv(x, 5., 2.)

    np.random.seed(0)
    data = sin_model(x) + np.random.rand(len(x)) - 0.5
    fitter = fitting.LevMarLSQFitter()
    model = fitter(sin_model, x, data)
    assert np.all((np.array([model.amplitude.value, model.frequency.value]) -
                   np.array([amplitude, frequency])) < 0.001)


def test_custom_model_init():
    @models.custom_model
    def SineModel(x, amplitude=4, frequency=1):
        """Model function"""

        return amplitude * np.sin(2 * np.pi * frequency * x)

    sin_model = SineModel(amplitude=2., frequency=0.5)
    assert sin_model.amplitude == 2.
    assert sin_model.frequency == 0.5


def test_custom_model_defaults():
    @models.custom_model
    def SineModel(x, amplitude=4, frequency=1):
        """Model function"""

        return amplitude * np.sin(2 * np.pi * frequency * x)

    sin_model = SineModel()
    assert SineModel.amplitude.default == 4
    assert SineModel.frequency.default == 1

    assert sin_model.amplitude == 4
    assert sin_model.frequency == 1


def test_inconsistent_input_shapes():
    g = Gaussian2D()
    x = np.arange(-1., 1, .2)
    y = x.copy()
    # check scalar input broadcasting works
    assert np.abs(g(x, 0) - g(x, 0 * x)).sum() == 0
    # check that array broadcasting works and yields an outer-product grid
    x.shape = (10, 1)
    y.shape = (1, 10)
    result = g(x, y)
    assert result.shape == (10, 10)


def test_custom_model_bounding_box():
    """Test bounding box evaluation for a 3D model"""

    def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
        rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
        val = (rsq < 1) * amp
        return val

    class Ellipsoid3D(models.custom_model(ellipsoid)):
        @property
        def bounding_box(self):
            return ((self.z0 - self.c, self.z0 + self.c),
                    (self.y0 - self.b, self.y0 + self.b),
                    (self.x0 - self.a, self.x0 + self.a))

    model = Ellipsoid3D()
    bbox = model.bounding_box

    zlim, ylim, xlim = bbox.bounding_box()
    dz, dy, dx = np.diff(bbox) / 2
    z1, y1, x1 = np.mgrid[slice(zlim[0], zlim[1] + 1),
                          slice(ylim[0], ylim[1] + 1),
                          slice(xlim[0], xlim[1] + 1)]
    z2, y2, x2 = np.mgrid[slice(zlim[0] - dz, zlim[1] + dz + 1),
                          slice(ylim[0] - dy, ylim[1] + dy + 1),
                          slice(xlim[0] - dx, xlim[1] + dx + 1)]

    arr = model(x2, y2, z2, with_bounding_box=True)
    sub_arr = model(x1, y1, z1, with_bounding_box=True)

    # check for flux agreement
    assert abs(np.nansum(arr) - np.nansum(sub_arr)) < np.nansum(arr) * 1e-7


class Fittable2DModelTester:
    """
    Test class for all two-dimensional parametric models.

    Test values have to be defined in example_models.py. It currently tests
    the model with different input types, evaluates the model at different
    positions and checks that it gives the correct values, and tests whether
    the model works with non-linear fitters.

    This can be used as a base class for user-defined model testing; see the
    usage sketch below.
    """
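    # Usage sketch (illustrative only, not executed): a user-defined 2D model
    # can reuse this tester by collecting its test values in a dict keyed by
    # the model class and parametrizing a subclass, mirroring the
    # TestFittable2DModels class defined near the end of this module.
    # ``MyDisk2D`` and the values below are hypothetical placeholders.
    #
    #     my_models_2D = {MyDisk2D: {'parameters': [1., 0., 0., 5.],
    #                                'x_values': [0.], 'y_values': [0.],
    #                                'z_values': [1.],
    #                                'x_lim': [-10, 10], 'y_lim': [-10, 10]}}
    #
    #     @pytest.mark.parametrize(('model_class', 'test_parameters'),
    #                              sorted(my_models_2D.items(), key=lambda x: str(x[0])))
    #     class TestMyModels(Fittable2DModelTester):
    #         pass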

    def setup_class(self):
        self.N = 100
        self.M = 100
        self.eval_error = 0.0001
        self.fit_error = 0.1
        self.x = 5.3
        self.y = 6.7
        self.x1 = np.arange(1, 10, .1)
        self.y1 = np.arange(1, 10, .1)
        self.y2, self.x2 = np.mgrid[:10, :8]

    def test_input2D(self, model_class, test_parameters):
        """Test model with different input types."""

        model = create_model(model_class, test_parameters)
        model(self.x, self.y)
        model(self.x1, self.y1)
        model(self.x2, self.y2)

    def test_eval2D(self, model_class, test_parameters):
        """Test model values at certain given points"""

        model = create_model(model_class, test_parameters)
        x = test_parameters['x_values']
        y = test_parameters['y_values']
        z = test_parameters['z_values']
        assert np.all(np.abs(model(x, y) - z) < self.eval_error)

    def test_bounding_box2D(self, model_class, test_parameters):
        """Test bounding box evaluation"""

        model = create_model(model_class, test_parameters)

        # testing setter
        model.bounding_box = ((-5, 5), (-5, 5))
        assert model.bounding_box == ((-5, 5), (-5, 5))

        model.bounding_box = None
        with pytest.raises(NotImplementedError):
            model.bounding_box

        # test the exception if dimensions don't match
        with pytest.raises(ValueError):
            model.bounding_box = (-5, 5)

        del model.bounding_box

        try:
            bbox = model.bounding_box
        except NotImplementedError:
            return

        ddx = 0.01
        ylim, xlim = bbox
        x1 = np.arange(xlim[0], xlim[1], ddx)
        y1 = np.arange(ylim[0], ylim[1], ddx)

        x2 = np.concatenate(([xlim[0] - idx * ddx for idx in range(10, 0, -1)],
                             x1,
                             [xlim[1] + idx * ddx for idx in range(1, 10)]))
        y2 = np.concatenate(([ylim[0] - idx * ddx for idx in range(10, 0, -1)],
                             y1,
                             [ylim[1] + idx * ddx for idx in range(1, 10)]))

        inside_bbox = model(x1, y1)
        outside_bbox = model(x2, y2, with_bounding_box=True)
        outside_bbox = outside_bbox[~np.isnan(outside_bbox)]

        assert np.all(inside_bbox == outside_bbox)

    def test_bounding_box2D_peak(self, model_class, test_parameters):
        if not test_parameters.pop('bbox_peak', False):
            return

        model = create_model(model_class, test_parameters)
        bbox = model.bounding_box

        ylim, xlim = bbox
        dy, dx = np.diff(bbox)/2
        y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1),
                          slice(xlim[0], xlim[1] + 1)]
        y2, x2 = np.mgrid[slice(ylim[0] - dy, ylim[1] + dy + 1),
                          slice(xlim[0] - dx, xlim[1] + dx + 1)]

        arr = model(x2, y2)
        sub_arr = model(x1, y1)

        # check for flux agreement
        assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_fitter2D(self, model_class, test_parameters):
        """Test if the parametric model works with the fitter."""

        x_lim = test_parameters['x_lim']
        y_lim = test_parameters['y_lim']

        parameters = test_parameters['parameters']
        model = create_model(model_class, test_parameters)

        if isinstance(parameters, dict):
            parameters = [parameters[name] for name in model.param_names]

        if "log_fit" in test_parameters:
            if test_parameters['log_fit']:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
                y = np.logspace(y_lim[0], y_lim[1], self.N)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)
            y = np.linspace(y_lim[0], y_lim[1], self.N)
        xv, yv = np.meshgrid(x, y)

        np.random.seed(0)
        # add 10% noise to the amplitude
        noise = np.random.rand(self.N, self.N) - 0.5
        data = model(xv, yv) + 0.1 * parameters[0] * noise
        fitter = fitting.LevMarLSQFitter()
        new_model = fitter(model, xv, yv, data)

        params = [getattr(new_model, name) for name in new_model.param_names]
        fixed = [param.fixed for param in params]
        expected = np.array([val for val, fixed in zip(parameters, fixed)
                             if not fixed])
        fitted = np.array([param.value for param in params
                           if not param.fixed])
        assert_allclose(fitted, expected,
                        atol=self.fit_error)

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_deriv_2D(self, model_class, test_parameters):
        """
        Test the derivative of a model by fitting with an estimated and
        analytical derivative.
        """

        x_lim = test_parameters['x_lim']
        y_lim = test_parameters['y_lim']

        if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
            return

        if "log_fit" in test_parameters:
            if test_parameters['log_fit']:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
                y = np.logspace(y_lim[0], y_lim[1], self.M)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)
            y = np.linspace(y_lim[0], y_lim[1], self.M)
        xv, yv = np.meshgrid(x, y)

        try:
            model_with_deriv = create_model(model_class, test_parameters,
                                            use_constraints=False,
                                            parameter_key='deriv_initial')
            model_no_deriv = create_model(model_class, test_parameters,
                                          use_constraints=False,
                                          parameter_key='deriv_initial')
            model = create_model(model_class, test_parameters,
                                 use_constraints=False,
                                 parameter_key='deriv_initial')
        except KeyError:
            model_with_deriv = create_model(model_class, test_parameters,
                                            use_constraints=False)
            model_no_deriv = create_model(model_class, test_parameters,
                                          use_constraints=False)
            model = create_model(model_class, test_parameters,
                                 use_constraints=False)

        # add 10% noise to the amplitude
        rsn = np.random.default_rng(0)
        amplitude = test_parameters['parameters'][0]
        n = 0.1 * amplitude * (rsn.random((self.M, self.N)) - 0.5)

        data = model(xv, yv) + n
        fitter_with_deriv = fitting.LevMarLSQFitter()
        new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv,
                                                 data)
        fitter_no_deriv = fitting.LevMarLSQFitter()
        new_model_no_deriv = fitter_no_deriv(model_no_deriv, xv, yv, data,
                                             estimate_jacobian=True)
        assert_allclose(new_model_with_deriv.parameters,
                        new_model_no_deriv.parameters,
                        rtol=0.1)


class Fittable1DModelTester:
    """
    Test class for all one-dimensional parametric models.

    Test values have to be defined in example_models.py. It currently tests
    the model with different input types, evaluates the model at different
    positions and checks that it gives the correct values, and tests whether
    the model works with non-linear fitters.

    This can be used as a base class for user-defined model testing; see the
    usage sketch below.
    """
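    # Usage sketch (illustrative only, not executed): as with the 2D tester
    # above, a user-defined 1D model can reuse this class by parametrizing a
    # subclass with a dict of test values, mirroring TestFittable1DModels
    # below.  ``MyLine1D`` and the values (a line y = 3.4 + 1.2 * x) are
    # hypothetical placeholders.
    #
    #     my_models_1D = {MyLine1D: {'parameters': [3.4, 1.2],
    #                                'x_values': [0., 1., 2.],
    #                                'y_values': [3.4, 4.6, 5.8],
    #                                'x_lim': [-5, 5]}}
    #
    #     @pytest.mark.parametrize(('model_class', 'test_parameters'),
    #                              sorted(my_models_1D.items(), key=lambda x: str(x[0])))
    #     class TestMyLineModels(Fittable1DModelTester):
    #         pass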

    def setup_class(self):
        self.N = 100
        self.M = 100
        self.eval_error = 0.0001
        self.fit_error = 0.11
        self.x = 5.3
        self.y = 6.7
        self.x1 = np.arange(1, 10, .1)
        self.y1 = np.arange(1, 10, .1)
        self.y2, self.x2 = np.mgrid[:10, :8]

    @pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
    def test_input1D(self, model_class, test_parameters):
        """Test model with different input types."""

        model = create_model(model_class, test_parameters)
        model(self.x)
        model(self.x1)
        model(self.x2)

    def test_eval1D(self, model_class, test_parameters):
        """
        Test model values at certain given points
        """
        model = create_model(model_class, test_parameters)
        x = test_parameters['x_values']
        y = test_parameters['y_values']
        assert_allclose(model(x), y, atol=self.eval_error)

    def test_bounding_box1D(self, model_class, test_parameters):
        """Test bounding box evaluation"""

        model = create_model(model_class, test_parameters)

        # testing setter
        model.bounding_box = (-5, 5)
        model.bounding_box = None

        with pytest.raises(NotImplementedError):
            model.bounding_box

        del model.bounding_box

        # test exception if dimensions don't match
        with pytest.raises(ValueError):
            model.bounding_box = 5

        try:
            bbox = model.bounding_box.bounding_box()
        except NotImplementedError:
            return

        ddx = 0.01
        x1 = np.arange(bbox[0], bbox[1], ddx)
        x2 = np.concatenate(([bbox[0] - idx * ddx for idx in range(10, 0, -1)],
                             x1,
                             [bbox[1] + idx * ddx for idx in range(1, 10)]))

        inside_bbox = model(x1)
        outside_bbox = model(x2, with_bounding_box=True)
        outside_bbox = outside_bbox[~np.isnan(outside_bbox)]

        assert np.all(inside_bbox == outside_bbox)

    def test_bounding_box1D_peak(self, model_class, test_parameters):
        if not test_parameters.pop('bbox_peak', False):
            return

        model = create_model(model_class, test_parameters)
        bbox = model.bounding_box

        if isinstance(model, models.Lorentz1D) or isinstance(model, models.Drude1D):
            rtol = 0.01  # 1% agreement is enough due to very extended wings
            ddx = 0.1  # Finer sampling to "integrate" flux for narrow peak
        else:
            rtol = 1e-7
            ddx = 1

        if isinstance(bbox, ModelBoundingBox):
            bbox = bbox.bounding_box()

        dx = np.diff(bbox) / 2
        x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)]
        x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)]
        arr = model(x2)
        sub_arr = model(x1)

        # check for flux agreement
        assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol

    @pytest.mark.skipif('not HAS_SCIPY')
    def test_fitter1D(self, model_class, test_parameters):
        """
        Test if the parametric model works with the fitter.
        """
        x_lim = test_parameters['x_lim']
        parameters = test_parameters['parameters']
        model = create_model(model_class, test_parameters)

        if isinstance(parameters, dict):
            parameters = [parameters[name] for name in model.param_names]

        if "log_fit" in test_parameters:
            if test_parameters['log_fit']:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)

        np.random.seed(0)
        # add 1% relative noise to the data
        relative_noise_amplitude = 0.01
        data = ((1 + relative_noise_amplitude * np.random.randn(len(x))) *
                model(x))
        fitter = fitting.LevMarLSQFitter()
        new_model = fitter(model, x, data)

        # Only check parameters that were free in the fit
        params = [getattr(new_model, name) for name in new_model.param_names]
        fixed = [param.fixed for param in params]
        expected = np.array([val for val, fixed in zip(parameters, fixed)
                             if not fixed])
        fitted = np.array([param.value for param in params
                           if not param.fixed])
        assert_allclose(fitted, expected, atol=self.fit_error)

    @pytest.mark.skipif('not HAS_SCIPY')
    @pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
    def test_deriv_1D(self, model_class, test_parameters):
        """
        Test the derivative of a model by comparing results with an estimated
        derivative.
        """

        x_lim = test_parameters['x_lim']

        if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
            return

        if "log_fit" in test_parameters:
            if test_parameters['log_fit']:
                x = np.logspace(x_lim[0], x_lim[1], self.N)
        else:
            x = np.linspace(x_lim[0], x_lim[1], self.N)

        parameters = test_parameters['parameters']
        model_with_deriv = create_model(model_class, test_parameters,
                                        use_constraints=False)
        model_no_deriv = create_model(model_class, test_parameters,
                                      use_constraints=False)

        # NOTE: PR 10644 replaced deprecated usage of RandomState but could not
        #       find a new seed that did not cause test failure, resorted to hardcoding.
        # add 10% noise to the amplitude
        rsn_rand_1234567890 = np.array([
            0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
            0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
            0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
            0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
            0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
            0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
            0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
            0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
            0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
            0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
            0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
            0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
            0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
            0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
            0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
            0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
            0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
            0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
            0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
            0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890])
        n = 0.1 * parameters[0] * (rsn_rand_1234567890 - 0.5)

        data = model_with_deriv(x) + n
        fitter_with_deriv = fitting.LevMarLSQFitter()
        new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
        fitter_no_deriv = fitting.LevMarLSQFitter()
        new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data,
                                             estimate_jacobian=True)
        assert_allclose(new_model_with_deriv.parameters,
                        new_model_no_deriv.parameters, atol=0.15)


def create_model(model_class, test_parameters, use_constraints=True,
                 parameter_key='parameters'):
    """Create instance of model class."""

    constraints = {}
    if issubclass(model_class, PolynomialBase):
        return model_class(**test_parameters[parameter_key])
    elif issubclass(model_class, FittableModel):
        if "requires_scipy" in test_parameters and not HAS_SCIPY:
            pytest.skip("SciPy not found")
        if use_constraints:
            if 'constraints' in test_parameters:
                constraints = test_parameters['constraints']
        return model_class(*test_parameters[parameter_key], **constraints)
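# For reference, the ``test_parameters`` dicts consumed by create_model and the
# tester classes above live in example_models.py.  A sketch of the keys this
# module looks for (everything except 'parameters' is optional; the values
# shown are illustrative only):
#
#     {'parameters': [amplitude, ...],           # positional model parameters
#      'constraints': {'fixed': {...}},          # extra keyword arguments
#      'x_values': [...], 'y_values': [...],     # evaluation checks
#      'z_values': [...],                        # expected output (2D models)
#      'x_lim': [lo, hi], 'y_lim': [lo, hi],     # fitting grid limits
#      'log_fit': True,                          # sample the grid with np.logspace
#      'deriv_initial': [...],                   # starting values for deriv tests
#      'bbox_peak': True,                        # enable bounding-box peak tests
#      'requires_scipy': True}                   # skip the model when SciPy is missing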


@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
@pytest.mark.parametrize(('model_class', 'test_parameters'),
                         sorted(models_1D.items(), key=lambda x: str(x[0])))
class TestFittable1DModels(Fittable1DModelTester):
    pass


@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.parametrize(('model_class', 'test_parameters'),
                         sorted(models_2D.items(), key=lambda x: str(x[0])))
class TestFittable2DModels(Fittable2DModelTester):
    pass


def test_ShiftModel():
    # Shift by a scalar
    m = models.Shift(42)
    assert m(0) == 42
    assert_equal(m([1, 2]), [43, 44])

    # Shift by a list
    m = models.Shift([42, 43], n_models=2)
    assert_equal(m(0), [42, 43])
    assert_equal(m([1, 2], model_set_axis=False),
                 [[43, 44], [44, 45]])


def test_ScaleModel():
    # Scale by a scalar
    m = models.Scale(42)
    assert m(0) == 0
    assert_equal(m([1, 2]), [42, 84])

    # Scale by a list
    m = models.Scale([42, 43], n_models=2)
    assert_equal(m(0), [0, 0])
    assert_equal(m([1, 2], model_set_axis=False),
                 [[42, 84], [43, 86]])


def test_voigt_model():
    """
    Currently just tests that the model peaks at its center (x_0).
    Regression test for https://github.com/astropy/astropy/issues/3942
    """

    m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
    x = np.arange(0, 10, 0.01)
    y = m(x)
    assert y[500] == y.max()  # y[500] is right at the center


def test_model_instance_repr():
    m = models.Gaussian1D(1.5, 2.5, 3.5)
    assert repr(m) == '<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>'


@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_interp_1d():
    """
    Test Tabular1D model.
    """
    points = np.arange(0, 5)
    values = [1., 10, 2, 45, -3]
    LookupTable = models.tabular_model(1)
    model = LookupTable(points=points, lookup_table=values)
    xnew = [0., .7, 1.4, 2.1, 3.9]
    ans1 = [1., 7.3, 6.8, 6.3, 1.8]
    assert_allclose(model(xnew), ans1)
    # Test evaluate without passing `points`.
    model = LookupTable(lookup_table=values)
    assert_allclose(model(xnew), ans1)
    # Test bounds error.
    xextrap = [0., .7, 1.4, 2.1, 3.9, 4.1]
    with pytest.raises(ValueError):
        model(xextrap)
    # test extrapolation and fill value
    model = LookupTable(lookup_table=values, bounds_error=False,
                        fill_value=None)
    assert_allclose(model(xextrap),
                    [1., 7.3, 6.8, 6.3, 1.8, -7.8])

    # Test unit support
    xnew = xnew * u.nm
    ans1 = ans1 * u.nJy
    model = LookupTable(points=points*u.nm, lookup_table=values*u.nJy)
    assert_quantity_allclose(model(xnew), ans1)
    assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
    assert model.bounding_box == (0 * u.nm, 4 * u.nm)

    # Test fill value unit conversion and unitless input on table with unit
    model = LookupTable([1, 2, 3], [10, 20, 30] * u.nJy, bounds_error=False,
                        fill_value=1e-33*(u.W / (u.m * u.m * u.Hz)))
    assert_quantity_allclose(model(np.arange(5)),
                             [100, 10, 20, 30, 100] * u.nJy)


@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_interp_2d():
    table = np.array([
        [-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],
        [-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],
        [-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],
        [-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],
        [-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131]])

    points = np.arange(0, 5)
    points = (points, points)

    xnew = np.array([0., .7, 1.4, 2.1, 3.9])
    LookupTable = models.tabular_model(2)
    model = LookupTable(points, table)
    znew = model(xnew, xnew)
    result = np.array(
        [-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])
    assert_allclose(znew, result, atol=1e-7)

    # test 2D arrays as input
    a = np.arange(12).reshape((3, 4))
    y, x = np.mgrid[:3, :4]
    t = models.Tabular2D(lookup_table=a)
    r = t(y, x)
    assert_allclose(a, r)

    with pytest.raises(ValueError):
        model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))
    with pytest.raises(ValueError):
        model = LookupTable(lookup_table=[1, 2, 3])
    with pytest.raises(NotImplementedError):
        model = LookupTable(n_models=2)
    with pytest.raises(ValueError):
        model = LookupTable(([1, 2], [3, 4]), [5, 6])
    with pytest.raises(ValueError):
        model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])
    with pytest.raises(ValueError):
        model = LookupTable(points, table, bounds_error=False,
                            fill_value=1*u.Jy)

    # Test unit support
    points = points[0] * u.nm
    points = (points, points)
    xnew = xnew * u.nm
    model = LookupTable(points, table * u.nJy)
    result = result * u.nJy
    assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
    xnew = xnew.to(u.m)
    assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
    bbox = (0 * u.nm, 4 * u.nm)
    bbox = (bbox, bbox)
    assert model.bounding_box == bbox


@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_nd():
    a = np.arange(24).reshape((2, 3, 4))
    x, y, z = np.mgrid[:2, :3, :4]
    tab = models.tabular_model(3)
    t = tab(lookup_table=a)
    result = t(x, y, z)
    assert_allclose(a, result)

    with pytest.raises(ValueError):
        models.tabular_model(0)


def test_with_bounding_box():
    """
    Test the option to evaluate a model respecting
    its bounding_box.
    """
    p = models.Polynomial2D(2) & models.Polynomial2D(2)
    m = models.Mapping((0, 1, 0, 1)) | p
    with NumpyRNGContext(1234567):
        m.parameters = np.random.rand(12)

    m.bounding_box = ((3, 9), (1, 8))
    x, y = np.mgrid[:10, :10]
    a, b = m(x, y)
    aw, bw = m(x, y, with_bounding_box=True)
    ind = (~np.isnan(aw)).nonzero()
    assert_allclose(a[ind], aw[ind])
    assert_allclose(b[ind], bw[ind])

    aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
    ind = (aw != 1000).nonzero()
    assert_allclose(a[ind], aw[ind])
    assert_allclose(b[ind], bw[ind])

    # test that the order of the bbox is not reversed for 1D models
    p = models.Polynomial1D(1, c0=12, c1=2.3)
    p.bounding_box = (0, 5)
    assert p(1) == p(1, with_bounding_box=True)

    t3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
    t3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
    assert_allclose(t3([1, 1], [7, 7], [3, 5], with_bounding_box=True),
                    [[np.nan, 11], [np.nan, 14], [np.nan, 4]])

    trans3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
    trans3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
    assert_allclose(trans3(1, 7, 5, with_bounding_box=True), [11, 14, 4])


@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_with_bounding_box():
    points = np.arange(5)
    values = np.array([1.5, 3.4, 6.7, 7, 32])
    t = models.Tabular1D(points, values)
    result = t(1, with_bounding_box=True)

    assert result == 3.4
    assert t.inverse(result, with_bounding_box=True) == 1.


@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_bounding_box_with_units():
    points = np.arange(5)*u.pix
    lt = np.arange(5)*u.AA
    t = models.Tabular1D(points, lt)
    result = t(1*u.pix, with_bounding_box=True)

    assert result == 1.*u.AA
    assert t.inverse(result, with_bounding_box=True) == 1*u.pix


@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular1d_inverse():
    """Test that the Tabular1D inverse is defined"""
    points = np.arange(5)
    values = np.array([1.5, 3.4, 6.7, 7, 32])
    t = models.Tabular1D(points, values)
    result = t.inverse((3.4, 6.7))
    assert_allclose(result, np.array((1., 2.)))

    # Check that it works for descending values in lookup_table
    t2 = models.Tabular1D(points, values[::-1])
    assert_allclose(t2.inverse.points[0], t2.lookup_table[::-1])

    result2 = t2.inverse((7, 6.7))
    assert_allclose(result2, np.array((1., 2.)))

    # Check that it errors on double-valued lookup_table
    points = np.arange(5)
    values = np.array([1.5, 3.4, 3.4, 32, 25])
    t = models.Tabular1D(points, values)
    with pytest.raises(NotImplementedError):
        t.inverse((3.4, 7.))

    # Check that Tabular2D.inverse raises an error
    table = np.arange(5*5).reshape(5, 5)
    points = np.arange(0, 5)
    points = (points, points)
    t3 = models.Tabular2D(points=points, lookup_table=table)
    with pytest.raises(NotImplementedError):
        t3.inverse((3, 3))

    # Check that it uses the same kwargs as the original model
    points = np.arange(5)
    values = np.array([1.5, 3.4, 6.7, 7, 32])
    t = models.Tabular1D(points, values)
    with pytest.raises(ValueError):
        t.inverse(100)
    t = models.Tabular1D(points, values, bounds_error=False, fill_value=None)
    result = t.inverse(100)
    assert_allclose(t(result), 100)


@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_grid_shape_mismatch_error():
    points = np.arange(5)
    lt = np.mgrid[0:5, 0:5][0]
    with pytest.raises(ValueError) as err:
        models.Tabular2D(points, lt)
    assert str(err.value) ==\
        "Expected grid points in 2 directions, got 5."


@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_repr():
    points = np.arange(5)
    lt = np.arange(5)
    t = models.Tabular1D(points, lt)
    assert repr(t) ==\
        "<Tabular1D(points=(array([0, 1, 2, 3, 4]),), lookup_table=[0 1 2 3 4])>"

    table = np.arange(5*5).reshape(5, 5)
    points = np.arange(0, 5)
    points = (points, points)
    t = models.Tabular2D(points=points, lookup_table=table)
    assert repr(t) ==\
        "<Tabular2D(points=(array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4])), " +\
        "lookup_table=[[ 0  1  2  3  4]\n" +\
        " [ 5  6  7  8  9]\n" +\
        " [10 11 12 13 14]\n" +\
        " [15 16 17 18 19]\n" +\
        " [20 21 22 23 24]])>"


@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_str():
    points = np.arange(5)
    lt = np.arange(5)
    t = models.Tabular1D(points, lt)
    assert str(t) ==\
        "Model: Tabular1D\n" +\
        "N_inputs: 1\n" +\
        "N_outputs: 1\n" +\
        "Parameters: \n" +\
        "  points: (array([0, 1, 2, 3, 4]),)\n" +\
        "  lookup_table: [0 1 2 3 4]\n" +\
        "  method: linear\n" +\
        "  fill_value: nan\n" +\
        "  bounds_error: True"

    table = np.arange(5*5).reshape(5, 5)
    points = np.arange(0, 5)
    points = (points, points)
    t = models.Tabular2D(points=points, lookup_table=table)
    assert str(t) ==\
        "Model: Tabular2D\n" +\
        "N_inputs: 2\n" +\
        "N_outputs: 1\n" +\
        "Parameters: \n" +\
        "  points: (array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4]))\n" +\
        "  lookup_table: [[ 0  1  2  3  4]\n" +\
        " [ 5  6  7  8  9]\n" +\
        " [10 11 12 13 14]\n" +\
        " [15 16 17 18 19]\n" +\
        " [20 21 22 23 24]]\n" +\
        "  method: linear\n" +\
        "  fill_value: nan\n" +\
        "  bounds_error: True"


@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_evaluate():
    points = np.arange(5)
    lt = np.arange(5)[::-1]
    t = models.Tabular1D(points, lt)
    assert (t.evaluate([1, 2, 3]) == [3, 2, 1]).all()
    assert (t.evaluate(np.array([1, 2, 3]) * u.m) == [3, 2, 1]).all()

    t.n_outputs = 2
    value = [np.array([3, 2, 1]), np.array([1, 2, 3])]
    with mk.patch.object(tabular_models, 'interpn', autospec=True, return_value=value) as mkInterpn:
        outputs = t.evaluate([1, 2, 3])
        for index, output in enumerate(outputs):
            assert np.all(value[index] == output)
        assert mkInterpn.call_count == 1


@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_module_name():
    """
    The module name must be set manually because
    these classes are created dynamically.
    """
    for model in [models.Tabular1D, models.Tabular2D]:
        assert model.__module__ == "astropy.modeling.tabular"


class classmodel(FittableModel):
    f = Parameter(default=1)
    x = Parameter(default=0)
    y = Parameter(default=2)

    def __init__(self, f=f.default, x=x.default, y=y.default):
        super().__init__(f, x, y)

    def evaluate(self):
        pass


class subclassmodel(classmodel):
    f = Parameter(default=3, fixed=True)
    x = Parameter(default=10)
    y = Parameter(default=12)
    h = Parameter(default=5)

    def __init__(self, f=f.default, x=x.default, y=y.default, h=h.default):
        super().__init__(f, x, y)

    def evaluate(self):
        pass


def test_parameter_inheritance():
    b = subclassmodel()
    assert b.param_names == ('f', 'x', 'y', 'h')
    assert b.h == 5
    assert b.f == 3
    assert b.f.fixed == True  # noqa: E712


def test_parameter_description():

    model = models.Gaussian1D(1.5, 2.5, 3.5)
    assert model.amplitude._description == "Amplitude (peak value) of the Gaussian"
    assert model.mean._description == "Position of peak (Gaussian)"

    model = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
    assert model.amplitude_L._description == "The Lorentzian amplitude"
    assert model.fwhm_L._description == "The Lorentzian full width at half maximum"
    assert model.fwhm_G._description == "The Gaussian full width at half maximum"


def test_SmoothlyBrokenPowerLaw1D_validators():
    with pytest.raises(InputParameterError) as err:
        SmoothlyBrokenPowerLaw1D(amplitude=-1)
    assert str(err.value) ==\
        "amplitude parameter must be > 0"

    with pytest.raises(InputParameterError) as err:
        SmoothlyBrokenPowerLaw1D(delta=0)
    assert str(err.value) ==\
        "delta parameter must be >= 0.001"


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
def test_SmoothlyBrokenPowerLaw1D_fit_deriv():
    x_lim = [0.01, 100]

    x = np.logspace(x_lim[0], x_lim[1], 100)

    parameters = {'parameters': [1, 10, -2, 2, 0.5],
                  'constraints': {'fixed': {'x_break': True, 'delta': True}}}
    model_with_deriv = create_model(SmoothlyBrokenPowerLaw1D, parameters,
                                    use_constraints=False)
    model_no_deriv = create_model(SmoothlyBrokenPowerLaw1D, parameters,
                                  use_constraints=False)

    # NOTE: PR 10644 replaced deprecated usage of RandomState but could not
    #       find a new seed that did not cause test failure, resorted to hardcoding.
    # add 10% noise to the amplitude
    rsn_rand_1234567890 = np.array([
        0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
        0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
        0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
        0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
        0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
        0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
        0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
        0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
        0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
        0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
        0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
        0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
        0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
        0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
        0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
        0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
        0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
        0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
        0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
        0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890])
    n = 0.1 * parameters['parameters'][0] * (rsn_rand_1234567890 - 0.5)

    data = model_with_deriv(x) + n
    fitter_with_deriv = fitting.LevMarLSQFitter()
    new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
    fitter_no_deriv = fitting.LevMarLSQFitter()
    new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data,
                                         estimate_jacobian=True)
    assert_allclose(new_model_with_deriv.parameters,
                    new_model_no_deriv.parameters, atol=0.5)