1"""
2Rolling OLS and WLS
3
4Implements an efficient rolling estimator that avoids repeated matrix
5multiplication.
6
7Copyright (c) 2019 Kevin Sheppard
8License: 3-clause BSD
9"""
from statsmodels.compat.numpy import lstsq
from statsmodels.compat.pandas import (
    Appender,
    Substitution,
    cache_readonly,
    call_cached_func,
    get_cached_doc,
)

from collections import namedtuple

import numpy as np
from pandas import DataFrame, MultiIndex, Series
from scipy import stats

from statsmodels.base import model
from statsmodels.base.model import LikelihoodModelResults, Model
from statsmodels.regression.linear_model import (
    RegressionModel,
    RegressionResults,
)
from statsmodels.tools.validation import array_like, int_like, string_like


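# strip4 removes the 4-space indent carried by the lines of
# model._model_params_doc so that the shared parameter documentation can be
# reused at module level in the docstrings assembled below.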
def strip4(line):
    if line.startswith(" "):
        return line[4:]
    return line


RollingStore = namedtuple(
    "RollingStore",
    [
        "params",
        "ssr",
        "llf",
        "nobs",
        "s2",
        "xpxi",
        "xeex",
        "centered_tss",
        "uncentered_tss",
    ],
)
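# RollingStore holds one preallocated array per statistic, each with one
# entry (or slice) per observation.  _fit_single writes the results for the
# window ending at observation ``idx`` into position ``idx - 1``; positions
# that are never fit remain NaN.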
54
55common_params = "\n".join(map(strip4, model._model_params_doc.split("\n")))
56window_parameters = """\
57window : int
58    Length of the rolling window. Must be strictly larger than the number
59    of variables in the model.
60"""
61
62weight_parameters = """
63weights : array_like, optional
64    A 1d array of weights.  If you supply 1/W then the variables are
65    pre- multiplied by 1/sqrt(W).  If no weights are supplied the
66    default value is 1 and WLS results are the same as OLS.
67"""
68
69_missing_param_doc = """\
70min_nobs : {int, None}
71    Minimum number of observations required to estimate a model when
72    data are missing.  If None, the minimum depends on the number of
73    regressors in the model. Must be smaller than window.
74missing : str, default "drop"
75    Available options are "drop", "skip" and "raise". If "drop", any
76    observations with nans are dropped and the estimates are computed using
77    only the non-missing values in each window. If 'skip' blocks containing
78    missing values are skipped and the corresponding results contains NaN.
79    If 'raise', an error is raised. Default is 'drop'.
80expanding : bool, default False
81    If True, then the initial observations after min_nobs are filled using
82    an expanding scheme until ``window`` observations are available, after
83    which rolling is used.
84"""


extra_base = _missing_param_doc
extra_parameters = window_parameters + weight_parameters + extra_base

_doc = """
Rolling %(model_type)s Least Squares

%(parameters)s
%(extra_parameters)s

See Also
--------
statsmodels.regression.linear_model.%(model)s
    %(model)s estimation and parameter testing.

Notes
-----
Tested against %(model)s for accuracy.

Results may differ from %(model)s applied to windows of data if this
model contains an implicit constant (i.e., includes dummies for all
categories) rather than an explicit constant (e.g., a column of 1s).

Examples
--------
>>> from statsmodels.regression.rolling import Rolling%(model)s
>>> from statsmodels.datasets import longley
>>> from statsmodels.tools import add_constant
>>> data = longley.load()
>>> exog = add_constant(data.exog, prepend=False)
>>> mod = Rolling%(model)s(data.endog, exog)
>>> rolling_res = mod.fit(reset=50)

Use params_only to skip all calculations except parameter estimation

>>> rolling_params = mod.fit(params_only=True)

Use expanding and min_nobs to fill the initial results using an
expanding scheme until ``window`` observations are available, after
which rolling is used.

>>> mod = Rolling%(model)s(data.endog, exog, window=60, min_nobs=12,
... expanding=True)
>>> rolling_res = mod.fit()
"""


@Substitution(
    model_type="Weighted",
    model="WLS",
    parameters=common_params,
    extra_parameters=extra_parameters,
)
@Appender(_doc)
class RollingWLS:
    def __init__(
        self,
        endog,
        exog,
        window=None,
        *,
        weights=None,
        min_nobs=None,
        missing="drop",
        expanding=False
    ):
        # Call Model.__init__ twice: the first pass uses constant detection
        # and missing-value handling, while the second pass keeps every
        # observation so rolling windows align with the original sample.
        missing = string_like(
            missing, "missing", options=("drop", "raise", "skip")
        )
        temp_msng = "drop" if missing != "raise" else "raise"
        Model.__init__(self, endog, exog, missing=temp_msng, hasconst=None)
        k_const = self.k_constant
        const_idx = self.data.const_idx
        Model.__init__(self, endog, exog, missing="none", hasconst=False)
        self.k_constant = k_const
        self.data.const_idx = const_idx
        self._y = array_like(endog, "endog")
        nobs = self._y.shape[0]
        self._x = array_like(exog, "exog", ndim=2, shape=(nobs, None))
        window = int_like(window, "window", optional=True)
        weights = array_like(weights, "weights", optional=True, shape=(nobs,))
        self._window = window if window is not None else self._y.shape[0]
        self._weighted = weights is not None
        self._weights = np.ones(nobs) if weights is None else weights
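        # WLS via transformation: scaling endog and exog by sqrt(weights)
        # turns the weighted problem into an ordinary one, so the rolling
        # machinery below only ever touches the pre-whitened data (wy, wx).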
        w12 = np.sqrt(self._weights)
        self._wy = w12 * self._y
        self._wx = w12[:, None] * self._x

        min_nobs = int_like(min_nobs, "min_nobs", optional=True)
        self._min_nobs = min_nobs if min_nobs is not None else self._x.shape[1]
        if self._min_nobs < self._x.shape[1] or self._min_nobs > self._window:
            raise ValueError(
                "min_nobs must be at least the number of regressors in "
                "the model and cannot be larger than window"
            )

        self._expanding = expanding

        self._is_nan = np.zeros_like(self._y, dtype=bool)
        self._has_nan = self._find_nans()
        self.const_idx = self.data.const_idx
        self._skip_missing = missing == "skip"

    def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
        return Model._handle_data(
            self, endog, exog, missing, hasconst, **kwargs
        )

    def _find_nans(self):
        nans = np.isnan(self._y)
        nans |= np.any(np.isnan(self._x), axis=1)
        nans |= np.isnan(self._weights)
        self._is_nan[:] = nans
        has_nan = np.cumsum(nans)
        w = self._window
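        # Rolling NaN detection via cumulative sums: with c = cumsum(nans),
        # the number of NaN rows in the window ending at observation i is
        # c[i] - c[i - w], computed for every i at once by the differencing
        # below.  A small worked sketch with w = 3:
        #
        #     nans = [0, 1, 0, 0, 0]  ->  c = [0, 1, 1, 1, 1]
        #     window ending at 3 covers rows 1..3: c[3] - c[0] = 1 (has NaN)
        #     window ending at 4 covers rows 2..4: c[4] - c[1] = 0 (clean)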
        has_nan[w:] = has_nan[w:] - has_nan[:-w]
        if self._expanding:
            has_nan[: self._min_nobs] = False
        else:
            has_nan[: w - 1] = False

        return has_nan.astype(bool)

    def _get_data(self, idx):
        window = self._window
        if idx >= window:
            loc = slice(idx - window, idx)
        else:
            loc = slice(idx)
        y = self._y[loc]
        wy = self._wy[loc]
        wx = self._wx[loc]
        weights = self._weights[loc]
        missing = self._is_nan[loc]
        not_missing = ~missing
        if np.any(missing):
            y = y[not_missing]
            wy = wy[not_missing]
            wx = wx[not_missing]
            weights = weights[not_missing]
        return y, wy, wx, weights, not_missing

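    # _fit_single estimates the model for the window ending at observation
    # ``idx`` (1-based) from the current inner products wxpwx = X'X and
    # wxpwy = X'y, storing results at position idx - 1.  It returns silently
    # when the window holds fewer than min_nobs observations or X'X is
    # singular.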
    def _fit_single(self, idx, wxpwx, wxpwy, nobs, store, params_only, method):
        if nobs < self._min_nobs:
            return
        try:
            if (method == "inv") or not params_only:
                wxpwxi = np.linalg.inv(wxpwx)
            if method == "inv":
                params = wxpwxi @ wxpwy
            else:
                _, wy, wx, _, _ = self._get_data(idx)
                if method == "lstsq":
                    params = lstsq(wx, wy)[0]
                else:  # 'pinv'
                    wxpwxiwxp = np.linalg.pinv(wx)
                    params = wxpwxiwxp @ wy

        except np.linalg.LinAlgError:
            return
        store.params[idx - 1] = params
        if params_only:
            return
        y, wy, wx, weights, _ = self._get_data(idx)

        wresid, ssr, llf = self._loglike(params, wy, wx, weights, nobs)
        wxwresid = wx * wresid[:, None]
        wxepwxe = wxwresid.T @ wxwresid
        tot_params = wx.shape[1]
        s2 = ssr / (nobs - tot_params)

        centered_tss, uncentered_tss = self._sum_of_squares(y, wy, weights)

        store.ssr[idx - 1] = ssr
        store.llf[idx - 1] = llf
        store.nobs[idx - 1] = nobs
        store.s2[idx - 1] = s2
        store.xpxi[idx - 1] = wxpwxi
        store.xeex[idx - 1] = wxepwxe
        store.centered_tss[idx - 1] = centered_tss
        store.uncentered_tss[idx - 1] = uncentered_tss

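    # _loglike evaluates the concentrated Gaussian log-likelihood.  The lines
    # below are an algebraic rearrangement of the usual form
    #
    #     llf = -nobs / 2 * (log(2 * pi) + log(ssr / nobs) + 1)
    #           + 0.5 * sum(log(weights))
    #
    # grouped so that nobs2 = nobs / 2 is reused.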
    def _loglike(self, params, wy, wx, weights, nobs):
        nobs2 = nobs / 2.0
        wresid = wy - wx @ params
        ssr = np.sum(wresid ** 2, axis=0)
        llf = -np.log(ssr) * nobs2  # concentrated likelihood
        llf -= (1 + np.log(np.pi / nobs2)) * nobs2  # with constant
        llf += 0.5 * np.sum(np.log(weights))
        return wresid, ssr, llf

    def _sum_of_squares(self, y, wy, weights):
        mean = np.average(y, weights=weights)
        centered_tss = np.sum(weights * (y - mean) ** 2)
        uncentered_tss = np.dot(wy, wy)
        return centered_tss, uncentered_tss

    def _reset(self, idx):
        """Compute xpx and xpy using a single dot product"""
        _, wy, wx, _, not_missing = self._get_data(idx)
        nobs = not_missing.sum()
        xpx = wx.T @ wx
        xpy = wx.T @ wy
        return xpx, xpy, nobs

    def fit(
        self,
        method="inv",
        cov_type="nonrobust",
        cov_kwds=None,
        reset=None,
        use_t=False,
        params_only=False,
    ):
        """
        Estimate model parameters.

        Parameters
        ----------
        method : {'inv', 'lstsq', 'pinv'}
            Method to use when computing the model parameters.

            * 'inv' - use moving window inner-products and matrix inversion.
              This method is the fastest, but may be less accurate than the
              other methods.
            * 'lstsq' - Use numpy.linalg.lstsq.
            * 'pinv' - Use numpy.linalg.pinv. This method matches the default
              estimator in non-moving regression estimators.
        cov_type : {'nonrobust', 'HCCM', 'HC0'}
            Covariance estimator:

            * nonrobust - The classic OLS covariance estimator
            * HCCM, HC0 - White heteroskedasticity robust covariance
        cov_kwds : dict
            Unused
        reset : int, optional
            Interval to recompute the moving window inner products used to
            estimate the model parameters. Smaller values improve accuracy,
            although in practice this setting rarely needs to be changed.
        use_t : bool, optional
            Flag indicating to use the Student's t distribution when computing
            p-values.
        params_only : bool, optional
            Flag indicating that only parameters should be computed. Avoids
            calculating all other statistics or performing inference.

        Returns
        -------
        RollingRegressionResults
            Estimation results where all pre-sample values are nan-filled.
        """
        method = string_like(
            method, "method", options=("inv", "lstsq", "pinv")
        )
        reset = int_like(reset, "reset", optional=True)
        reset = self._y.shape[0] if reset is None else reset
        if reset < 1:
            raise ValueError("reset must be a positive integer")

        nobs, k = self._x.shape
        store = RollingStore(
            params=np.full((nobs, k), np.nan),
            ssr=np.full(nobs, np.nan),
            llf=np.full(nobs, np.nan),
            nobs=np.zeros(nobs, dtype=int),
            s2=np.full(nobs, np.nan),
            xpxi=np.full((nobs, k, k), np.nan),
            xeex=np.full((nobs, k, k), np.nan),
            centered_tss=np.full(nobs, np.nan),
            uncentered_tss=np.full(nobs, np.nan),
        )
        w = self._window
        first = self._min_nobs if self._expanding else w
        xpx, xpy, nobs = self._reset(first)
        if not (self._has_nan[first - 1] and self._skip_missing):
            self._fit_single(first, xpx, xpy, nobs, store, params_only, method)
        wx, wy = self._wx, self._wy
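        # Main loop: slide the window forward one observation at a time,
        # downdating the inner products with the row that leaves the window
        # and updating them with the row that enters.  Every ``reset`` steps
        # the products are recomputed from scratch to limit accumulated
        # floating-point error.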
        for i in range(first + 1, self._x.shape[0] + 1):
            if self._has_nan[i - 1] and self._skip_missing:
                continue
            if i % reset == 0:
                xpx, xpy, nobs = self._reset(i)
            else:
                if not self._is_nan[i - w - 1] and i > w:
                    remove_x = wx[i - w - 1 : i - w]
                    xpx -= remove_x.T @ remove_x
                    xpy -= remove_x.T @ wy[i - w - 1 : i - w]
                    nobs -= 1
                if not self._is_nan[i - 1]:
                    add_x = wx[i - 1 : i]
                    xpx += add_x.T @ add_x
                    xpy += add_x.T @ wy[i - 1 : i]
                    nobs += 1

            self._fit_single(i, xpx, xpy, nobs, store, params_only, method)

        return RollingRegressionResults(
            self, store, self.k_constant, use_t, cov_type
        )

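    # Typical formula-based construction (a sketch; ``df`` is a hypothetical
    # DataFrame with columns "y" and "x"):
    #
    #     mod = RollingWLS.from_formula("y ~ x", data=df, window=12)
    #     res = mod.fit()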
    @classmethod
    @Appender(Model.from_formula.__doc__)
    def from_formula(
        cls, formula, data, window, weights=None, subset=None, *args, **kwargs
    ):
        if subset is not None:
            data = data.loc[subset]
        eval_env = kwargs.pop("eval_env", None)
        if eval_env is None:
            eval_env = 2
        elif eval_env == -1:
            from patsy import EvalEnvironment

            eval_env = EvalEnvironment({})
        else:
            eval_env += 1  # we're going down the stack again
        missing = kwargs.get("missing", "skip")
        from patsy import NAAction, dmatrices

        na_action = NAAction(on_NA="raise", NA_types=[])
        result = dmatrices(
            formula,
            data,
            eval_env,
            return_type="dataframe",
            NA_action=na_action,
        )

        endog, exog = result
        if (endog.ndim > 1 and endog.shape[1] > 1) or endog.ndim > 2:
            raise ValueError(
                "endog has evaluated to an array with multiple "
                "columns that has shape {0}. This occurs when "
                "the variable converted to endog is non-numeric"
                " (e.g., bool or str).".format(endog.shape)
            )

        kwargs.update({"missing": missing, "window": window})
        if weights is not None:
            kwargs["weights"] = weights
        mod = cls(endog, exog, *args, **kwargs)
        mod.formula = formula
        # since we got a dataframe, attach the original
        mod.data.frame = data
        return mod


extra_parameters = window_parameters + extra_base

@Substitution(
    model_type="Ordinary",
    model="OLS",
    parameters=common_params,
    extra_parameters=extra_parameters,
)
@Appender(_doc)
class RollingOLS(RollingWLS):
    def __init__(
        self,
        endog,
        exog,
        window=None,
        *,
        min_nobs=None,
        missing="drop",
        expanding=False
    ):
        super().__init__(
            endog,
            exog,
            window,
            weights=None,
            min_nobs=min_nobs,
            missing=missing,
            expanding=expanding,
        )


class RollingRegressionResults:
    """
    Results from rolling regressions

    Parameters
    ----------
    model : RollingWLS
        Model instance
    store : RollingStore
        Container for raw moving window results
    k_constant : bool
        Flag indicating that the model contains a constant
    use_t : bool
        Flag indicating to use the Student's t distribution when computing
        p-values.
    cov_type : str
        Name of covariance estimator
    """

    _data_in_cache = tuple()

    def __init__(
        self, model, store: RollingStore, k_constant, use_t, cov_type
    ):
        self.model = model
        self._params = store.params
        self._ssr = store.ssr
        self._llf = store.llf
        self._nobs = store.nobs
        self._s2 = store.s2
        self._xpxi = store.xpxi
        self._xepxe = store.xeex
        self._centered_tss = store.centered_tss
        self._uncentered_tss = store.uncentered_tss
        self._k_constant = k_constant
        self._nvar = self._xpxi.shape[-1]
        if use_t is None:
            use_t = cov_type == "nonrobust"
        self._use_t = use_t
        self._cov_type = cov_type
        self._use_pandas = self.model.data.row_labels is not None
        self._data_attr = []
        self._cache = {}

    def _wrap(self, val):
        """Wrap output as pandas Series or DataFrames as needed"""
        if not self._use_pandas:
            return val
        col_names = self.model.data.param_names
        row_names = self.model.data.row_labels
        if val.ndim == 1:
            return Series(val, index=row_names)
        if val.ndim == 2:
            return DataFrame(val, columns=col_names, index=row_names)
        else:  # ndim == 3
            mi = MultiIndex.from_product((row_names, col_names))
            val = np.reshape(val, (-1, val.shape[-1]))
            return DataFrame(val, columns=col_names, index=mi)

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.aic))
    def aic(self):
        return self._wrap(call_cached_func(RegressionResults.aic, self))

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.bic))
    def bic(self):
        with np.errstate(divide="ignore"):
            return self._wrap(call_cached_func(RegressionResults.bic, self))

    def info_criteria(self, crit, dk_params=0):
        return self._wrap(
            RegressionResults.info_criteria(self, crit, dk_params=dk_params)
        )

    @cache_readonly
    def params(self):
        """Estimated model parameters"""
        return self._wrap(self._params)

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.ssr))
    def ssr(self):
        return self._wrap(self._ssr)

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.llf))
    def llf(self):
        return self._wrap(self._llf)

    @cache_readonly
    @Appender(RegressionModel.df_model.__doc__)
    def df_model(self):
        return self._nvar - self._k_constant

    @cache_readonly
    def k_constant(self):
        """Flag indicating whether the model contains a constant"""
        return self._k_constant

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.centered_tss))
    def centered_tss(self):
        return self._centered_tss

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.uncentered_tss))
    def uncentered_tss(self):
        return self._uncentered_tss

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.rsquared))
    def rsquared(self):
        return self._wrap(call_cached_func(RegressionResults.rsquared, self))

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.rsquared_adj))
    def rsquared_adj(self):
        return self._wrap(
            call_cached_func(RegressionResults.rsquared_adj, self)
        )

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.nobs))
    def nobs(self):
        return self._wrap(self._nobs)

    @cache_readonly
    @Appender(RegressionModel.df_resid.__doc__)
    def df_resid(self):
        return self._wrap(self._nobs - self.df_model - self._k_constant)

    @cache_readonly
    @Appender(RegressionResults.use_t.__doc__)
    def use_t(self):
        return self._use_t

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.ess))
    def ess(self):
        return self._wrap(call_cached_func(RegressionResults.ess, self))

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.mse_model))
    def mse_model(self):
        return self._wrap(call_cached_func(RegressionResults.mse_model, self))

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.mse_resid))
    def mse_resid(self):
        return self._wrap(call_cached_func(RegressionResults.mse_resid, self))

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.mse_total))
    def mse_total(self):
        return self._wrap(call_cached_func(RegressionResults.mse_total, self))

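    # Parameter covariance per window: the classic estimator is
    # s2 * inv(X'X), while the White (HC0) estimator is the sandwich
    # inv(X'X) @ (sum_i e_i**2 * outer(x_i, x_i)) @ inv(X'X), whose middle
    # term is accumulated in ``xeex`` during fitting.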
    @cache_readonly
    def _cov_params(self):
        if self._cov_type == "nonrobust":
            return self._s2[:, None, None] * self._xpxi
        else:
            return self._xpxi @ self._xepxe @ self._xpxi

    def cov_params(self):
        """
        Estimated parameter covariance

        Returns
        -------
        array_like
            The estimated model covariances. If the original input is a numpy
            array, the returned covariance is a 3-d array with shape
            (nobs, nvar, nvar). If the original inputs are pandas types, then
            the returned covariance is a DataFrame with a MultiIndex with
            key (observation, variable), so that the covariance for
            observation with index i is cov.loc[i].
        """
        return self._wrap(self._cov_params)

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.f_pvalue))
    def f_pvalue(self):
        with np.errstate(invalid="ignore"):
            return self._wrap(
                call_cached_func(RegressionResults.f_pvalue, self)
            )

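    # For robust covariances the F statistic cannot be formed from sums of
    # squares, so fvalue instead computes a per-window Wald statistic: with R
    # selecting all non-constant coefficients, it evaluates
    #
    #     (R b)' inv(R V R') (R b) / q
    #
    # where V is the parameter covariance and q the number of restrictions.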
    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.fvalue))
    def fvalue(self):
        if self._cov_type == "nonrobust":
            return self.mse_model / self.mse_resid
        else:
            nobs = self._params.shape[0]
            stat = np.full(nobs, np.nan)
            k = self._params.shape[1]
            r = np.eye(k)
            locs = list(range(k))
            if self.k_constant:
                locs.pop(self.model.const_idx)
            if not locs:
                return stat
            r = r[locs]
            vcv = self._cov_params
            rvcvr = r @ vcv @ r.T
            p = self._params
            for i in range(nobs):
                rp = p[i : i + 1] @ r.T
                denom = rp.shape[1]
                inv_cov = np.linalg.inv(rvcvr[i])
                stat[i] = (rp @ inv_cov @ rp.T) / denom
            return stat

    @cache_readonly
    @Appender(get_cached_doc(RegressionResults.bse))
    def bse(self):
        with np.errstate(invalid="ignore"):
            return self._wrap(np.sqrt(np.diagonal(self._cov_params, 0, 2)))

    @cache_readonly
    @Appender(get_cached_doc(LikelihoodModelResults.tvalues))
    def tvalues(self):
        with np.errstate(invalid="ignore"):
            return self._wrap(
                call_cached_func(LikelihoodModelResults.tvalues, self)
            )

    @cache_readonly
    @Appender(get_cached_doc(LikelihoodModelResults.pvalues))
    def pvalues(self):
        if self.use_t:
            df_resid = getattr(self, "df_resid_inference", self.df_resid)
            df_resid = np.asarray(df_resid)[:, None]
            with np.errstate(invalid="ignore"):
                return stats.t.sf(np.abs(self.tvalues), df_resid) * 2
        else:
            with np.errstate(invalid="ignore"):
                return stats.norm.sf(np.abs(self.tvalues)) * 2

    def _conf_int(self, alpha, cols):
        bse = np.asarray(self.bse)

        if self.use_t:
            dist = stats.t
            df_resid = getattr(self, "df_resid_inference", self.df_resid)
            df_resid = np.asarray(df_resid)[:, None]
            q = dist.ppf(1 - alpha / 2, df_resid)
        else:
            dist = stats.norm
            q = dist.ppf(1 - alpha / 2)

        params = np.asarray(self.params)
        lower = params - q * bse
        upper = params + q * bse
        if cols is not None:
            cols = np.asarray(cols)
            lower = lower[:, cols]
            upper = upper[:, cols]
        return np.asarray(list(zip(lower, upper)))

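    # _conf_int returns an array of shape (nobs, 2, nvar); conf_int pivots it
    # into a DataFrame whose columns are a MultiIndex of (variable, bound),
    # so that, e.g., res.conf_int()["x1"] recovers the interval path for a
    # single (hypothetical) regressor named "x1".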
    @Appender(LikelihoodModelResults.conf_int.__doc__)
    def conf_int(self, alpha=0.05, cols=None):
        ci = self._conf_int(alpha, cols)
        if not self._use_pandas:
            return ci
        ci_names = ("lower", "upper")
        row_names = self.model.data.row_labels
        col_names = self.model.data.param_names
        if cols is not None:
            col_names = [col_names[i] for i in cols]
        mi = MultiIndex.from_product((col_names, ci_names))
        ci = np.reshape(np.swapaxes(ci, 1, 2), (ci.shape[0], -1))
        return DataFrame(ci, columns=mi, index=row_names)

    @property
    def cov_type(self):
        """Name of covariance estimator"""
        return self._cov_type

    @classmethod
    @Appender(LikelihoodModelResults.load.__doc__)
    def load(cls, fname):
        return LikelihoodModelResults.load(fname)

    remove_data = LikelihoodModelResults.remove_data

    @Appender(LikelihoodModelResults.save.__doc__)
    def save(self, fname, remove_data=False):
        return LikelihoodModelResults.save(self, fname, remove_data)

    def plot_recursive_coefficient(
        self,
        variables=None,
        alpha=0.05,
        legend_loc="upper left",
        fig=None,
        figsize=None,
    ):
        r"""
        Plot the recursively estimated coefficients for one or more variables

        Parameters
        ----------
        variables : {int, str, Iterable[int], Iterable[str], None}, optional
            Integer index or string name of the variables whose coefficients
            to plot. Can also be an iterable of integers or strings. Default
            plots all coefficients.
        alpha : float, optional
            The confidence intervals for the coefficients have coverage
            100 * (1 - alpha)%. Set to None to exclude confidence intervals.
        legend_loc : str, optional
            The location of the legend in the plot. Default is upper left.
        fig : Figure, optional
            If given, subplots are created in this figure instead of in a new
            figure. Note that the grid will be created in the provided
            figure using `fig.add_subplot()`.
        figsize : tuple, optional
            If a figure is created, this argument allows specifying a size.
            The tuple is (width, height).

        Returns
        -------
        Figure
            The matplotlib Figure object.
        """
        from statsmodels.graphics.utils import _import_mpl, create_mpl_fig

        if alpha is not None:
            ci = self._conf_int(alpha, None)

        row_labels = self.model.data.row_labels
        if row_labels is None:
            row_labels = np.arange(self._params.shape[0])
        k_variables = self._params.shape[1]
        param_names = self.model.data.param_names
        if variables is None:
            variable_idx = list(range(k_variables))
        else:
            if isinstance(variables, (int, str)):
                variables = [variables]
            variable_idx = []
            for i in range(len(variables)):
                variable = variables[i]
                if variable in param_names:
                    variable_idx.append(param_names.index(variable))
                elif isinstance(variable, int):
                    variable_idx.append(variable)
                else:
                    msg = (
                        "variable {0} is not an integer and was not found "
                        "in the list of variable "
                        "names: {1}".format(
                            variables[i], ", ".join(param_names)
                        )
                    )
                    raise ValueError(msg)

        _import_mpl()
        fig = create_mpl_fig(fig, figsize)

        loc = 0
        import pandas as pd

        if isinstance(row_labels, pd.PeriodIndex):
            row_labels = row_labels.to_timestamp()
        row_labels = np.asarray(row_labels)
        for i in variable_idx:
            ax = fig.add_subplot(len(variable_idx), 1, loc + 1)
            params = self._params[:, i]
            valid = ~np.isnan(self._params[:, i])
            row_lbl = row_labels[valid]
            ax.plot(row_lbl, params[valid])
            if alpha is not None:
                this_ci = np.reshape(ci[:, :, i], (-1, 2))
                if not np.all(np.isnan(this_ci)):
                    ax.plot(
                        row_lbl, this_ci[:, 0][valid], "k:", label="Lower CI"
                    )
                    ax.plot(
                        row_lbl, this_ci[:, 1][valid], "k:", label="Upper CI"
                    )
                    if loc == 0:
                        ax.legend(loc=legend_loc)
            ax.set_xlim(row_lbl[0], row_lbl[-1])
            ax.set_title(param_names[i])
            loc += 1

        fig.tight_layout()
        return fig