1import numpy as np
2from numpy.testing import assert_, assert_allclose, assert_raises
3import pytest
4
5import statsmodels.datasets.macrodata.data as macro
6from statsmodels.tsa.vector_ar.tests.JMulTi_results.parse_jmulti_vecm_output import (
7    sublists,
8)
9from statsmodels.tsa.vector_ar.var_model import VAR
10
11from .JMulTi_results.parse_jmulti_var_output import (
12    dt_s_tup_to_string,
13    load_results_jmulti,
14)
15
16atol = 0.001  # absolute tolerance
17rtol = 0.01  # relative tolerance
18datasets = []
19data = {}
20results_ref = {}
21results_sm = {}
22
23debug_mode = False
24dont_test_se_t_p = False
25deterministic_terms_list = ["nc", "c", "ct"]
26seasonal_list = [0, 4]
27dt_s_list = [
28    (det, s) for det in deterministic_terms_list for s in seasonal_list
29]
30all_tests = [
31    "coefs",
32    "det",
33    "Sigma_u",
34    "log_like",
35    "fc",
36    "causality",
37    "impulse-response",
38    "lag order",
39    "test normality",
40    "whiteness",
41    "exceptions",
42]
43to_test = all_tests  # ["coefs", "det", "Sigma_u", "log_like", "fc", "causality"]  # all_tests
44
45
46def load_data(dataset, data_dict):
47    dtset = dataset.load_pandas()
48    variables = dataset.variable_names
49    loaded = dtset.data[variables].astype(float).values
50    data_dict[dataset] = loaded.reshape((-1, len(variables)))
51
52
53def reorder_jmultis_det_terms(jmulti_output, constant, seasons):
54    """
55    In case of seasonal terms and a trend term we have to reorder them to make
56    the outputs from JMulTi and statsmodels comparable.
57    JMulTi's ordering is: [constant], [seasonal terms], [trend term] while
58    in statsmodels it is: [constant], [trend term], [seasonal terms]
59
60    Parameters
61    ----------
62    jmulti_output : ndarray (neqs x number_of_deterministic_terms)
63
64    constant : bool
65        Indicates whether there is a constant term or not in jmulti_output.
66    seasons : int
67        Number of seasons in the model. That means there are seasons-1
68        columns for seasonal terms in jmulti_output
69
70    Returns
71    -------
72    reordered : ndarray (neqs x number_of_deterministic_terms)
73        jmulti_output reordered such that the order of deterministic terms
74        matches that of statsmodels.
75    """
76    if seasons == 0:
77        return jmulti_output
78    constant = int(constant)
79    const_column = jmulti_output[:, :constant]
80    season_columns = jmulti_output[:, constant : constant + seasons - 1].copy()
81    trend_columns = jmulti_output[:, constant + seasons - 1 :].copy()
82    return np.hstack((const_column, trend_columns, season_columns))
83
84
85def generate_exog_from_season(seasons, endog_len):
86    """
87    Translate seasons to exog matrix.
88
89    Parameters
90    ----------
91    seasons : int
92        Number of seasons.
93    endog_len : int
94        Number of observations.
95
96    Returns
97    -------
98    exog : ndarray or None
99        If seasonal deterministic terms exist, the corresponding exog-matrix is
100        returned.
101        Otherwise, None is returned.
102    """
103
104    exog_stack = []
105    if seasons > 0:
106        season_exog = np.zeros((seasons - 1, endog_len))
107        for i in range(seasons - 1):
108            season_exog[i, i::seasons] = 1
109        # season_exog = season_exog[:, ::-1]
110        # season_exog = np.hstack((season_exog[:, 3:4],
111        #   season_exog[:, :-1]))
112        # season_exog = np.hstack((season_exog[:, 2:4],
113        #                          season_exog[:, :-2]))
114        # season_exog = np.hstack((season_exog[:, 1:4], season_exog[:, :-3]))
115        # season_exog[1] = -season_exog[1]
116        # the following line is commented out because seasonal terms are
117        # *not* centered in JMulTi's VAR-framework (in contrast to VECM)
118        # season_exog -= 1 / seasons
119        season_exog = season_exog.T
120        exog_stack.append(season_exog)
121    if exog_stack != []:
122        exog = np.column_stack(exog_stack)
123    else:
124        exog = None
125    return exog
126
127
128def load_results_statsmodels(dataset):
129    results_per_deterministic_terms = dict.fromkeys(dt_s_list)
130    for dt_s_tup in dt_s_list:
131        endog = data[dataset]
132        exog = generate_exog_from_season(dt_s_tup[1], len(endog))
133        warn_typ = FutureWarning if dt_s_tup[0] == "nc" else None
134
135        model = VAR(endog, exog)
136        with pytest.warns(warn_typ):
137            results_per_deterministic_terms[dt_s_tup] = model.fit(
138                maxlags=4, trend=dt_s_tup[0], method="ols"
139            )
140    return results_per_deterministic_terms
141
142
143def build_err_msg(ds, dt_s, parameter_str):
144    dt = dt_s_tup_to_string(dt_s)
145    seasons = dt_s[1]
146    err_msg = "Error in " + parameter_str + " for:\n"
147    err_msg += "- Dataset: " + ds.__str__() + "\n"
148    err_msg += "- Deterministic terms: "
149    err_msg += dt_s[0] if dt != "nc" else "no det. terms"
150    if seasons > 0:
151        err_msg += ", seasons: " + str(seasons)
152    return err_msg
153
154
155def setup():
156    datasets.append(macro)  # TODO: append more data sets for more test cases.
157
158    for ds in datasets:
159        load_data(ds, data)
160        results_ref[ds] = load_results_jmulti(ds, dt_s_list)
161        results_sm[ds] = load_results_statsmodels(ds)
162
163
164setup()
165
166
167def test_ols_coefs():
168    if debug_mode:
169        if "coefs" not in to_test:
170            return
171        print("\n\nESTIMATED PARAMETER MATRICES FOR LAGGED ENDOG", end="")
172    for ds in datasets:
173        for dt_s in dt_s_list:
174            if debug_mode:
175                print("\n" + dt_s_tup_to_string(dt_s) + ": ", end="")
176
177            # estimated parameter vector
178            err_msg = build_err_msg(ds, dt_s, "PARAMETER MATRICES ENDOG")
179            obtained = np.hstack(results_sm[ds][dt_s].coefs)
180            desired = results_ref[ds][dt_s]["est"]["Lagged endogenous term"]
181            assert_allclose(obtained, desired, rtol, atol, False, err_msg)
182            if debug_mode and dont_test_se_t_p:
183                continue
184            # standard errors
185            obt = results_sm[ds][dt_s].stderr_endog_lagged
186            des = results_ref[ds][dt_s]["se"]["Lagged endogenous term"].T
187            assert_allclose(
188                obt, des, rtol, atol, False, "STANDARD ERRORS\n" + err_msg
189            )
190            # t-values
191            obt = results_sm[ds][dt_s].tvalues_endog_lagged
192            des = results_ref[ds][dt_s]["t"]["Lagged endogenous term"].T
193            assert_allclose(
194                obt, des, rtol, atol, False, "t-VALUES\n" + err_msg
195            )
196            # p-values
197            obt = results_sm[ds][dt_s].pvalues_endog_lagged
198            des = results_ref[ds][dt_s]["p"]["Lagged endogenous term"].T
199            assert_allclose(
200                obt, des, rtol, atol, False, "p-VALUES\n" + err_msg
201            )
202
203
204def test_ols_det_terms():
205    if debug_mode:
206        if "det" not in to_test:
207            return
208        print("\n\nESTIMATED PARAMETERS FOR DETERMINISTIC TERMS", end="")
209    for ds in datasets:
210        for dt_s in dt_s_list:
211            if debug_mode:
212                print("\n" + dt_s_tup_to_string(dt_s) + ": ", end="")
213
214            err_msg = build_err_msg(ds, dt_s, "PARAMETER MATRICES EXOG")
215            det_key_ref = "Deterministic term"
216            # If there are no det. terms, just make sure we do not compute any:
217            if det_key_ref not in results_ref[ds][dt_s]["est"].keys():
218                assert_(
219                    (
220                        results_sm[ds][dt_s].coefs_exog.size == 0
221                        and results_sm[ds][dt_s].stderr_dt.size == 0
222                        and results_sm[ds][dt_s].tvalues_dt.size == 0
223                        and results_sm[ds][dt_s].pvalues_dt.size == 0
224                    ),
225                    err_msg,
226                )
227                continue
228            obtained = results_sm[ds][dt_s].coefs_exog
229            desired = results_ref[ds][dt_s]["est"][det_key_ref]
230            desired = reorder_jmultis_det_terms(
231                desired, dt_s[0].startswith("c"), dt_s[1]
232            )
233            assert_allclose(obtained, desired, rtol, atol, False, err_msg)
234            if debug_mode and dont_test_se_t_p:
235                continue
236            # standard errors
237            obt = results_sm[ds][dt_s].stderr_dt
238            des = results_ref[ds][dt_s]["se"][det_key_ref]
239            des = reorder_jmultis_det_terms(
240                des, dt_s[0].startswith("c"), dt_s[1]
241            ).T
242            assert_allclose(
243                obt, des, rtol, atol, False, "STANDARD ERRORS\n" + err_msg
244            )
245            # t-values
246            obt = results_sm[ds][dt_s].tvalues_dt
247            des = results_ref[ds][dt_s]["t"][det_key_ref]
248            des = reorder_jmultis_det_terms(
249                des, dt_s[0].startswith("c"), dt_s[1]
250            ).T
251            assert_allclose(
252                obt, des, rtol, atol, False, "t-VALUES\n" + err_msg
253            )
254            # p-values
255            obt = results_sm[ds][dt_s].pvalues_dt
256            des = results_ref[ds][dt_s]["p"][det_key_ref]
257            des = reorder_jmultis_det_terms(
258                des, dt_s[0].startswith("c"), dt_s[1]
259            ).T
260            assert_allclose(
261                obt, des, rtol, atol, False, "p-VALUES\n" + err_msg
262            )
263
264
265def test_ols_sigma():
266    if debug_mode:
267        if "Sigma_u" not in to_test:
268            return
269        print("\n\nSIGMA_U", end="")
270    for ds in datasets:
271        for dt in dt_s_list:
272            if debug_mode:
273                print("\n" + dt_s_tup_to_string(dt) + ": ", end="")
274
275            err_msg = build_err_msg(ds, dt, "Sigma_u")
276            obtained = results_sm[ds][dt].sigma_u
277            desired = results_ref[ds][dt]["est"]["Sigma_u"]
278            assert_allclose(obtained, desired, rtol, atol, False, err_msg)
279
280
281def test_log_like():
282    if debug_mode:
283        if "log_like" not in to_test:
284            return
285        else:
286            print("\n\nLOG LIKELIHOOD", end="")
287    for ds in datasets:
288        for dt in dt_s_list:
289            if debug_mode:
290                print("\n" + dt_s_tup_to_string(dt) + ": ", end="")
291
292            err_msg = build_err_msg(ds, dt, "Log Likelihood")
293            obtained = results_sm[ds][dt].llf
294            desired = results_ref[ds][dt]["log_like"]
295            assert_allclose(obtained, desired, rtol, atol, False, err_msg)
296
297
298def test_fc():
299    if debug_mode:
300        if "fc" not in to_test:
301            return
302        else:
303            print("\n\nFORECAST", end="")
304    for ds in datasets:
305        for dt in dt_s_list:
306            if debug_mode:
307                print("\n" + dt_s_tup_to_string(dt) + ": ", end="")
308            steps = 5  # parsed JMulTi output comprises 5 steps
309            last_observations = results_sm[ds][dt].endog[
310                -results_sm[ds][dt].k_ar :
311            ]
312            seasons = dt[1]
313            if seasons == 0:
314                exog_future = None
315            else:
316                exog_future = np.zeros((steps, seasons - 1))
317                # the following line is appropriate only if the last
318                # observation was in the next to last season (this is the case
319                # for macrodata)
320                exog_future[1:seasons] = np.identity(seasons - 1)
321            err_msg = build_err_msg(ds, dt, "FORECAST")
322            # test point forecast functionality of forecast method
323            obtained = results_sm[ds][dt].forecast(
324                y=last_observations, steps=steps, exog_future=exog_future
325            )
326            desired = results_ref[ds][dt]["fc"]["fc"]
327            assert_allclose(obtained, desired, rtol, atol, False, err_msg)
328
329            # test forecast method with confidence interval calculation
330            err_msg = build_err_msg(ds, dt, "FORECAST WITH INTERVALS")
331            obtained = results_sm[ds][dt].forecast_interval(
332                y=last_observations,
333                steps=steps,
334                alpha=0.05,
335                exog_future=exog_future,
336            )
337            obt = obtained[0]  # forecast
338            obt_l = obtained[1]  # lower bound
339            obt_u = obtained[2]  # upper bound
340            des = results_ref[ds][dt]["fc"]["fc"]
341            des_l = results_ref[ds][dt]["fc"]["lower"]
342            des_u = results_ref[ds][dt]["fc"]["upper"]
343            assert_allclose(obt, des, rtol, atol, False, err_msg)
344            assert_allclose(obt_l, des_l, rtol, atol, False, err_msg)
345            assert_allclose(obt_u, des_u, rtol, atol, False, err_msg)
346
347
348def test_causality():  # test Granger- and instantaneous causality
349    if debug_mode:
350        if "causality" not in to_test:
351            return
352        else:
353            print("\n\nCAUSALITY", end="")
354    for ds in datasets:
355        for dt in dt_s_list:
356            if debug_mode:
357                print("\n" + dt_s_tup_to_string(dt) + ": ", end="")
358
359            err_msg_g_p = build_err_msg(ds, dt, "GRANGER CAUS. - p-VALUE")
360            err_msg_g_t = build_err_msg(ds, dt, "GRANGER CAUS. - TEST STAT.")
361            err_msg_i_p = build_err_msg(ds, dt, "INSTANT. CAUS. - p-VALUE")
362            err_msg_i_t = build_err_msg(ds, dt, "INSTANT. CAUS. - TEST STAT.")
363            v_ind = range(len(ds.variable_names))
364            for causing_ind in sublists(v_ind, 1, len(v_ind) - 1):
365                causing_names = ["y" + str(i + 1) for i in causing_ind]
366                causing_key = tuple(ds.variable_names[i] for i in causing_ind)
367
368                caused_ind = [i for i in v_ind if i not in causing_ind]
369                caused_names = ["y" + str(i + 1) for i in caused_ind]
370                caused_key = tuple(ds.variable_names[i] for i in caused_ind)
371
372                # test Granger-causality ######################################
373                granger_sm_ind = results_sm[ds][dt].test_causality(
374                    caused_ind, causing_ind
375                )
376                granger_sm_str = results_sm[ds][dt].test_causality(
377                    caused_names, causing_names
378                )
379
380                # test test-statistic for Granger non-causality:
381                g_t_obt = granger_sm_ind.test_statistic
382                g_t_des = results_ref[ds][dt]["granger_caus"]["test_stat"][
383                    (causing_key, caused_key)
384                ]
385                assert_allclose(
386                    g_t_obt, g_t_des, rtol, atol, False, err_msg_g_t
387                )
388                # check whether string sequences as args work in the same way:
389                g_t_obt_str = granger_sm_str.test_statistic
390                assert_allclose(
391                    g_t_obt_str,
392                    g_t_obt,
393                    1e-07,
394                    0,
395                    False,
396                    err_msg_g_t
397                    + " - sequences of integers and ".upper()
398                    + "strings as arguments do not yield the same result!".upper(),
399                )
400                # check if int (e.g. 0) as index and list of int ([0]) yield
401                # the same result:
402                if len(causing_ind) == 1 or len(caused_ind) == 1:
403                    ci = (
404                        causing_ind[0]
405                        if len(causing_ind) == 1
406                        else causing_ind
407                    )
408                    ce = caused_ind[0] if len(caused_ind) == 1 else caused_ind
409                    granger_sm_single_ind = results_sm[ds][dt].test_causality(
410                        ce, ci
411                    )
412                    g_t_obt_single = granger_sm_single_ind.test_statistic
413                    assert_allclose(
414                        g_t_obt_single,
415                        g_t_obt,
416                        1e-07,
417                        0,
418                        False,
419                        err_msg_g_t
420                        + " - list of int and int as ".upper()
421                        + "argument do not yield the same result!".upper(),
422                    )
423
424                # test p-value for Granger non-causality:
425                g_p_obt = granger_sm_ind.pvalue
426                g_p_des = results_ref[ds][dt]["granger_caus"]["p"][
427                    (causing_key, caused_key)
428                ]
429                assert_allclose(
430                    g_p_obt, g_p_des, rtol, atol, False, err_msg_g_p
431                )
432                # check whether string sequences as args work in the same way:
433                g_p_obt_str = granger_sm_str.pvalue
434                assert_allclose(
435                    g_p_obt_str,
436                    g_p_obt,
437                    1e-07,
438                    0,
439                    False,
440                    err_msg_g_t
441                    + " - sequences of integers and ".upper()
442                    + "strings as arguments do not yield the same result!".upper(),
443                )
444                # check if int (e.g. 0) as index and list of int ([0]) yield
445                # the same result:
446                if len(causing_ind) == 1:
447                    g_p_obt_single = granger_sm_single_ind.pvalue
448                    assert_allclose(
449                        g_p_obt_single,
450                        g_p_obt,
451                        1e-07,
452                        0,
453                        False,
454                        err_msg_g_t
455                        + " - list of int and int as ".upper()
456                        + "argument do not yield the same result!".upper(),
457                    )
458
459                # test instantaneous causality ################################
460                inst_sm_ind = results_sm[ds][dt].test_inst_causality(
461                    causing_ind
462                )
463                inst_sm_str = results_sm[ds][dt].test_inst_causality(
464                    causing_names
465                )
466                # test test-statistic for instantaneous non-causality
467                t_obt = inst_sm_ind.test_statistic
468                t_des = results_ref[ds][dt]["inst_caus"]["test_stat"][
469                    (causing_key, caused_key)
470                ]
471                assert_allclose(t_obt, t_des, rtol, atol, False, err_msg_i_t)
472                # check whether string sequences as args work in the same way:
473                t_obt_str = inst_sm_str.test_statistic
474                assert_allclose(
475                    t_obt_str,
476                    t_obt,
477                    1e-07,
478                    0,
479                    False,
480                    err_msg_i_t
481                    + " - sequences of integers and ".upper()
482                    + "strings as arguments do not yield the same result!".upper(),
483                )
484                # check if int (e.g. 0) as index and list of int ([0]) yield
485                # the same result:
486                if len(causing_ind) == 1:
487                    inst_sm_single_ind = results_sm[ds][
488                        dt
489                    ].test_inst_causality(causing_ind[0])
490                    t_obt_single = inst_sm_single_ind.test_statistic
491                    assert_allclose(
492                        t_obt_single,
493                        t_obt,
494                        1e-07,
495                        0,
496                        False,
497                        err_msg_i_t
498                        + " - list of int and int as ".upper()
499                        + "argument do not yield the same result!".upper(),
500                    )
501
502                # test p-value for instantaneous non-causality
503                p_obt = (
504                    results_sm[ds][dt].test_inst_causality(causing_ind).pvalue
505                )
506                p_des = results_ref[ds][dt]["inst_caus"]["p"][
507                    (causing_key, caused_key)
508                ]
509                assert_allclose(p_obt, p_des, rtol, atol, False, err_msg_i_p)
510                # check whether string sequences as args work in the same way:
511                p_obt_str = inst_sm_str.pvalue
512                assert_allclose(
513                    p_obt_str,
514                    p_obt,
515                    1e-07,
516                    0,
517                    False,
518                    err_msg_i_p
519                    + " - sequences of integers and ".upper()
520                    + "strings as arguments do not yield the same result!".upper(),
521                )
522                # check if int (e.g. 0) as index and list of int ([0]) yield
523                # the same result:
524                if len(causing_ind) == 1:
525                    inst_sm_single_ind = results_sm[ds][
526                        dt
527                    ].test_inst_causality(causing_ind[0])
528                    p_obt_single = inst_sm_single_ind.pvalue
529                    assert_allclose(
530                        p_obt_single,
531                        p_obt,
532                        1e-07,
533                        0,
534                        False,
535                        err_msg_i_p
536                        + " - list of int and int as ".upper()
537                        + "argument do not yield the same result!".upper(),
538                    )
539
540
541def test_impulse_response():
542    if debug_mode:
543        if "impulse-response" not in to_test:
544            return
545        else:
546            print("\n\nIMPULSE-RESPONSE", end="")
547    for ds in datasets:
548        for dt in dt_s_list:
549            if debug_mode:
550                print("\n" + dt_s_tup_to_string(dt) + ": ", end="")
551            err_msg = build_err_msg(ds, dt, "IMULSE-RESPONSE")
552            periods = 20
553            obtained_all = results_sm[ds][dt].irf(periods=periods).irfs
554            # flatten inner arrays to make them comparable to parsed results:
555            obtained_all = obtained_all.reshape(periods + 1, -1)
556            desired_all = results_ref[ds][dt]["ir"]
557            assert_allclose(
558                obtained_all, desired_all, rtol, atol, False, err_msg
559            )
560
561
562def test_lag_order_selection():
563    if debug_mode:
564        if "lag order" not in to_test:
565            return
566        else:
567            print("\n\nLAG ORDER SELECTION", end="")
568    for ds in datasets:
569        for dt in dt_s_list:
570            if debug_mode:
571                print("\n" + dt_s_tup_to_string(dt) + ": ", end="")
572            endog_tot = data[ds]
573            exog = generate_exog_from_season(dt[1], len(endog_tot))
574            model = VAR(endog_tot, exog)
575            warn_typ = FutureWarning if dt[0] == "nc" else None
576            with pytest.warns(warn_typ):
577                obtained_all = model.select_order(10, trend=dt[0])
578            for ic in ["aic", "fpe", "hqic", "bic"]:
579                err_msg = build_err_msg(
580                    ds, dt, "LAG ORDER SELECTION - " + ic.upper()
581                )
582                obtained = getattr(obtained_all, ic)
583                desired = results_ref[ds][dt]["lagorder"][ic]
584                assert_allclose(obtained, desired, rtol, atol, False, err_msg)
585
586
587def test_normality():
588    if debug_mode:
589        if "test normality" not in to_test:
590            return
591        else:
592            print("\n\nTEST NON-NORMALITY", end="")
593    for ds in datasets:
594        for dt in dt_s_list:
595            if debug_mode:
596                print("\n" + dt_s_tup_to_string(dt) + ": ", end="")
597
598            obtained = results_sm[ds][dt].test_normality(signif=0.05)
599            err_msg = build_err_msg(ds, dt, "TEST NON-NORMALITY - STATISTIC")
600            obt_statistic = obtained.test_statistic
601            des_statistic = results_ref[ds][dt]["test_norm"][
602                "joint_test_statistic"
603            ]
604            assert_allclose(
605                obt_statistic, des_statistic, rtol, atol, False, err_msg
606            )
607            err_msg = build_err_msg(ds, dt, "TEST NON-NORMALITY - P-VALUE")
608            obt_pvalue = obtained.pvalue
609            des_pvalue = results_ref[ds][dt]["test_norm"]["joint_pvalue"]
610            assert_allclose(obt_pvalue, des_pvalue, rtol, atol, False, err_msg)
611            # call methods to assure they do not raise exceptions
612            obtained.summary()
613            str(obtained)  # __str__()
614
615
616def test_whiteness():
617    if debug_mode:
618        if "whiteness" not in to_test:
619            return
620        else:
621            print("\n\nTEST WHITENESS OF RESIDUALS", end="")
622    for ds in datasets:
623        for dt in dt_s_list:
624            if debug_mode:
625                print("\n" + dt_s_tup_to_string(dt) + ": ", end="")
626            lags = results_ref[ds][dt]["whiteness"]["tested order"]
627
628            obtained = results_sm[ds][dt].test_whiteness(nlags=lags)
629            # test statistic
630            err_msg = build_err_msg(
631                ds, dt, "WHITENESS OF RESIDUALS - " "TEST STATISTIC"
632            )
633            desired = results_ref[ds][dt]["whiteness"]["test statistic"]
634            assert_allclose(
635                obtained.test_statistic, desired, rtol, atol, False, err_msg
636            )
637            # p-value
638            err_msg = build_err_msg(
639                ds, dt, "WHITENESS OF RESIDUALS - " "P-VALUE"
640            )
641            desired = results_ref[ds][dt]["whiteness"]["p-value"]
642            assert_allclose(
643                obtained.pvalue, desired, rtol, atol, False, err_msg
644            )
645
646            obtained = results_sm[ds][dt].test_whiteness(
647                nlags=lags, adjusted=True
648            )
649            # test statistic (adjusted Portmanteau test)
650            err_msg = build_err_msg(
651                ds,
652                dt,
653                "WHITENESS OF RESIDUALS - " "TEST STATISTIC (ADJUSTED TEST)",
654            )
655            desired = results_ref[ds][dt]["whiteness"]["test statistic adj."]
656            assert_allclose(
657                obtained.test_statistic, desired, rtol, atol, False, err_msg
658            )
659            # p-value (adjusted Portmanteau test)
660            err_msg = build_err_msg(
661                ds, dt, "WHITENESS OF RESIDUALS - " "P-VALUE (ADJUSTED TEST)"
662            )
663            desired = results_ref[ds][dt]["whiteness"]["p-value adjusted"]
664            assert_allclose(
665                obtained.pvalue, desired, rtol, atol, False, err_msg
666            )
667
668
669def test_exceptions():
670    if debug_mode:
671        if "exceptions" not in to_test:
672            return
673        else:
674            print("\n\nEXCEPTIONS\n", end="")
675    for ds in datasets:
676        for dt in dt_s_list:
677            if debug_mode:
678                print("\n" + dt_s_tup_to_string(dt) + ": ", end="")
679
680            # instant causality:
681            ### 0<signif<1
682            assert_raises(
683                ValueError, results_sm[ds][dt].test_inst_causality, 0, 0
684            )  # this means signif=0
685            ### causing must be int, str or iterable of int or str
686            assert_raises(
687                TypeError, results_sm[ds][dt].test_inst_causality, [0.5]
688            )  # 0.5 not an int
689