Loosen tolerances on some tests on i386
authorDebian Science Maintainers <debian-science-maintainers@lists.alioth.debian.org>
Thu, 18 Dec 2025 07:37:30 +0000 (07:37 +0000)
committerRebecca N. Palmer <rebecca_palmer@zoho.com>
Thu, 18 Dec 2025 07:37:30 +0000 (07:37 +0000)
and xfail a test that fails because it does not produce the expected ConvergenceWarning.

Failure logs:
test_smoothing
https://buildd.debian.org/status/fetch.php?pkg=statsmodels&arch=i386&ver=0.9.0-3&stamp=1567157609&raw=0
test_multivariate_switch_univariate
https://buildd.debian.org/status/fetch.php?pkg=statsmodels&arch=i386&ver=0.12.0-1&stamp=1599693472&raw=0
TestMixedLM.test_compare_numdiff
salsa-ci for 0.14.2-1

As these tests fail only on i386 and the results are not far off,
I suspect the cause is different rounding due to x87 excess precision.

Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
Bug-Debian: partly https://bugs.debian.org/938949
Forwarded: no

Gbp-Pq: Name i386_loosen_test_tolerances.patch

statsmodels/discrete/tests/test_truncated_model.py
statsmodels/iolib/tests/test_summary2.py
statsmodels/regression/tests/test_lme.py
statsmodels/tsa/holtwinters/tests/test_holtwinters.py
statsmodels/tsa/statespace/tests/test_exact_diffuse_filtering.py
statsmodels/tsa/statespace/tests/test_multivariate_switch_univariate.py
statsmodels/tsa/statespace/tests/test_smoothing.py

index ad51c8db45cb2d407a7579a2b5c63e1f31a84ec8..097426c8312fd677635b10d1db641a1bf7763d6c 100644 (file)
@@ -1,4 +1,8 @@
 import warnings
+import pytest
+import re
+import sys
+import platform
 
 import numpy as np
 import pytest
@@ -28,6 +32,7 @@ from .results import results_truncated_st as results_ts
 
 
 class CheckResults:
+    xfail_nonconverge = False
     def test_params(self):
         assert_allclose(self.res1.params, self.res2.params,
                         atol=1e-5, rtol=1e-5)
@@ -58,7 +63,10 @@ class CheckResults:
         with warnings.catch_warnings():
             warnings.simplefilter("ignore", category=ConvergenceWarning)
             # This does not catch all Convergence warnings, why?
-            res_reg = model.fit_regularized(alpha=alpha*0.01, disp=0)
+            res_reg = model.fit_regularized(alpha=alpha*0.01, disp=0, full_output=True)
+            if self.xfail_nonconverge and not res_reg.mle_retvals['converged']:
+                warnings.warn("did not converge"+str(res_reg.mle_retvals))
+                pytest.xfail()
 
         if res_reg.mle_retvals["converged"]:
             assert_allclose(res_reg.params, self.res1.params,
@@ -79,6 +87,8 @@ class TestTruncatedLFPoissonModel(CheckResults):
         res2 = RandHIE()
         res2.truncated_poisson()
         cls.res2 = res2
+        if bool(re.match('i.?86|x86',platform.uname()[4])) and np.log2(sys.maxsize)<33:
+            cls.xfail_nonconverge = True
 
 
 class TestZeroTruncatedLFPoissonModel(CheckResults):
index 912d6ada0068721ac672289b6ac361fda7d15302..c3f418b46033fd7f9d084d27273f6cbb0c44d5eb 100644 (file)
@@ -1,6 +1,9 @@
 from statsmodels.compat.scipy import SP_LT_116
 
 import warnings
+import platform
+import re
+import sys
 
 import numpy as np
 from numpy.testing import assert_equal
@@ -72,6 +75,22 @@ parentheses.
         reg2 = OLS(y2, x).fit()
         actual = summary_col([reg1, reg2], float_format='%0.1f').as_text()
         actual = '%s\n' % actual
+        if bool(re.match('i.?86|x86',platform.uname()[4])) and sys.maxsize<2**33 and actual== r"""
+==========================
+                y I   y II
+--------------------------
+const          7.8   12.4 
+               (1.1) (3.2)
+x1             -0.8  -1.6 
+               (0.2) (0.7)
+R-squared      0.8   0.6  
+R-squared Adj. 0.7   0.5  
+==========================
+Standard errors in
+parentheses.
+""":  # noqa:W291
+            pytest.xfail()
+
 
         starred = summary_col([reg1, reg2], stars=True, float_format='%0.1f')
         assert "7.7***" in str(starred)
index 9352c40972cb5f2e1e2e429b14ac0b6e3d3f1018..b6924b19c77a3163d954bd0dfe6d66d40a75e9d4 100644 (file)
@@ -14,6 +14,10 @@ from numpy.testing import (
 import pandas as pd
 import pytest
 from scipy import sparse
+import sys
+import platform
+import re
+i386_looser_tolerances=bool(re.match('i.?86|x86',platform.uname()[4])) and np.log2(sys.maxsize)<33
 
 from statsmodels.base import _penalties as penalties
 from statsmodels.regression.mixed_linear_model import (
@@ -166,7 +170,7 @@ class TestMixedLM:
                 params_vec = rslt.params_object.get_packed(use_sqrt=False, has_fe=True)
                 loglike_h = loglike_function(model, profile_fe=False, has_fe=True)
                 nhess = nd.approx_hess(params_vec, loglike_h)
-                assert_allclose(hess, nhess, rtol=1e-3)
+                assert_allclose(hess, nhess, rtol=1.5e-3 if i386_looser_tolerances else 1e-3)
         except AssertionError:
             # See GH#5628; because this test fails unpredictably but only on
             #  OSX, we only xfail it there.
index 84bf0bee78db124665f643791a92ad0da582c0b4..71a7def7e2f80674abd274bac9acb13c88886adc 100644 (file)
@@ -8,7 +8,9 @@ from statsmodels.compat.pytest import pytest_warns
 
 import os
 import re
+import sys
+import platform
 import warnings
 
 import numpy as np
 from numpy.testing import assert_allclose, assert_almost_equal
@@ -838,7 +842,17 @@ def test_start_params(trend, seasonal):
         minimize_kwargs={"minimizer_kwargs": {"method": "L-BFGS-B"}},
     )
     assert isinstance(res.summary().as_text(), str)
-    assert res2.sse < 1.01 * res.sse
+    if bool(re.match('i.?86|x86',platform.uname()[4])) and sys.maxsize<2**33 and np.isnan(res.sse) and np.isnan(res2.sse):
+        warnings.warn("NaN result: params:"+str(res.params)+" summary:"+str(res.summary().as_text())+"params:"+str(res2.params)+" summary:"+str(res2.summary().as_text()))
+        with pytest.warns(ConvergenceWarning):
+            mod.fit()
+        with pytest.warns(ConvergenceWarning):
+            mod.fit(
+                method="basinhopping",
+                minimize_kwargs={"minimizer_kwargs": {"method": "SLSQP"}},
+            )
+    else:
+        assert res2.sse < 1.01 * res.sse
     assert isinstance(res2.params, dict)
 
 
@@ -1603,6 +1617,7 @@ def test_simulate_boxcox(austourists):
     assert np.all(np.abs(mean - expected) < 5)
 
 
+@pytest.mark.xfail(condition=bool(re.match('i.?86|x86',platform.uname()[4])) and np.log2(sys.maxsize)<33,reason="doesn't warn on i386",strict=False)
 @pytest.mark.parametrize("ix", [10, 100, 1000, 2000])
 def test_forecast_index(ix):
     # GH 6549
index a06978ba72e4c6a1568fb666e3ae4a202ae9faca..59220a8997871b2966e3f344917e47ef9b63296e 100644 (file)
@@ -898,7 +898,14 @@ class CheckDFM(CheckSSMResults):
         assert_allclose(self.results_a.initial_diffuse_state_cov, np.eye(2))
 
 
+import pytest
+import re
+import sys
+import platform
+i386_looser_tolerances=bool(re.match('i.?86|x86',platform.uname()[4])) and sys.maxsize<2**33
 class TestDFM_Approx(CheckApproximateDiffuseMixin, CheckDFM):
+    if i386_looser_tolerances:
+        rtol = 1e-6
     # Note: somewhat fragile, we need to increase the approximate variance to
     # 5e10 for the tests to pass at the appropriate level of precision, but
     # we cannot increase it too much more than this because then we start get
index 1cba6d73abf862339d1d0bb2a6da5fdafeb52b2f..0f4a5f32fa9f5db4468fd02f1d5a4785a2b9b634 100644 (file)
@@ -19,6 +19,10 @@ Princeton, N.J.: Princeton University Press.
 """
 import numpy as np
 import pytest
+import sys
+import platform
+import re
+i386_looser_tolerances=bool(re.match('i.?86|x86',platform.uname()[4])) and np.log2(sys.maxsize)<33
 
 from statsmodels.tsa.statespace import (
     mlemodel, sarimax, structural, varmax, dynamic_factor)
@@ -236,7 +240,7 @@ def test_filter_output(univariate, missing, init, periods):
     # Test the output when the multivariate filter switches to the univariate
     # filter
     mod = get_model(univariate, missing, init)
-    check_filter_output(mod, periods)
+    check_filter_output(mod, periods, atol=1e-10 if i386_looser_tolerances else 0)
 
 
 @pytest.mark.parametrize('univariate', [True, False])
@@ -255,7 +259,7 @@ def test_smoother_output(univariate, missing, init, periods, option):
         if init == 'diffuse':
             return
         mod.ssm.timing_init_filtered = True
-    atol = 1e-12
+    atol = 1e-8 if i386_looser_tolerances else 1e-12
     # Tolerance is lower for approximate diffuse for one attribute in this case
     if missing == 'init' and init == 'approximate_diffuse':
         atol = 1e-6
index e536df88b66eba2ccd382be9109d3ee7be91f57b..aab38c986fc1de5880d50f53977a3b0193fb0191 100644 (file)
@@ -29,8 +29,10 @@ from statsmodels.tsa.statespace.kalman_smoother import (
     SMOOTH_UNIVARIATE)
 
 current_path = os.path.dirname(os.path.abspath(__file__))
-
-
+import sys
+import platform
+import re
+i386_looser_tolerances=bool(re.match('i.?86|x86',platform.uname()[4])) and np.log2(sys.maxsize)<33
 class TestStatesAR3:
     @classmethod
     def setup_class(cls, alternate_timing=False, *args, **kwargs):
@@ -835,7 +837,7 @@ class TestMultivariateVARUnivariate:
     def test_forecasts_error_cov(self):
         assert_allclose(
             self.results.forecasts_error_cov.diagonal(),
-            self.desired[['F1', 'F2', 'F3']]
+            self.desired[['F1', 'F2', 'F3']],rtol=2e-7 if i386_looser_tolerances else 1e-7
         )
 
     def test_predicted_states(self):
@@ -889,7 +891,7 @@ class TestMultivariateVARUnivariate:
     def test_smoothed_measurement_disturbance_cov(self):
         assert_allclose(
             self.results.smoothed_measurement_disturbance_cov.diagonal(),
-            self.desired[['Veps1', 'Veps2', 'Veps3']]
+            self.desired[['Veps1', 'Veps2', 'Veps3']],rtol=2e-7 if i386_looser_tolerances else 1e-7
         )