--- /dev/null
--- /dev/null
++This repository includes the upstream Git history.
++
++Setting up the second remote (once per clone):
++git remote add upstream https://github.com/statsmodels/statsmodels
++
++Importing a new upstream version:
++git fetch upstream
++gbp import-orig --upstream-branch=main --uscan --pristine-tar "--upstream-vcs-tag=v%(version)s" --debian-branch=debian
++
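++One possible way to build from this layout (a sketch, not a packaging
++requirement; plain dpkg-buildpackage in an unpacked source tree also works):
++gbp buildpackage --git-debian-branch=debian --git-upstream-branch=main --git-pristine-tar
++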
++Remember that the source package the buildds see isn't a Git repository:
++unlike upstream, we can't use git as a clean command during the build,
++so generated files have to be removed via debian/rules (or debian/clean) instead.
--- /dev/null
--- /dev/null
++statsmodels (0.13.1+dfsg-3) unstable; urgency=medium
++
++ * Warn on import and ignore (crashing) tests on mips64el,
++ and not on armel (see #968210).
++ * Remove obsolete rules code and nose test-depends.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 17 Dec 2021 22:32:59 +0000
++
++statsmodels (0.13.1+dfsg-2) unstable; urgency=medium
++
++ * Docs: use *_path and symlinks for local JavaScript,
++ instead of (broken by CORS) sed replacements.
++ * Xfail another possible rounding issue on i386.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 29 Nov 2021 22:05:33 +0000
++
++statsmodels (0.13.1+dfsg-1) unstable; urgency=medium
++
++ * New upstream release. Drop / refresh patches.
++ * Update watch file.
++ * Update d/copyright, exclude possibly illegal data file.
++ * Xfail some probable rounding issues. Closes: #997081.
++ * Docs: update MathJax URL replacement.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 28 Nov 2021 16:30:27 +0000
++
++statsmodels (0.12.2-2) unstable; urgency=medium
++
++ * Fix test failures with scipy 1.7. Closes: #992681.
++ * README.source: update upstream branch name.
++ * Bump Standards-Version to 4.6.0 (no changes needed).
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 29 Aug 2021 20:09:28 +0100
++
++statsmodels (0.12.2-1) unstable; urgency=medium
++
++ * Upstream bugfix release. Drop / refresh patches.
++ * Don't use the multiarch path for architecture detection. Closes: #973854.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 06 Feb 2021 16:48:16 +0000
++
++statsmodels (0.12.1-2) unstable; urgency=medium
++
++ * Team upload.
++ * debian patch scipy_1.6_test_bytestring.patch uses string rather
++ than bytes in expected message with scipy 1.6 in test_arma_kwargs
++ (test_gls.py)
++ * Standards-Version: 4.5.1
++
++ -- Drew Parsons <dparsons@debian.org> Mon, 18 Jan 2021 01:02:15 +1100
++
++statsmodels (0.12.1-1) unstable; urgency=medium
++
++ * Upstream bugfix release.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Thu, 05 Nov 2020 07:28:38 +0000
++
++statsmodels (0.12.0-2) unstable; urgency=medium
++
++ * Loosen some test tolerances on i386.
++ * Require scipy >= 1.5 to avoid autopkgtest failure.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 11 Sep 2020 07:24:40 +0100
++
++statsmodels (0.12.0-1) unstable; urgency=medium
++
++ * New upstream release. Drop / refresh patches.
++ * Update d/copyright.
++ * Stop using pytest-forked everywhere: it doesn't fully
++ handle conditional xfails.
++ * Skip slow autopkgtests on non-amd64.
++ * On armel, warn on import and ignore all tests (see #968210).
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Tue, 08 Sep 2020 18:35:08 +0100
++
++statsmodels (0.11.1-5) unstable; urgency=medium
++
++ * Ignore test failures on arm* (see #968210).
++ * Use pytest-forked in case there are more crashing tests.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 16 Aug 2020 19:00:44 +0100
++
++statsmodels (0.11.1-4) unstable; urgency=medium
++
++ * Tests: ignore changes due to matplotlib 3.3 and scipy 1.5.
++ Closes: #966712.
++ * Bump debhelper compat to 13.
++ * Override Lintian warning from test references.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 09 Aug 2020 22:12:11 +0100
++
++statsmodels (0.11.1-3) unstable; urgency=medium
++
++ * Tests: ignore harmless new warnings. Closes: #964700.
++ * Add matplotlib etc to autopkgtest Depends to skip fewer tests.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 10 Jul 2020 06:46:36 +0100
++
++statsmodels (0.11.1-2) unstable; urgency=medium
++
++ * Tests: don't require a numpy warning that doesn't exist on armel.
++ Closes: #956882.
++ * Tests: allow no-multiprocessing failures (hurd-i386).
++ * Remove inconveniently licensed (CC-BY-SA) snippet.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 24 Apr 2020 15:17:04 +0100
++
++statsmodels (0.11.1-1) unstable; urgency=medium
++
++ * Upstream bugfix release.
++ * Drop patches applied upstream, refresh others.
++ * Don't require a warning scipy only produces on some architectures.
++ * Warn that Markov(Auto)regression can give wrong answers on armhf
++ and DynamicFactor on ppc64el, and xfail tests.
++ * Upload to unstable. Closes: #950429.
++ * Re-enable checking the test suite.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Mon, 24 Feb 2020 22:49:06 +0000
++
++statsmodels (0.11.0-1) experimental; urgency=medium
++
++ * New upstream release.
++ * Upload to experimental, as this is an API break (see #950429).
++ * Drop patches applied upstream, refresh others.
++ * Update and sort dependencies.
++ * Docs: use nature theme because we don't have sphinx_material,
++ remove obsolete GLMNotes,
++ don't skip unrunnable examples.
++ * Update and sort d/copyright.
++ * Fix some test failures (that were not real problems).
++ * Use local requirejs.
++ * Fix spelling and whitespace.
++ * Reproducibility: remove more build paths.
++ * Loosen tolerance on another flaky test.
++ * Temporarily ignore the test suite to get a first build.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 02 Feb 2020 21:51:06 +0000
++
++statsmodels (0.10.2-2) unstable; urgency=medium
++
++ * Docs: don't fail the build on no-network exceptions. Closes: #950087.
++ * Remove #945501 cleanup code: not worth the risk in (future) stable.
++ * Bump Standards-Version to 4.5.0 (no changes needed).
++ * Include offset and exposure in GEE fittedvalues (upstream bug 5253).
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Tue, 28 Jan 2020 22:29:29 +0000
++
++statsmodels (0.10.2-1) unstable; urgency=medium
++
++ * New upstream release.
++ * Drop patches applied upstream, refresh others.
++ * Use symlink_to_dir for documentation move,
++ and clean up old files. Closes: #945501.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Tue, 03 Dec 2019 21:23:04 +0000
++
++statsmodels (0.10.1-5) unstable; urgency=medium
++
++ * No-change upload to unstable. Closes: #934870, #931540.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 10 Nov 2019 16:32:41 +0000
++
++statsmodels (0.10.1-4) experimental; urgency=medium
++
++ * Xfail another actual failure
++ and a few other similar tests as #924036.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 09 Nov 2019 22:31:23 +0000
++
++statsmodels (0.10.1-3) experimental; urgency=medium
++
++ * Xfail another (intermittent?) instance of #924036.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 09 Nov 2019 09:51:35 +0000
++
++statsmodels (0.10.1-2) experimental; urgency=medium
++
++ * Fail the build on test failures.
++ * Xfail or loosen tolerances on some tests.
++ Together, the above Closes: #944054.
++ * Use correct Python version for test_lazy_imports.
++ * Xfail more tests affected by #924036.
++ * Documentation build: fix errors loading pre-cached data,
++ load autosummary.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 08 Nov 2019 22:27:02 +0000
++
++statsmodels (0.10.1-1) experimental; urgency=medium
++
++ * New upstream release.
++ * Upload to experimental, as this might be an API break
++ (see #931540).
++ * Simplify gbp.conf, import both tarball and upstream git,
++ and document this in README.source.
++ * Drop patches no longer needed, refresh others.
++ * Don't map r-cran-cardata to car: upstream now use the new name.
++ * Drop Python 2 packages (see #934870).
++ * Use Python 3 shebangs.
++ * Don't use setup.py clean, as upstream have disabled it.
++ * Use http(s)_proxy instead of a patch to block test downloads.
++ * Run build-time tests with the paths they expect.
++ * Update d/copyright.
++ * Don't install setup.cfg.
++ * Don't write to the source tree during tests.
++ * Bump Standards-Version to 4.4.1 (no changes needed).
++ * Reproducibility: fix date replacement bug, use UTC time.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 02 Nov 2019 16:56:50 +0000
++
++statsmodels (0.9.0-6) unstable; urgency=medium
++
++ * Don't change global pickle settings,
++ as this can break unrelated code.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 20 Sep 2019 19:23:49 +0100
++
++statsmodels (0.9.0-5) unstable; urgency=medium
++
++ * Update import paths to be compatible with scipy 1.3.
++ * Xfail another KalmanFilter test on armhf (#924036).
++ * Mark -lib packages Multi-Arch: same.
++ * Delete unused patches, document patches upstream status.
++ * Upload to unstable.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sun, 15 Sep 2019 21:50:15 +0100
++
++statsmodels (0.9.0-4) experimental; urgency=medium
++
++ * Loosen singularity detection threshold of VAR model.
++ * Make KalmanFilter warning (#924036) work on multiarch arm64/armhf
++ and xfail some more related tests.
++ * Adapt TestLagmat.test_unknown_trim to work with newer numpy.
++ * Loosen TestMultivariateVARUnivariate tolerances on i386.
++ * Xfail TestHoltWinters.test_forecast (like upstream).
++ Together, the above Closes: #938949.
++ * Use Breaks+Replaces for examples move to -doc. Closes: #938995.
++ * Remove no longer needed s390x xfails/warning.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 14 Sep 2019 10:38:33 +0100
++
++statsmodels (0.9.0-3) experimental; urgency=medium
++
++ * Upload to experimental, as 0.9 is an API break (see #931540).
++ * Merge 0.8.0-9 packaging into 0.9. Drop patches no longer needed,
++ refresh others.
++ * Revert "Disable KalmanFilter on armhf" as it never actually
++ worked (see #924036 / LP#1819227). Instead,
++ warn on use and xfail the tests.
++ * Add self to Uploaders.
++ * Tests now use pytest: add build-dependencies, adapt autopkgtest.
++ * Remove test skips that are no longer needed.
++ * Change s390x test skips to xfails (to check whether they are
++ still needed), and warn if l1_cvxopt_cp is used on s390x.
++ * Fix issues due to newer versions of dependencies:
++ - AnovaRM column order
++ - various issues within test code
++ * Fix midrule bug in LaTeX summary output.
++ * Clean up stray whitespace.
++ * Fix typos.
++ * Clarify -lib description.
++ * Use https in links, and update Homepage.
++ * Update and sort d/copyright.
++ * Bump Standards-Version to 4.4.0 (no changes needed).
++ * Give a more useful error if cvxopt is not available.
++ * Use debhelper-compat.
++ * Move examples to the -doc package.
++ * Remove obsolete symlink, add symlinks to ease finding docs.
++ * Strip timestamps and build paths for reproducibility.
++ * Deduplicate documentation images.
++ * Fix/improve nocheck/nodoc dependency marking.
++ * Categorize build-dependencies.
++ * Make -doc Suggest the Python 3 version.
++ * Run the R-using examples.
++ * Use data from R packages for building examples, instead of
++ embedding a copy. Modify one example to not need r-cran-count.
++ * Set Rules-Requires-Root: no.
++ * Remove outdated cythonized-files.
++ * Don't use pickle for get_rdataset caching, as it is a security hole
++ if the cache is shared. (This invalidates existing cached data;
++ it will automatically be re-downloaded if needed.)
++ * Warn that load_pickle is for trusted files only.
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 30 Aug 2019 07:48:41 +0100
++
++statsmodels (0.8.0-9) unstable; urgency=medium
++
++ * Team upload.
++ * Disable KalmanFilter on armhf, as it gives wrong results
++ (and probably always has).
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Sat, 02 Mar 2019 14:59:35 +0000
++
++statsmodels (0.8.0-8) unstable; urgency=medium
++
++ * Team upload.
++ * Remove outdated information from description.
++ * Use sphinx.ext.mathjax rather than no-longer-existing pngmath.
++ This also fixes equation display. Closes: #687301
++ * Fix spelling.
++ * Fix remote -> local requirejs substitution.
++ * Use text instead of fetching a logo.
++ * Make dataset caching work in Python 3.
++ * Extend use_cached_datasets to build more of the examples.
++ * Fix path and data layout exceptions in examples.
++ * Re-enable build-time tests, and clean up files they leave behind.
++ * Use upstream tests as autopkgtest.
++ * Move metadata to d/u/metadata, update and extend it.
++ * Update d/copyright.
++ * Fix exceptions due to newer versions of dependencies:
++ - "no attribute 'chisqprob'" in discrete_model
++ - "shape mismatch...could not be broadcast" in imputation
++ - "ufunc 'gammaln' not supported" in discrete_model
++ - "object arrays are not supported" in tsa.statespace
++ - "module 'pandas' has no attribute 'ols'" in tsa.vector_ar
++ - various IndexErrors in KaplanMeier Closes: #860449
++ - "unexpected keyword argument 'format'" in datasets.co2
++ - UnicodeDecodeError in parse_lutkepohl_data
++ - various issues within test and example code
++ * Backport the following from experimental:
++
++ [ Julian Taylor ]
++ * Use python3 also for fold_toc.py
++ * Use fixed locale for documentation build
++
++ [ Andreas Tille ]
++ * hardening=+all
++ * Use libjs-mathjax
++ * Extend description of -doc package
++ * Remove matplotlib.sphinxext.only_directives option which is
++ not used. Closes: #917754
++ * Do not try to access remote locations on buildtime test
++ Closes: #882641
++
++ -- Rebecca N. Palmer <rebecca_palmer@zoho.com> Fri, 01 Mar 2019 22:49:09 +0000
++
++statsmodels (0.9.0-2) experimental; urgency=medium
++
++ * Team upload.
++ * Version 0.9 closes: #880245
++ * Remove matplotlib.sphinxext.only_directives option which is not used
++ Closes: #917754
++ * Do not try to access remote locations on buildtime test
++ Closes: #882641
++ * debhelper 12
++
++ -- Andreas Tille <tille@debian.org> Wed, 13 Feb 2019 09:27:21 +0100
++
++statsmodels (0.9.0-1) experimental; urgency=medium
++
++ [ Andreas Tille ]
++ * Team upload.
++ * Fix watch file (do not report development tags)
++ * debhelper 11
++ * Standards-Version: 4.2.1
++ * New Build-Depends / Recommends: python*-colorama
++ * Drop useless get-orig-source target
++ * Fix permissions
++ * hardening=+all
++ * Use libjs-mathjax
++
++ [ Julian Taylor ]
++ * adapt use-python3-for-doc-build to also use python3 for fold_toc.py
++
++ -- Andreas Tille <tille@debian.org> Sun, 28 Oct 2018 18:56:40 +0100
++
++statsmodels (0.8.0-7) unstable; urgency=medium
++
++ * Team upload
++ * Use pybuild --print build_dir to fix FTBFS with
++ dh-python >= 3.20180313 (Closes: #894572)
++ * Update Vcs-* URIs for move to salsa.debian.org
++ * Remove ancient Python version fields
++
++ -- Graham Inggs <ginggs@debian.org> Wed, 27 Jun 2018 17:02:07 +0000
++
++statsmodels (0.8.0-6) unstable; urgency=medium
++
++ * Build the architecture dependent files before building documentation
++ * Add myself to uploaders.
++
++ -- Diane Trout <diane@ghic.org> Fri, 29 Sep 2017 22:17:33 -0700
++
++statsmodels (0.8.0-5) unstable; urgency=medium
++
++ * Team Upload
++
++ [ Diane Trout ]
++ * add sphinx-common to Build-Depends which fixes a FTBFS caused by
++ Sphinxdoc.pm not being present when building binary only packages.
++ * Split dh_auto_build-arch and dh_auto_build-indep targets so we only
++ build the docs once.
++
++ -- Diane Trout <diane@ghic.org> Fri, 29 Sep 2017 16:48:20 -0700
++
++statsmodels (0.8.0-4) unstable; urgency=medium
++
++ * Team upload
++
++ [ Diane Trout ]
++ * Add python-dateutil, patsy, tk as build depends,
++ marking nose & tk as !nocheck
++ * Add Python3 support
++ (Closes: #729956) (Closes: #782963) (Closes: #834495)
++ * Update Python 3 build depends, add components necessary to build
++ notebook examples
++ * Lintian warned that Conflicts with version should probably be Breaks
++ * Add use-system-inventory patch to use intersphinx inventory files from
++ other Debian doc packages.
++ Added those packages to the -doc package suggests list
++ * Let dh_sphinxdoc manage jquery dependencies.
++ Add libjs-requirejs for building notebooks.
++ * Use Python 3 for documentation building, and enable building notebook
++ examples as jupyter components are now available in stable.
++ * Extend a python3-statsmodels-lib description to make lintian happier
++ * Cleanup more files that might be generated during build
++ * Exclude test_sarimax test_structural test_dynamic_factor test_varmax
++ from unit testing
++ Closes: #873512
++ * add patch use-cached-datasets, and cache all of the get_rdataset
++ datasets in debian/datasets/. This is needed so the documentation will
++ build without network access.
++ * Document data files needed for documentation in debian/README.source
++
++ [ Andreas Tille ]
++ * Standards-Version: 4.1.0 (no changes needed)
++ * Add remark about documenting R data to debian/README.source
++ * Updating d/copyright
++ * DEP3
++
++ -- Diane Trout <diane@ghic.org> Tue, 26 Sep 2017 23:55:47 -0700
++
++statsmodels (0.8.0-3) unstable; urgency=medium
++
++ * Team upload.
++ * d/rules: do not run the testsuite when only building arch:all packages.
++ (Closes: #872603)
++ * d/copyright: use https form for the copyright-format URL.
++ * d/control:
++ + promote the package to priority optional, per policy 4.0.1.
++ + bump Standards-Version to 4.0.1.
++
++ -- Sébastien Villemot <sebastien@debian.org> Sat, 19 Aug 2017 11:42:08 +0200
++
++statsmodels (0.8.0-2) unstable; urgency=medium
++
++ * Team upload.
++
++ [ Sébastien Villemot ]
++ * Upload to unstable
++ * d/rules: fix logic of {build,binary}-indep for building doc.
++ * d/control:
++ + add missing build-dependency on dh-python.
++ + promote python-pandas to a Depends. It is really needed if one wants to
++ import statsmodels (and otherwise autopkgtest fails).
++
++ [ Andreas Tille ]
++ * d/p/skip_tests_failing_due_to_panda_issue.patch: new patch, removes tests
++ from test suite that fail because of pandas and can not easily be fixed.
++ * d/p/fix_numdocpy_issue.patch: new patch, fixes other issue with numpydoc.
++ (Closes: #868977)
++
++ [ Yaroslav Halchenko ]
++ * Bump to debhelper compat level 9.
++
++ -- Sébastien Villemot <sebastien@debian.org> Fri, 18 Aug 2017 22:40:39 +0200
++
++statsmodels (0.8.0-1) experimental; urgency=medium
++
++ * Fresh upstream release resolving various compatibility issues etc
++ - absorbs some of the previously carried patches (dropped from quilt):
++ fix_wrong_index_type.patch
++ workaround_panda_readtable_bug.patch
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 08 Feb 2017 13:08:38 -0500
++
++statsmodels (0.8.0~rc1+git59-gef47cd9-5) unstable; urgency=medium
++
++ * Team upload.
++ * Fix test skipping for s390
++
++ -- Ole Streicher <olebole@debian.org> Mon, 26 Dec 2016 18:33:56 +0100
++
++statsmodels (0.8.0~rc1+git59-gef47cd9-4) unstable; urgency=medium
++
++ * Team upload.
++ * Disable test_smoke_plot_and_repr() and test_pandas() on s390x:
++ probably a matplotlib bug there
++
++ -- Ole Streicher <olebole@debian.org> Mon, 26 Dec 2016 15:38:06 +0100
++
++statsmodels (0.8.0~rc1+git59-gef47cd9-3) unstable; urgency=medium
++
++ * Work around a Pandas readtable bug to fix a (temporary) FTBFS on i386
++
++ -- Ole Streicher <olebole@debian.org> Mon, 26 Dec 2016 11:46:23 +0100
++
++statsmodels (0.8.0~rc1+git59-gef47cd9-2) unstable; urgency=medium
++
++ * Team upload.
++ * Move package to Debian Science Team maintenance (permission from
++ Yaroslav Halchenko)
++ * Fix index type in `reshape` to be integer. Closes: #848782
++
++ -- Ole Streicher <olebole@debian.org> Sun, 25 Dec 2016 14:52:31 +0100
++
++statsmodels (0.8.0~rc1+git59-gef47cd9-1) unstable; urgency=medium
++
++ * Fresh upstream rc snapshot which hopefully addresses some
++ outstanding FTBFS etc issues
++ * debian/control
++ - allow for ipython >= 5 as alternative to ipython for now,
++ and also for traitlets explicitly on those newer systems
++ (Closes: #843414)
++ - adjusted vcs-browser (Closes: #843407)
++ * debian/rules
++ - skip test_plot_month test
++ (see https://github.com/statsmodels/statsmodels/issues/3283)
++ * debian/patches/up_3239
++ - to cast index into array before sorting (Closes: #841610)
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Sat, 19 Nov 2016 19:37:43 -0500
++
++statsmodels (0.8.0~rc1+git43-g1ac3f11-1) unstable; urgency=medium
++
++ * Snapshot of a new upstream release candidate
++ - should resolve some failing tests causing FTBFS (Closes: #837232)
++ * debian/patches
++ - all changes* were dropped (upstreamed)
++ + deb_no_nbformat_for_now - since no nbformat, do not generate ipython
++ notebooks for now
++ * debian/control
++ - boost policy to 3.9.8
++ - python-numpydoc to build-depends-indep
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Mon, 19 Sep 2016 09:23:49 -0400
++
++statsmodels (0.6.1-10) UNRELEASED; urgency=medium
++
++ * debian/control
++ - apparently on e.g. 14.04 we have to depend on pandoc, so it is
++ no longer listed as an alternative to nodejs
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 29 Apr 2016 09:30:57 -0400
++
++statsmodels (0.6.1-9) neurodebian; urgency=medium
++
++ * CP df820642c16d8486e8b676e07675afbbfd4824d8 to stay compatible with older
++ versions of nose (precise and alike)
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 27 Apr 2016 22:12:50 -0400
++
++statsmodels (0.6.1-8) unstable; urgency=medium
++
++ * Resolving by skipping the very last standing failing test on s390
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 26 Apr 2016 21:42:10 -0400
++
++statsmodels (0.6.1-7) unstable; urgency=medium
++
++ * BF: adjusted changeset_3faba6e002641e95a0c6007c826fabb9c423686e.diff to
++ not use kwarg for compat with elderly pandas. Should resolve FTBFS on
++ many platforms still carrying elderly pandas
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 26 Apr 2016 08:42:12 -0400
++
++statsmodels (0.6.1-6) unstable; urgency=medium
++
++ * debian/patches
++ - picked up a set of patches to hopefully mitigate compatibility issues
++ with elderly pandas available on some platforms
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Mon, 25 Apr 2016 18:32:29 -0400
++
++statsmodels (0.6.1-5) unstable; urgency=medium
++
++ * Fixes for compatibility with recent numpy and pandas
++ (Closes: #807025, #659405)
++ * examples/ were moved out of the module, so now they get manually installed
++ under /usr/share/doc/python-statsmodels/examples (Closes: #752977)
++ * debian/control
++ - policy boosted to 3.9.7
++ - removed obsolete transitional package python-scikits.statsmodels
++ * debian/watch
++ - fixed to use github.com tags directly
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 24 Apr 2016 18:55:01 -0400
++
++statsmodels (0.6.1-4) unstable; urgency=medium
++
++ * Move ipython-notebook and zmq to build-depends-indep since needed only for
++ documentation building
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 20 May 2015 17:29:08 -0400
++
++statsmodels (0.6.1-3) unstable; urgency=medium
++
++ * upload to unstable -- jessie is out
++ * debian/control
++ - raised the minimum cython version to 0.19
++ - provide Build-Depends-Indep to separate handling for building
++ binary-indep packages, and allow pandoc instead of nodejs
++ for extra robustness (thanks Edmund for the hint) (Closes: #770631)
++ * debian/{control,rules}
++ - Recommend libjs-mathjax and refer to locally installed MathJax.js
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 17 May 2015 21:12:33 -0400
++
++statsmodels (0.6.1-1) experimental; urgency=medium
++
++ * New upstream release
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 03 Dec 2014 11:06:18 -0500
++
++statsmodels (0.6.0~rc2-1) unstable; urgency=medium
++
++ * Upstream release candidate
++ * debian/patches
++ - changeset_ee75d978ec4c46b506b5429d4f65fdf38866ec4d.diff redone into
++ deb_use_internal_ipythonsphinx_extensions to facilitate building
++ documentation on older systems
++ - up_explicit_path_to_nbgenerate to provide an explicit path to notebooks
++ * debian/rules
++ - clean up cythonize.dat
++ * debian/control
++ - nodejs was added to build-depends (necessary for docs building)
++ - boost policy to 3.9.6 (no changes)
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 22 Oct 2014 19:32:58 -0400
++
++statsmodels (0.5.0+git13-g8e07d34-2) unstable; urgency=medium
++
++ * debian/patches
++ - changeset_ee75d978ec4c46b506b5429d4f65fdf38866ec4d.diff
++ to resolve FTBFS by using packaged ipython provided
++ extensions (Closes: #753232)
++ - up_reduce_test_precision to resolve failing
++ TestRegressionNM.test_ci_beta2 test on 32bit
++ * debian/control
++ - suggest -doc package (Closes: #740348)
++ * debian/rules
++ - exclude testing of the sandbox (let kids do wild things there and
++ mentors keep their hair) -- otherwise tests break due to scipy 0.14.0 API
++ breakage (f_gen)
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 16 Jul 2014 12:03:55 -0400
++
++statsmodels (0.5.0+git13-g8e07d34-1) unstable; urgency=low
++
++ * Fresh snapshot of 0.5.x maintenance branch
++ - resolved a failing test causing FTBFS (Closes: #730727)
++ * debian/control
++ - python-cvxopt added to Build-Depends (testing) and Recommends
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 28 Nov 2013 20:17:42 -0500
++
++statsmodels (0.5.0-1) unstable; urgency=low
++
++ * Fresh upstream release
++ * debian/rules
++ - assure having $HOME for clean and build operations to avoid crash of
++ matplotlib
++ * debian/control, debian/rules
++ - boost required ipython to 0.13.1 for building documentation
++ - add python-zmq explicitly to build-depends for correct IPython
++ operation
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 15 Aug 2013 23:45:31 -0400
++
++statsmodels (0.5.0~rc1-1) UNRELEASED; urgency=low
++
++ * New upstream snapshot from v0.4.3-1962-g13bbf88
++ - depends on patsy
++ * debian/control
++ - requires ipython-notebook for building documentation
++ (Build-Depends adjusted)
++ - build for Python >= 2.7 due to the absent Python 2.6 build of pandas
++ for wheezy (debian/pyversions removed; the X-Python-Version field is
++ used instead)
++ - boosted policy to 3.9.4 (no further changes)
++ * debian/copyright
++ - years adjusted
++ * debian/rules
++ - migrate to dh_python2 from dh_pysupport
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 01 Aug 2013 09:42:43 -0400
++
++statsmodels (0.4.2-1) unstable; urgency=low
++
++ * Fresh upstream release addressing FTBFS across big-endian architectures.
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 29 Jun 2012 17:26:36 -0400
++
++statsmodels (0.4.1-1) unstable; urgency=low
++
++ * Fresh upstream release
++ - dropped up_versioncomparison_with_rc
++ - require Python >= 2.6
++ * Do fail if unittests fail
++ * Added joblib to build-depends since it seems it could be used during tests
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Fri, 15 Jun 2012 17:57:17 -0400
++
++statsmodels (0.4.0-2) unstable; urgency=low
++
++ * Removed python-rpy from Recommends to avoid pulling Rtillery in
++ with the default installation of statsmodels and pandas, since rpy apparently
++ is mentioned only in some legacy code
++ * Added patch up_versioncomparison_with_rc to "cherry-pick" 45ee896 for
++ robust version comparison with rc versions of numpy/scipy
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Sun, 13 May 2012 20:40:37 -0400
++
++statsmodels (0.4.0-1) unstable; urgency=low
++
++ * Fresh upstream release
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 01 May 2012 09:34:58 -0400
++
++statsmodels (0.4.0~rc2+git6-g756f2d0-1) experimental; urgency=low
++
++ * Fresh upstream pre-release
++ - all patches dropped (upstreamed)
++ - the scikits. namespace is being deprecated:
++ - python-scikits.statsmodels is provided to ease code transition
++ - python-statsmodels-doc replaces python-scikits.statsmodels-doc
++ - python-statsmodels-lib is introduced to carry arch-dependent
++ extensions
++ * debian/copyright:
++ - entry for statsmodels/stats/libqsturng
++ * debian/watch:
++ - use githubredir.debian.net
++ * debian/{rules,control}
++ - rule cythonize to (re-)cythonize .pyx files so they could be
++ built on older releases
++ - use custom IPython01x available from NeuroDebian on older releases
++ to build documentation
++ - build-depend on python-all-dev now to build extensions
++ - recommend python-joblib
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 11 Apr 2012 08:30:50 -0400
++
++statsmodels (0.3.1-4) unstable; urgency=low
++
++ * Added patch from Ubuntu: fix_test_discrete.patch. Thanks Michael Terry for
++ the patch and Gregor Herrmann for the reminder (Closes: #663986, #648845)
++ * upon clean -- remove autogenerated version.py
++ * debian/copyright: adjusted for DEP-5
++ * debian/control: boosted policy to 3.9.3 (no changes)
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 29 Mar 2012 12:44:27 -0400
++
++statsmodels (0.3.1-3) unstable; urgency=low
++
++ * Few bugfixes:
++ - up_broken_imports
++ - up_doc_fixes
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Sat, 21 Jan 2012 12:07:54 -0500
++
++statsmodels (0.3.1-2) unstable; urgency=low
++
++ * debian/copyright: extended list of copyrights/licenses. Thanks to
++ Luca Falavigna for the thorough review
++ * debian/rules: remove shipped COPYING files due to duplication of
++ debian/copyright
++ * Initial release -- upload to Debian (Closes: #570604)
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 25 Aug 2011 23:23:37 +0200
++
++statsmodels (0.3.1-1) UNRELEASED; urgency=low
++
++ * Added get-orig-source rule to fetch from github using uscan
++ * Upload to Debian was rejected
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Wed, 24 Aug 2011 23:14:01 +0200
++
++statsmodels (0.3.0-1) neurodebian; urgency=low
++
++ * Upstream release -- upload to Debian proper is pending the removal of
++ WFS dataset (research only restriction)
++ * debian/copyright: expanded to mention copyrights for the included
++ datasets, adjusted BSD-3 license text to use project's name instead
++ of "stock" regents.
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 19 Jul 2011 22:40:18 -0400
++
++statsmodels (0.3.0~rc2+4-gc78fbe1-1) UNRELEASED; urgency=low
++
++ * Upstream snapshot post RC2:
++ - upstream moved to GIT, Debian packaging repository reinitialized
++ borrowing debian/ branch from previous one (git-bzr imported)
++ * debian/control: boosted policy to 3.9.2 -- no changes
++ * debian/watch: fetch from github
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 07 Jul 2011 22:43:21 -0400
++
++statsmodels (0.3.0~bzr20110330-1) neurodebian; urgency=low
++
++ * Fresh snapshot of BZR 0.3-devel series
++ * Initial upload to Debian was rejected due to data licensing issues
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Tue, 29 Mar 2011 17:48:04 -0400
++
++statsmodels (0.2.0+bzr1990-1) neurodebian; urgency=low
++
++ * Initial release -- rejected due to full list of copyrights
++
++ -- Yaroslav Halchenko <debian@onerussian.com> Thu, 06 May 2010 23:02:18 -0400
--- /dev/null
--- /dev/null
++Source: statsmodels
++Maintainer: Debian Science Maintainers <debian-science-maintainers@lists.alioth.debian.org>
++Uploaders: Yaroslav Halchenko <debian@onerussian.com>,
++ Michael Hanke <michael.hanke@gmail.com>,
++ Diane Trout <diane@ghic.org>,
++ Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Section: python
++Priority: optional
++Build-Depends: debhelper-compat (= 13),
++ sphinx-common,
++ dh-python (>= 3.20180313~),
++ cython3,
++ python3-all-dev,
++ python3-colorama <!nocheck> <!nodoc>,
++ python3-cvxopt <!nocheck> <!nodoc>,
++ python3-dateutil <!nocheck> <!nodoc>,
++ python3-joblib <!nocheck> <!nodoc>,
++ python3-matplotlib,
++ python3-numpy,
++ python3-pandas,
++ python3-patsy (>= 0.5.1) <!nocheck> <!nodoc>,
++ python3-pytest <!nocheck>,
++ python3-pytest-forked <!nocheck>,
++ python3-scipy,
++ python3-setuptools,
++ python3-tk <!nocheck> <!nodoc>
++Build-Depends-Indep: python3-jupyter-client <!nodoc>,
++ python3-nbconvert <!nodoc>,
++ python3-nbsphinx <!nodoc>,
++ python3-notebook <!nodoc>,
++ python3-numpydoc <!nodoc>,
++ python3-seaborn <!nodoc>,
++ python3-simplegeneric <!nodoc>,
++ python3-sphinx <!nodoc>,
++ python3-yaml <!nodoc>,
++ jdupes <!nodoc>,
++# for intersphinx inventories
++ python-matplotlib-doc <!nodoc>,
++ python-numpy-doc <!nodoc>,
++ python-scipy-doc <!nodoc>,
++ python-pandas-doc <!nodoc>,
++ python3-doc <!nodoc>,
++# for the comparing-with-R examples, and example datasets
++ r-base-core <!nodoc>,
++ python3-rpy2 <!nodoc>,
++ r-cran-cardata <!nodoc>,
++ r-cran-geepack <!nodoc>,
++ r-cran-guerry <!nodoc>,
++ r-cran-lme4 <!nodoc>,
++ r-cran-mass <!nodoc>,
++ r-cran-robustbase <!nodoc>,
++ r-cran-vcd <!nodoc>,
++ pandoc <!nodoc>
++Standards-Version: 4.6.0
++Rules-Requires-Root: no
++Vcs-Browser: https://salsa.debian.org/science-team/statsmodels
++Vcs-Git: https://salsa.debian.org/science-team/statsmodels.git
++Homepage: https://www.statsmodels.org
++
++Package: python-statsmodels-doc
++Architecture: all
++Section: doc
++Depends: ${misc:Depends},
++ ${sphinxdoc:Depends},
++ libjs-requirejs,
++ libjs-mathjax
++Suggests: python3-statsmodels,
++ python3-doc,
++ python-numpy-doc,
++ python-patsy-doc,
++ python-pandas-doc,
++ python-scipy-doc
++Breaks: python-scikits-statsmodels-doc,
++ python-scikits.statsmodels-doc,
++ python-statsmodels (<< 0.9.0-3~)
++Replaces: python-scikits-statsmodels-doc,
++ python-scikits.statsmodels-doc,
++ python-statsmodels (<< 0.9.0-3~)
++Description: documentation and examples for statsmodels
++ The statsmodels Python modules provide classes and functions for
++ the estimation of several categories of statistical models. These
++ currently include linear regression models, OLS, GLS, WLS and GLS
++ with AR(p) errors, generalized linear models for several distribution
++ families and M-estimators for robust linear models. An extensive list
++ of result statistics is available for each estimation problem.
++ .
++ This package contains HTML documentation and example scripts for
++ python-statsmodels.
++
++Package: python3-statsmodels
++Architecture: all
++Depends: ${misc:Depends},
++ ${python3:Depends},
++ python3-numpy,
++# upstream say >1.2 but the amd64 autopkgtest failure happens with 1.4 not 1.5
++# maybe the real problem is what scipy was built with
++ python3-scipy (>= 1.5~),
++ python3-statsmodels-lib (>= ${source:Version}),
++ python3-patsy (>= 0.5.1),
++ python3-pandas
++Recommends: python3-matplotlib,
++ python3-joblib,
++ python3-colorama,
++ python3-cvxopt
++Suggests: python-statsmodels-doc
++Provides: ${python3:Provides}
++Description: Python3 module for the estimation of statistical models
++ The statsmodels Python 3 module provides classes and functions for the
++ estimation of several categories of statistical models. These
++ currently include linear regression models, OLS, GLS, WLS and GLS
++ with AR(p) errors, generalized linear models for several distribution
++ families and M-estimators for robust linear models. An extensive list
++ of result statistics is available for each estimation problem.
++
++Package: python3-statsmodels-lib
++Architecture: any
++Multi-Arch: same
++Depends: ${misc:Depends},
++ ${python3:Depends},
++ ${shlibs:Depends},
++ python3-numpy
++Description: Python3 low-level implementations and bindings for statsmodels
++ Statsmodels is a Python module for the estimation of statistical models.
++ .
++ This package contains internal libraries for python3-statsmodels. Users
++ should not need to install it directly.
--- /dev/null
--- /dev/null
++Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
++Upstream-Name: statsmodels
++Upstream-Contact: pystatsmodels@googlegroups.com
++Source: https://github.com/statsmodels/statsmodels
++Files-Excluded: statsmodels/datasets/scotland/src/scotland_changes.html
++
++Files: *
++Copyright: 2006, Jonathan E. Taylor
++ 2006-2008, Scipy Developers.
++ 2009-2021, Statsmodels Developers.
++License: BSD-3
++Comment: the original statsmodels/tools/sequences.py contains a CC-BY-SA 2.5 snippet by "Robert William Hanks" https://stackoverflow.com/users/350331, but it may be too small to be copyrightable, and is patched out in this package
++
++Files: docs/source/_static/facebox.js
++Copyright: 2007, 2008 Chris Wanstrath, Kyle Neath
++License: Expat
++
++Files: docs/source/_static/mktree.js
++Copyright: 2005-2009 Matt Kruse
++License: Expat
++
++Files: setup.py
++Copyright: 2009-2012, Brian Granger, Min Ragan-Kelley (from pyzmq)
++ 2004 Infrae (from lxml)
++ 2008-2013, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team
++ 2009-2021, Statsmodels Developers
++License: BSD-3
++
++Files: statsmodels/compat/numpy.py
++Copyright: 2005-2015 NumPy Developers
++License: BSD-3
++
++Files: statsmodels/datasets/*
++Copyright: 2007, David Cournapeau <cournape@gmail.com>
++License: BSD-3
++Comment: concerns code parts, corresponding data files license terms
++ described below
++
++Files: statsmodels/datasets/utils.py
++Copyright: 2007-2010, Scikit-Learn Developers
++ 2009-2021, Statsmodels Developers
++License: BSD-3
++
++Files: statsmodels/datasets/anes96/anes96.csv
++ statsmodels/datasets/grunfeld/grunfeld.csv
++ statsmodels/datasets/longley/longley.csv
++ statsmodels/datasets/macrodata/macrodata.csv
++ statsmodels/datasets/nile/nile.csv
++ statsmodels/datasets/randhie/randhie.csv
++ statsmodels/datasets/stackloss/stackloss.csv
++ statsmodels/datasets/strikes/strikes.csv
++ statsmodels/datasets/sunspots/sunspots.csv
++Copyright: N/A
++License: public-domain
++
++Files: statsmodels/datasets/ccard/ccard.csv
++Copyright: William Greene
++License: BSD-3
++Comment: Used with express permission of the original author, who retains all rights
++
++Files: statsmodels/datasets/committee/committee.csv
++ statsmodels/datasets/copper/copper.csv
++ statsmodels/datasets/cpunish/cpunish.csv
++ statsmodels/datasets/scotland/scotvote.csv
++ statsmodels/datasets/star98/star98.csv
++Copyright: Jeff Gill
++License: BSD-3
++Comment: Used with express permission of the original author, who retains all rights
++
++Files: statsmodels/datasets/spector/spector.csv
++Copyright: Lee Spector
++License: BSD-3
++Comment: Used with express permission of the original author, who retains all rights
++
++Files: statsmodels/distributions/tests/test_mixture.py
++Copyright: 2013 Ana Martinez Pardo
++License: BSD-3
++
++Files: statsmodels/graphics/tsaplots.py
++ statsmodels/sandbox/tsa/example_arma.py
++Copyright: 2002-2012, John D. Hunter and Matplotlib contributors
++ 2009-2021, Statsmodels Developers
++License: BSD-3 and matplotlib
++
++Files: statsmodels/iolib/*
++Copyright: 2007, Joe Presbrey <presbrey@mit.edu>
++ 2009-2021, Statsmodels Developers
++License: Expat and BSD-3
++
++Files: statsmodels/multivariate/factor_rotation/*
++Copyright: 2015 Martin van der Schans, C.A. Bernaards, R.I. Jennrich
++License: BSD-3
++
++Files: statsmodels/regression/rolling.py
++Copyright: 2019 Kevin Sheppard
++License: BSD-3
++
++Files: statsmodels/sandbox/pca.py
++Copyright: 2008 Erik Tollerud <etolleru@uci.edu>
++License: BSD-3
++
++Files: statsmodels/sandbox/tsa/diffusion2.py
++Copyright: 2008, Attilio Meucci
++License: BSD-2
++
++Files: statsmodels/stats/libqsturng/*
++Copyright: 2011, Roger Lew
++License: BSD-3
++
++Files: statsmodels/stats/tests/results/results_multinomial_proportions.py
++Copyright: 2016 Sébastien Lerique
++License: BSD-3
++
++Files: statsmodels/tsa/statespace/tests/results/frbny_nowcast/Nowcasting/*
++Copyright: 2018, Federal Reserve Bank of New York
++License: BSD-3
++
++Files: debian/*
++Copyright: 2010-2017, Yaroslav Halchenko <debian@onerussian.com>
++ 2017-2021 Debian Science Team
++License: BSD-3
++
++Files: debian/datasets/Rdatasets.R
++ debian/datasets/rst.sh
++Copyright: 2012-2019 Vincent Arel-Bundock and contributors
++License: GPL-3
++Comment: from https://vincentarelbundock.github.io/Rdatasets with some changes
++
++
++License: BSD-2
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions are
++ met:
++ .
++ * Redistributions of source code must retain the above copyright
++ notice, this list of conditions and the following disclaimer.
++ * Redistributions in binary form must reproduce the above copyright
++ notice, this list of conditions and the following disclaimer in
++ the documentation and/or other materials provided with the distribution
++ .
++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
++ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ POSSIBILITY OF SUCH DAMAGE.
++
++License: BSD-3
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions are met:
++ .
++ a. Redistributions of source code must retain the above copyright notice,
++ this list of conditions and the following disclaimer.
++ b. Redistributions in binary form must reproduce the above copyright
++ notice, this list of conditions and the following disclaimer in the
++ documentation and/or other materials provided with the distribution.
++ c. Neither the name of Statsmodels nor the names of its contributors
++ may be used to endorse or promote products derived from this software
++ without specific prior written permission.
++ .
++ .
++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ ARE DISCLAIMED. IN NO EVENT SHALL STATSMODELS OR CONTRIBUTORS BE LIABLE FOR
++ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ DAMAGE.
++Comment: the parts copied from other packages replace the name "Statsmodels" with "the NumPy Developers", "the Enthought" (scipy), "the copyright holder" (pandas) or "the Scikit-learn Developers ".
++
++License: Expat
++ Permission is hereby granted, free of charge, to any person
++ obtaining a copy of this software and associated documentation
++ files (the "Software"), to deal in the Software without
++ restriction, including without limitation the rights to use,
++ copy, modify, merge, publish, distribute, sublicense, and/or sell
++ copies of the Software, and to permit persons to whom the
++ Software is furnished to do so, subject to the following
++ conditions:
++ .
++ The above copyright notice and this permission notice shall be
++ included in all copies or substantial portions of the Software.
++ .
++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
++ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
++ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ OTHER DEALINGS IN THE SOFTWARE.
++ .
++ Except as contained in this notice, the name(s) of the above
++ copyright holders shall not be used in advertising or otherwise
++ to promote the sale, use or other dealings in this Software
++ without prior written authorization.
++
++License: matplotlib
++ LICENSE AGREEMENT FOR MATPLOTLIB %(version)s
++ --------------------------------------
++ .
++ 1. This LICENSE AGREEMENT is between John D. Hunter ("JDH"), and the
++ Individual or Organization ("Licensee") accessing and otherwise using
++ matplotlib software in source or binary form and its associated
++ documentation.
++ .
++ 2. Subject to the terms and conditions of this License Agreement, JDH
++ hereby grants Licensee a nonexclusive, royalty-free, world-wide license
++ to reproduce, analyze, test, perform and/or display publicly, prepare
++ derivative works, distribute, and otherwise use matplotlib %(version)s
++ alone or in any derivative version, provided, however, that JDH's
++ License Agreement and JDH's notice of copyright, i.e., "Copyright (c)
++ 2002-%(year)d John D. Hunter; All Rights Reserved" are retained in
++ matplotlib %(version)s alone or in any derivative version prepared by
++ Licensee.
++ .
++ 3. In the event Licensee prepares a derivative work that is based on or
++ incorporates matplotlib %(version)s or any part thereof, and wants to
++ make the derivative work available to others as provided herein, then
++ Licensee hereby agrees to include in any such work a brief summary of
++ the changes made to matplotlib %(version)s.
++ .
++ 4. JDH is making matplotlib %(version)s available to Licensee on an "AS
++ IS" basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
++ IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND
++ DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
++ FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB %(version)s
++ WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
++ .
++ 5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
++ %(version)s FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
++ LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
++ MATPLOTLIB %(version)s, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
++ THE POSSIBILITY THEREOF.
++ .
++ 6. This License Agreement will automatically terminate upon a material
++ breach of its terms and conditions.
++ .
++ 7. Nothing in this License Agreement shall be deemed to create any
++ relationship of agency, partnership, or joint venture between JDH and
++ Licensee. This License Agreement does not grant permission to use JDH
++ trademarks or trade name in a trademark sense to endorse or promote
++ products or services of Licensee, or any third party.
++ .
++ 8. By copying, installing or otherwise using matplotlib %(version)s,
++ Licensee agrees to be bound by the terms and conditions of this License
++ Agreement.
++
++License: public-domain
++ The copyright statements for the datasets are attached to the individual
++ datasets, most datasets are in public domain, and we [statsmodels] don't claim any copyright
++ on any of them.
++Comment: the above is from COPYRIGHTS.txt; factual data may also be inherently uncopyrightable
++
++License: GPL-3
++ On Debian systems, the complete text of the GNU General Public License
++ version 3 can be found in /usr/share/common-licenses/GPL-3.
--- /dev/null
--- /dev/null
++# Originally from https://github.com/vincentarelbundock/Rdatasets/blob/master/Rdatasets.R
++# License: GPL-3
++# Modified to remove not-in-Debian dependencies -- Rebecca Palmer
++can_do_index=require(R2HTML)
++
++# Data packages
++packages = c("boot", "car", "carData", "cluster", "COUNT", "DAAG", "datasets",
++ "drc", "Ecdat", "evir", "forecast", "fpp2", "gamclass", "gap",
++ "geepack", "ggplot2", "Guerry", "HSAUR", "hwde", "ISLR", "KMsurv",
++ "lattice", "lme4", "lmec", "MASS", "mediation", "mi", "mosaicData",
++ "multgee", "plm", "plyr", "pscl", "psych", "quantreg", "reshape2",
++ "robustbase", "rpart", "sandwich", "sem", "Stat2Data", "survival",
++ "texmex", "vcd", "Zelig", "purrr")
++for(package in packages) {
++ require(package, character.only=TRUE)
++}
++
++# Functions
++get_doc = function(package = 'mi', dataset = 'nlsyV') {
++ help.ref = try(help(eval(dataset), package=eval(package)), silent = TRUE)
++ out = try(utils:::.getHelpFile(help.ref), silent = TRUE)
++ return(out)
++}
++
++get_data = function(package = 'mi', dataset = 'nlsyV') {
++ e = new.env(hash = TRUE, parent = parent.frame(), size = 29L)
++ data(list = dataset, package = package, envir = e)
++ out = e[[dataset]]
++ return(out)
++}
++
++tidy_data = function(dat) {
++ if(class(dat)[1]=='ts'){
++ dat = try(data.frame('time' = time(dat), 'value' = dat), silent = TRUE)
++ } else {
++ dat = try(as.data.frame(dat), silent = TRUE)
++ }
++ if (class(dat)[1] == 'data.frame') {
++ out = dat
++ } else {
++ out = NA
++ class(out) = 'try-error'
++ }
++ return(out)
++}
++
++write_data = function(i) {
++ package = index$Package[i]
++ dataset = index$Item[i]
++ dat = data[[i]]
++ doc = docs[[i]]
++ cat(package, ' -- ', dataset, '\n')
++ try(dir.create('csv'), silent = TRUE)
++ try(dir.create('doc'), silent = TRUE)
++ try(dir.create(paste0('csv/', package)), silent = TRUE)
++ try(dir.create(paste0('doc/', package)), silent = TRUE)
++ fn_csv = paste0('csv/', package, '/', dataset, '.csv')
++ fn_doc = paste0('doc/', package, '/', dataset, '.html')
++ write.csv(data[[i]], file = fn_csv)
++ tools::Rd2HTML(docs[[i]], out = fn_doc)
++}
++
++# Index
++index = data(package=packages)$results[,c(1,3,4)]
++index = data.frame(index, stringsAsFactors=FALSE)
++
++# Extract Data and Docs and exclude non-data.frames and errors
++data = lapply(1:nrow(index), function(i) get_data(index$Package[i], index$Item[i]))
++docs = lapply(1:nrow(index), function(i) get_doc(index$Package[i], index$Item[i]))
++data = lapply(data, tidy_data)
++idx1 = sapply(docs, class) != 'try-error'
++idx2 = sapply(data, class) != 'try-error'
++idx = as.logical(pmin(idx1, idx2))
++data = data[idx]
++docs = docs[idx]
++index = index[idx,]
++
++# remap names to what statsmodels expects
++index$Package[index$Package == "Guerry"] <- "HistData" # the dataset we want is in both
++
++# Write to file
++for (i in 1:nrow(index)) {
++ write_data(i)
++}
++
++# Index
++is.binary <- function(x) {
++ tryCatch(length(unique(na.omit(x))) == 2,
++ error = function(e) FALSE, silent = TRUE)
++}
++index$Rows = sapply(data, nrow)
++index$Cols = sapply(data, ncol)
++index$n_binary <- sapply(data, function(x) sum(sapply(x, is.binary)))
++index$n_character <- sapply(data, function(x) sum(sapply(x, is.character)))
++index$n_factor <- sapply(data, function(x) sum(sapply(x, is.factor)))
++index$n_logical <- sapply(data, function(x) sum(sapply(x, is.logical)))
++index$n_numeric <- sapply(data, function(x) sum(sapply(x, is.numeric)))
++
++index$CSV = paste('https://raw.github.com/vincentarelbundock/Rdatasets/master/csv/',
++ index$Package, '/', index$Item, '.csv', sep='')
++index$Doc = paste('https://raw.github.com/vincentarelbundock/Rdatasets/master/doc/',
++ index$Package, '/', index$Item, '.html', sep='')
++index = index[order(index$Package, index$Item),]
++
++# Index CSV
++write.csv(index, file = 'datasets.csv', row.names = FALSE)
++
++if (!can_do_index){
++ print("Can't create the HTML index, as R2HTML is not installed")
++ q()
++}
++# Index HTML
++index$CSV = paste("<a href='", index$CSV, "'> CSV </a>", sep='')
++index$Doc = paste("<a href='", index$Doc, "'> DOC </a>", sep='')
++unlink('datasets.html')
++rss = '
++<style type="text/css">
++ tr:nth-child(even){
++ background-color: #E5E7E5;
++ }
++</style>
++'
++cat(rss, file='datasets.html')
++HTML(index, file='datasets.html', row.names=FALSE, append=TRUE)
--- /dev/null
--- /dev/null
++#!/usr/bin/python3
++
++"""Place data from R into the statsmodels cache, as Debian does not allow Internet access during builds"""
++
++import os
++import subprocess
++import glob
++import pickle
++import zlib
++
++buildroot_directory = os.getcwd()
++Rtmp_directory = buildroot_directory + '/build/Rtmp'
++target_directory = buildroot_directory + '/build/datacache'
++os.makedirs(target_directory)
++os.makedirs(Rtmp_directory)
++
++# R packages (datasets) used:
++# car (Duncan, Moore) # now split off as carData
++# COUNT (medpar) # not in Debian, use removed by use_available_data.patch
++# geepack (dietox)
++# HistData (Guerry) # not in Debian, but Guerry is and has the same dataset
++# MASS (epil, Sitka)
++# robustbase (starsCYG)
++# vcd (Arthritis, VisualAcuity)
++
++# duration.rst would use survival (flchain) but that example isn't run during build
++
++# R-using examples use lme4, geepack, robustbase
++
++subprocess.run(['R', 'CMD', 'BATCH', buildroot_directory + '/debian/datasets/Rdatasets.R'], cwd=Rtmp_directory, check=True)
++subprocess.run([buildroot_directory + '/debian/datasets/rst.sh'], cwd=Rtmp_directory + '/doc', check=True)
++
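++# Write the files in the layout of the statsmodels get_rdataset cache:
++# the source URL with '/' replaced by ',', a '-v2' infix before the file
++# extension, and zlib-compressed contents stored under a '.zip' suffix.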
++for fname_in in glob.glob(Rtmp_directory + '/**/*', recursive=True):
++ if os.path.isfile(fname_in):
++ with open(fname_in,'rb') as fd:
++ data = fd.read()
++ fname_out = target_directory + '/raw.githubusercontent.com,vincentarelbundock,Rdatasets,master,' + os.path.relpath('-v2.'.join(fname_in.rsplit('.',1)), start=Rtmp_directory).replace('/',',') + '.zip'
++ data2 = zlib.compress(data)
++ with open(fname_out, 'wb') as fd:
++ fd.write(data2)
--- /dev/null
--- /dev/null
++#!/usr/bin/env bash
++# IMPORTANT: Run from inside the "doc" directory
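++# Expected layout (as produced by Rdatasets.R): <package>/<dataset>.html under
++# the current directory; each file is converted to <package>/rst/<dataset>.rst.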
++
++# create rst directories if they do not exist
++for d in $(find . -maxdepth 1 -type d)
++do
++ mkdir -p "$d/rst"
++done
++
++# use pandoc to convert html to rst
++for f in $(find . -name '*.html')
++do
++ new="$(basename -- "$f" .html).rst"
++ dir=$(dirname -- "$f")
++ echo "$f"
++ pandoc -o "$dir/rst/$new" "$f"
++done
--- /dev/null
--- /dev/null
++README.*
--- /dev/null
--- /dev/null
++[DEFAULT]
++# the default branch for upstream sources:
++upstream-branch = master
++# the default branch for the debian patch:
++debian-branch = debian
++# the default tag formats used:
++upstream-vcs-tag = v%(version)s
++debian-tag = debian/%(version)s
--- /dev/null
--- /dev/null
++Description: Xfail or loosen tolerances on flaky tests
++
++The test_nondiagonal_obs_cov one hasn't yet been seen in Debian
++but is known in upstream CI:
++https://github.com/statsmodels/statsmodels/pull/6475#issuecomment-581151072
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>, Graham Inggs
++Bug-Debian: https://bugs.debian.org/944054
++Forwarded: no
++
++--- a/statsmodels/base/tests/test_penalized.py
+++++ b/statsmodels/base/tests/test_penalized.py
++@@ -312,6 +312,7 @@ class TestPenalizedPoissonOraclePenalize
++ cls.atol = 1e-3
++
++
+++@pytest.mark.xfail(reason="flaky convergence, https://bugs.debian.org/944054",strict=False)
++ class TestPenalizedPoissonOraclePenalized2(CheckPenalizedPoisson):
++
++ @classmethod
++@@ -348,6 +349,7 @@ class TestPenalizedPoissonOraclePenalize
++ assert_equal(self.res1.bse[self.k_nonzero:], 0)
++
++
+++@pytest.mark.xfail(reason="flaky convergence, https://bugs.debian.org/944054",strict=False)
++ class TestPenalizedPoissonOraclePenalized2HC(CheckPenalizedPoisson):
++
++ @classmethod
++--- a/statsmodels/tsa/statespace/tests/test_exact_diffuse_filtering.py
+++++ b/statsmodels/tsa/statespace/tests/test_exact_diffuse_filtering.py
++@@ -905,6 +905,7 @@ class TestDFM_Approx(CheckApproximateDif
++ # we cannot increase it too much more than this because then we start get
++ # numerical errors (e.g. 1e11 works but 1e12 does not pass)
++ approximate_diffuse_variance = 5e10
+++ rtol = 1e-6
++
++
++ class TestDFM_KFAS(CheckKFASMixin, CheckDFM):
--- /dev/null
--- /dev/null
++Description: Ignore test failures on arm64, warn on mips64el
++
++arm64 - TestZeroInflatedModel_probit convergence issues
++mips64el - tests crash, hence
++unknown whether there are any wrong answers (ignored in d/rules)
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Bug-Debian: https://bugs.debian.org/968210
++Forwarded: no
++
++--- a/statsmodels/discrete/tests/test_count_model.py
+++++ b/statsmodels/discrete/tests/test_count_model.py
++@@ -1,4 +1,5 @@
++ from statsmodels.compat.platform import PLATFORM_LINUX32
+++import platform
++
++ import numpy as np
++ from numpy.testing import (
++@@ -109,7 +110,7 @@ class TestZeroInflatedModel_probit(Check
++ res2 = RandHIE.zero_inflated_poisson_probit
++ cls.res2 = res2
++
++- @pytest.mark.skipif(PLATFORM_LINUX32, reason="Fails on 32-bit Linux")
+++ @pytest.mark.xfail(condition=PLATFORM_LINUX32 or platform.uname()[4].startswith('aarch'), strict=False, reason="convergence issues, https://bugs.debian.org/968210")
++ def test_fit_regularized(self):
++ super().test_fit_regularized()
++
++--- a/statsmodels/__init__.py
+++++ b/statsmodels/__init__.py
++@@ -1,3 +1,8 @@
+++import platform
+++import sys
+++if 'mips' in platform.uname()[4].lower() and sys.maxsize>2**33:
+++ import warnings
+++ warnings.warn("This appears to be a mips64el system, on which statsmodels is buggy (crashes and possibly wrong answers) - https://bugs.debian.org/968210")
++
++ from statsmodels._version import get_versions
++
--- /dev/null
--- /dev/null
++Description: Xfail / increase tolerance on tests
++
++TestMICE and test_mixedlm vary enough with the random state, and the
++corrpsd values are close enough to 0, that these failures are plausibly
++rounding issues rather than real bugs
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Bug-Debian: https://bugs.debian.org/997081
++Forwarded: https://github.com/statsmodels/statsmodels/issues/7911
++
++--- a/statsmodels/imputation/tests/test_mice.py
+++++ b/statsmodels/imputation/tests/test_mice.py
++@@ -349,6 +349,7 @@ class TestMICE(object):
++ assert(isinstance(x.family, sm.families.Binomial))
++
++ @pytest.mark.slow
+++ @pytest.mark.xfail(strict=False,reason='bug 7911')
++ def test_combine(self):
++
++ np.random.seed(3897)
++--- a/statsmodels/stats/tests/test_corrpsd.py
+++++ b/statsmodels/stats/tests/test_corrpsd.py
++@@ -193,19 +193,19 @@ def test_corrpsd_threshold(threshold):
++
++ y = corr_nearest(x, n_fact=100, threshold=threshold)
++ evals = np.linalg.eigvalsh(y)
++- assert_allclose(evals[0], threshold, rtol=1e-6, atol=1e-15)
+++ assert_allclose(evals[0], threshold, rtol=1e-6, atol=2e-15)
++
++ y = corr_clipped(x, threshold=threshold)
++ evals = np.linalg.eigvalsh(y)
++- assert_allclose(evals[0], threshold, rtol=0.25, atol=1e-15)
+++ assert_allclose(evals[0], threshold, rtol=0.25, atol=2e-15)
++
++ y = cov_nearest(x, method='nearest', n_fact=100, threshold=threshold)
++ evals = np.linalg.eigvalsh(y)
++- assert_allclose(evals[0], threshold, rtol=1e-6, atol=1e-15)
+++ assert_allclose(evals[0], threshold, rtol=1e-6, atol=2e-15)
++
++ y = cov_nearest(x, n_fact=100, threshold=threshold)
++ evals = np.linalg.eigvalsh(y)
++- assert_allclose(evals[0], threshold, rtol=0.25, atol=1e-15)
+++ assert_allclose(evals[0], threshold, rtol=0.25, atol=2e-15)
++
++
++ class Test_Factor(object):
++--- a/statsmodels/stats/tests/test_mediation.py
+++++ b/statsmodels/stats/tests/test_mediation.py
++@@ -166,7 +166,7 @@ def test_framing_example_moderator_formu
++ diff = np.asarray(med_rslt.summary() - framing_moderated_4231)
++ assert_allclose(diff, 0, atol=1e-6)
++
++-
+++@pytest.mark.xfail(strict=False,reason='bug 7911')
++ def test_mixedlm():
++
++ np.random.seed(3424)
--- /dev/null
--- /dev/null
++Description: Avoid NaN crash in test_structural
++
++Out-of-range frequency.cycle starting values make untransform_params return NaN,
++which crashes summary()
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: no
++
++--- a/statsmodels/tsa/statespace/tests/results/results_structural.py
+++++ b/statsmodels/tsa/statespace/tests/results/results_structural.py
++@@ -291,12 +291,15 @@ lltrend_cycle_seasonal_reg_ar1_approx_di
++ # Monthly frequency dataset
++ {'level': 'lltrend', 'autoregressive': 1, 'cycle': True,
++ 'stochastic_cycle': True, 'seasonal': 4, 'autoregressive': 1,
++- 'exog': True, 'mle_regression': False, 'freq': 'MS'},
+++ 'exog': True, 'mle_regression': False, 'freq': 'MS',
+++ 'cycle_period_bounds': (1.5*4, 12*4)},
+++ # explicit cycle_period_bounds needed because the default *12
+++ # makes start_params out of range
++ # Minutely frequency dataset
++ {'level': 'lltrend', 'autoregressive': 1, 'cycle': True,
++ 'stochastic_cycle': True, 'seasonal': 4, 'autoregressive': 1,
++ 'exog': True, 'mle_regression': False, 'freq': 'T',
++- 'cycle_period_bounds': (1.5*12, 12*12)},
+++ 'cycle_period_bounds': (1.5*4, 12*4)},
++ ],
++ 'params': [0.0001, 0.01, 0.06, 0.0001, 0.0001, 0.1, 2*pi / 10, 0.2],
++ 'start_params': [0.0001, 0.01, 0.06, 0.0001, 0.0001, 0.1, 2*pi / 10, 0.2],
--- /dev/null
--- /dev/null
++Description: Loosen tolerances on some tests on i386
++
++and xfail a test that fails because it does not produce a ConvergenceWarning.
++
++Failure logs:
++test_smoothing
++https://buildd.debian.org/status/fetch.php?pkg=statsmodels&arch=i386&ver=0.9.0-3&stamp=1567157609&raw=0
++test_multivariate_switch_univariate
++https://buildd.debian.org/status/fetch.php?pkg=statsmodels&arch=i386&ver=0.12.0-1&stamp=1599693472&raw=0
++
++As it only fails on i386 and isn't far wrong,
++I suspect different rounding due to x87 excess precision
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Bug-Debian: https://bugs.debian.org/938949
++Forwarded: no
++
++--- a/statsmodels/tsa/statespace/tests/test_smoothing.py
+++++ b/statsmodels/tsa/statespace/tests/test_smoothing.py
++@@ -29,8 +29,10 @@ from statsmodels.tsa.statespace.kalman_s
++ SMOOTH_UNIVARIATE)
++
++ current_path = os.path.dirname(os.path.abspath(__file__))
++-
++-
+++import sys
+++import platform
+++import re
+++i386_looser_tolerances=bool(re.match('i.?86|x86',platform.uname()[4])) and np.log2(sys.maxsize)<33
++ class TestStatesAR3(object):
++ @classmethod
++ def setup_class(cls, alternate_timing=False, *args, **kwargs):
++@@ -834,7 +836,7 @@ class TestMultivariateVARUnivariate(obje
++ def test_forecasts_error_cov(self):
++ assert_allclose(
++ self.results.forecasts_error_cov.diagonal(),
++- self.desired[['F1', 'F2', 'F3']]
+++ self.desired[['F1', 'F2', 'F3']],rtol=2e-7 if i386_looser_tolerances else 1e-7
++ )
++
++ def test_predicted_states(self):
++@@ -888,7 +890,7 @@ class TestMultivariateVARUnivariate(obje
++ def test_smoothed_measurement_disturbance_cov(self):
++ assert_allclose(
++ self.results.smoothed_measurement_disturbance_cov.diagonal(),
++- self.desired[['Veps1', 'Veps2', 'Veps3']]
+++ self.desired[['Veps1', 'Veps2', 'Veps3']],rtol=2e-7 if i386_looser_tolerances else 1e-7
++ )
++
++
++--- a/statsmodels/tsa/statespace/tests/test_multivariate_switch_univariate.py
+++++ b/statsmodels/tsa/statespace/tests/test_multivariate_switch_univariate.py
++@@ -19,6 +19,10 @@ Princeton, N.J.: Princeton University Pr
++ """
++ import numpy as np
++ import pytest
+++import sys
+++import platform
+++import re
+++i386_looser_tolerances=bool(re.match('i.?86|x86',platform.uname()[4])) and np.log2(sys.maxsize)<33
++
++ from statsmodels.tsa.statespace import (
++ mlemodel, sarimax, structural, varmax, dynamic_factor)
++@@ -236,7 +240,7 @@ def test_filter_output(univariate, missi
++ # Test the output when the multivariate filter switches to the univariate
++ # filter
++ mod = get_model(univariate, missing, init)
++- check_filter_output(mod, periods)
+++ check_filter_output(mod, periods, atol=1e-10 if i386_looser_tolerances else 0)
++
++
++ @pytest.mark.parametrize('univariate', [True, False])
++@@ -255,7 +259,7 @@ def test_smoother_output(univariate, mis
++ if init == 'diffuse':
++ return
++ mod.ssm.timing_init_filtered = True
++- atol = 1e-12
+++ atol = 1e-8 if i386_looser_tolerances else 1e-12
++ # Tolerance is lower for approximate diffuse for one attribute in this case
++ if missing == 'init' and init == 'approximate_diffuse':
++ atol = 1e-6
++--- a/statsmodels/tsa/holtwinters/tests/test_holtwinters.py
+++++ b/statsmodels/tsa/holtwinters/tests/test_holtwinters.py
++@@ -5,6 +5,8 @@ Modified: Kevin Sheppard
++ import os
++ import re
++ import warnings
+++import sys
+++import platform
++
++ import numpy as np
++ from numpy.testing import assert_allclose, assert_almost_equal
++@@ -1612,6 +1614,7 @@ def test_simulate_boxcox(austourists):
++ assert np.all(np.abs(mean - expected) < 5)
++
++
+++@pytest.mark.xfail(condition=bool(re.match('i.?86|x86',platform.uname()[4])) and np.log2(sys.maxsize)<33,reason="doesn't warn on i386",strict=False)
++ @pytest.mark.parametrize("ix", [10, 100, 1000, 2000])
++ def test_forecast_index(ix):
++ # GH 6549
--- /dev/null
--- /dev/null
++Description: Don't try to use a theme we don't have
++
++Upstream switched to sphinx_material in
++1d7aafad5c3ad634c9ae2122881c1809c2ccbcbe
++and Debian doesn't have that theme.
++
++(I haven't investigated packaging it, which might be a better long-term option.)
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: not-needed (except maybe the version fix? or is that no longer needed at all?)
++
++--- a/docs/source/conf.py
+++++ b/docs/source/conf.py
++@@ -21,7 +21,6 @@ import sys
++ import yaml
++ from numpydoc.xref import DEFAULT_LINKS
++
++-import sphinx_material
++ from statsmodels import __version__
++
++ # -- Monkey Patch ----------------------------------------------------------
++@@ -100,7 +99,7 @@ release = __version__
++
++ lv = LooseVersion(release)
++ commit = ''
++-full_version = short_version = version = release
+++full_version = version = short_version = lv.vstring
++ if '+' in lv.version:
++ short_version = lv.vstring[:lv.vstring.index('+')]
++ commit = lv.version[lv.version.index('+') + 1]
++@@ -162,10 +161,8 @@ pygments_style = 'default'
++
++ # The theme to use for HTML and HTML Help pages. See the documentation for
++ # a list of builtin themes.
++-extensions.append('sphinx_material')
++-html_theme_path = sphinx_material.html_theme_path()
++-html_context = sphinx_material.get_html_context()
++-html_theme = 'sphinx_material'
+++html_theme = 'nature'
+++html_context = {}
++ html_title = project
++ html_short_title = project
++ # material theme options (see theme.conf for more information)
++@@ -239,7 +236,7 @@ html_static_path = ['_static']
++
++ # Custom sidebar templates, maps document names to template names.
++ html_sidebars = {
++- "**": ["logo-text.html", "globaltoc.html", "localtoc.html", "searchbox.html"]
+++ "**": ["globaltoc.html", "localtoc.html", "searchbox.html"]
++ }
++
++ # Additional templates that should be rendered to pages, maps page names to
--- /dev/null
--- /dev/null
++Description: Use Python 3 in script shebangs
++
++(Skipping the two .pyx files with #!python - they need to be compiled
++with Cython, not directly run with any version of plain Python)
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: no
++
++--- a/archive/docs/fix_longtable.py
+++++ b/archive/docs/fix_longtable.py
++@@ -1,4 +1,4 @@
++-#!/usr/bin/env python
+++#!/usr/bin/env python3
++ import sys
++ import os
++
++--- a/examples/python/plots_boxplots.py
+++++ b/examples/python/plots_boxplots.py
++@@ -1,4 +1,4 @@
++-#!/usr/bin/env python
+++#!/usr/bin/env python3
++ # coding: utf-8
++
++ # DO NOT EDIT
++--- a/examples/python/robust_models_1.py
+++++ b/examples/python/robust_models_1.py
++@@ -1,4 +1,4 @@
++-#!/usr/bin/env python
+++#!/usr/bin/env python3
++ # coding: utf-8
++
++ # DO NOT EDIT
++--- a/statsmodels/regression/quantile_regression.py
+++++ b/statsmodels/regression/quantile_regression.py
++@@ -1,4 +1,4 @@
++-#!/usr/bin/env python
+++#!/usr/bin/env python3
++
++ '''
++ Quantile regression model
++--- a/statsmodels/sandbox/examples/example_pca.py
+++++ b/statsmodels/sandbox/examples/example_pca.py
++@@ -1,4 +1,4 @@
++-#!/usr/bin/env python
+++#!/usr/bin/env python3
++
++ import numpy as np
++ from statsmodels.sandbox.pca import Pca
++--- a/statsmodels/tools/print_version.py
+++++ b/statsmodels/tools/print_version.py
++@@ -1,4 +1,4 @@
++-#!/usr/bin/env python
+++#!/usr/bin/env python3
++ from functools import reduce
++ import sys
++ from os.path import dirname
++--- a/tools/export_notebooks_to_python.py
+++++ b/tools/export_notebooks_to_python.py
++@@ -1,4 +1,4 @@
++-#!/usr/bin/env python
+++#!/usr/bin/env python3
++ # -*- coding: utf-8 -*-
++ import argparse
++ import glob
++--- a/tools/generate_formula_api.py
+++++ b/tools/generate_formula_api.py
++@@ -1,4 +1,4 @@
++-#!/usr/bin/env python
+++#!/usr/bin/env python3
++ """
++ This will generate an API file for formula in dir/statsmodels/formula/api.py
++
--- /dev/null
--- /dev/null
++Description: Remove code copied from Stack Overflow and a wasteful loop
++
++Stack Overflow content is CC-BY-SA licensed,
++which this package is not supposed to be. This snippet may be
++too small to be copyrightable, but it is removed to be safe.
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: no
++
++--- a/statsmodels/examples/ex_generic_mle_tdist.py
+++++ b/statsmodels/examples/ex_generic_mle_tdist.py
++@@ -198,6 +198,9 @@ class MyPareto(GenericLikelihoodModel):
++ this does not trim lower values during ks optimization
++
++ '''
+++ # all 3 of these are based on
+++ # https://stackoverflow.com/questions/3242326/fitting-a-pareto-distribution-with-python-scipy
+++ # validly licensed because the same person added it there and here
++ rvs = self.endog
++ rvsmin = rvs.min()
++ fixdf = np.nan * np.ones(3)
++--- a/statsmodels/graphics/regressionplots.py
+++++ b/statsmodels/graphics/regressionplots.py
++@@ -434,11 +434,12 @@ def plot_partregress(endog, exog_i, exog
++ ax.set_ylabel("e(%s | X)" % y_axis_endog_name)
++ ax.set_title('Partial Regression Plot', **title_kwargs)
++
++- # NOTE: if we want to get super fancy, we could annotate if a point is
++- # clicked using this widget
+++ # NOTE: it is possible to annotate if a point is clicked using
++ # http://stackoverflow.com/questions/4652439/
++ # is-there-a-matplotlib-equivalent-of-matlabs-datacursormode/
++ # 4674445#4674445
+++ # but for licensing reasons this is _not_ to be directly copied
+++ # into statsmodels itself
++ if obs_labels is True:
++ if data is not None:
++ obs_labels = data.index
++--- a/statsmodels/tools/sequences.py
+++++ b/statsmodels/tools/sequences.py
++@@ -61,24 +61,22 @@ def primes_from_2_to(n):
++ Parameters
++ ----------
++ n : int
++- Sup bound with ``n >= 6``.
+++ Upper limit (exclusive).
++
++ Returns
++ -------
++ primes : list(int)
++ Primes in ``2 <= p < n``.
++-
++- References
++- ----------
++- [1] `StackOverflow <https://stackoverflow.com/questions/2068372>`_.
++ """
++- sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)
++- for i in range(1, int(n ** 0.5) // 3 + 1):
++- if sieve[i]:
++- k = 3 * i + 1 | 1
++- sieve[k * k // 3::2 * k] = False
++- sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False
++- return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]
+++ isprime = np.ones((n - 2,)) # 2...n-1, so indices offset by 2 from values
+++ endidx = int(np.sqrt(n)) - 1
+++ nextprime = 2
+++ while True:
+++ isprime[nextprime * nextprime - 2::nextprime] = 0
+++ nextps = np.nonzero(isprime[nextprime - 1:endidx])[0]
+++ if len(nextps) == 0:
+++ return np.nonzero(isprime)[0] + 2
+++ nextprime = nextprime + nextps[0] + 1
++
++
++ def n_primes(n):
++@@ -109,12 +107,13 @@ def n_primes(n):
++ 953, 967, 971, 977, 983, 991, 997][:n]
++
++ if len(primes) < n:
++- big_number = 10
++- while 'Not enought primes':
+++ # this should always be enough - Rosser (1941)
+++ big_number = int(n * (np.log(n) + np.log(np.log(n))))
+++ while True: # not enough primes
++ primes = primes_from_2_to(big_number)[:n]
++ if len(primes) == n:
++ break
++- big_number += 1000
+++ big_number = int(big_number * 1.5)
++
++ return primes
++
--- /dev/null
--- /dev/null
++use-cached-datasets
++up_reduce_test_precision
++use-system-inventory
++xfail_kalman_armhf.patch
++use_available_data.patch
++i386_loosen_test_tolerances.patch
++python3_shebangs.patch
++use_tmp_path.patch
++944054_flaky_tests.patch
++sphinx_autosummary.patch
++no_sphinx_material.patch
++sphinx_ignore_errors.patch
++fix_test_bounds.patch
++sphinx_local_requirejs.patch
++spelling.patch
++xfail_regimeswitching_armhf.patch
++xfail_dynamicfactor_ppc64el.patch
++xfail_no_multiprocessing.patch
++remove_ccbysa_snippet.patch
++968210_ignore_tests.patch
++997081_xfail.patch
--- /dev/null
--- /dev/null
++Description: Fix typos
++
++Origin: lintian
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: no
++
++--- a/statsmodels/tsa/_stl.pyx
+++++ b/statsmodels/tsa/_stl.pyx
++@@ -245,11 +245,11 @@ cdef class STL(object):
++ self.low_pass_deg = low_pass_deg # ildeg
++ self.robust = robust
++ if not _is_pos_int(low_pass_jump, False):
++- raise ValueError('low_pass_jump must be a positve integer')
+++ raise ValueError('low_pass_jump must be a positive integer')
++ if not _is_pos_int(seasonal_jump, False):
++- raise ValueError('seasonal_jump must be a positve integer')
+++ raise ValueError('seasonal_jump must be a positive integer')
++ if not _is_pos_int(trend_jump, False):
++- raise ValueError('trend_jump must be a positve integer')
+++ raise ValueError('trend_jump must be a positive integer')
++ self.low_pass_jump = low_pass_jump
++ self.seasonal_jump = seasonal_jump
++ self.trend_jump = trend_jump
++--- a/statsmodels/tsa/tests/test_stl.py
+++++ b/statsmodels/tsa/tests/test_stl.py
++@@ -232,17 +232,17 @@ def test_jump_errors(default_kwargs):
++ class_kwargs, _, _ = _to_class_kwargs(default_kwargs)
++ endog = class_kwargs["endog"]
++ period = class_kwargs["period"]
++- with pytest.raises(ValueError, match="low_pass_jump must be a positve"):
+++ with pytest.raises(ValueError, match="low_pass_jump must be a positive"):
++ STL(endog=endog, period=period, low_pass_jump=0)
++- with pytest.raises(ValueError, match="low_pass_jump must be a positve"):
+++ with pytest.raises(ValueError, match="low_pass_jump must be a positive"):
++ STL(endog=endog, period=period, low_pass_jump=1.0)
++- with pytest.raises(ValueError, match="seasonal_jump must be a positve"):
+++ with pytest.raises(ValueError, match="seasonal_jump must be a positive"):
++ STL(endog=endog, period=period, seasonal_jump=0)
++- with pytest.raises(ValueError, match="seasonal_jump must be a positve"):
+++ with pytest.raises(ValueError, match="seasonal_jump must be a positive"):
++ STL(endog=endog, period=period, seasonal_jump=1.0)
++- with pytest.raises(ValueError, match="trend_jump must be a positve"):
+++ with pytest.raises(ValueError, match="trend_jump must be a positive"):
++ STL(endog=endog, period=period, trend_jump=0)
++- with pytest.raises(ValueError, match="trend_jump must be a positve"):
+++ with pytest.raises(ValueError, match="trend_jump must be a positive"):
++ STL(endog=endog, period=period, trend_jump=1.0)
++
++
--- /dev/null
--- /dev/null
++Description: Make the module reference tables appear
++
++Our numpydoc is old enough to still need this manually set
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Bug: https://github.com/numpy/numpydoc/issues/141
++Forwarded: not-needed
++
++--- a/docs/source/conf.py
+++++ b/docs/source/conf.py
++@@ -51,7 +51,7 @@ extensions = ['sphinx.ext.autodoc',
++ 'nbsphinx',
++ 'sphinx.ext.mathjax',
++ 'sphinx.ext.viewcode',
++- # 'sphinx.ext.autosummary',
+++ 'sphinx.ext.autosummary',
++ 'sphinx.ext.inheritance_diagram',
++ 'matplotlib.sphinxext.plot_directive',
++ 'IPython.sphinxext.ipython_console_highlighting',
--- /dev/null
--- /dev/null
++Description: Don't fail the documentation build on exceptions in examples
++
++If example code outputs an error when run,
++ipython (ipython:: blocks) now fails the whole build by default,
++and tools/nbgenerate.py (.ipynb files) now leaves out that file
++(leaving a broken link in the examples index).
++
++As some examples use downloaded data and are hence expected to be
++unrunnable on a Debian buildd, this instead keeps them
++(ipython:: with error output, .ipynb without output).
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: not-needed
++
++--- a/docs/source/conf.py
+++++ b/docs/source/conf.py
++@@ -66,6 +66,7 @@ else:
++
++ # nbsphinx options
++ nbsphinx_allow_errors = True
+++ipython_warning_is_error = False
++ # sphinxcontrib-spelling options
++ spelling_word_list_filename = ['spelling_wordlist.txt', 'names_wordlist.txt']
++ spelling_ignore_pypi_package_names = True
++--- a/docs/Makefile
+++++ b/docs/Makefile
++@@ -34,6 +34,8 @@ html:
++ mkdir -p $(BUILDDIR)/source/examples/notebooks/generated
++ # Black list notebooks from doc build here
++ $(TOOLSPATH)$(NOTEBOOKBUILD) --parallel --report-errors --skip-existing --execute-only --execution-blacklist statespace_custom_models
+++ @echo "Copying notebooks that failed execution (there are usually several in Debian because some need network and/or dependencies we don't have)"
+++ cp -nav ../examples/notebooks/*.ipynb -t source/examples/notebooks/generated
++ @echo "Running sphinx-build"
++ @echo @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(O)
++ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(O)
--- /dev/null
--- /dev/null
++Description: Use local requirejs/mathjax
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: not-needed
++
++--- a/docs/source/conf.py
+++++ b/docs/source/conf.py
++@@ -66,6 +66,8 @@ else:
++
++ # nbsphinx options
++ nbsphinx_allow_errors = True
+++nbsphinx_requirejs_path="require.min.js"
+++mathjax_path="MathJax.js"
++ ipython_warning_is_error = False
++ # sphinxcontrib-spelling options
++ spelling_word_list_filename = ['spelling_wordlist.txt', 'names_wordlist.txt']
--- /dev/null
--- /dev/null
++Description: Loosen a test tolerance to prevent a test failure on i386
++
++Author: Yaroslav Halchenko <debian@onerussian.com>
++Bug: https://github.com/statsmodels/statsmodels/issues/1831
++Forwarded: upstream considers this probably fixed
++
++--- a/statsmodels/emplike/tests/test_regression.py
+++++ b/statsmodels/emplike/tests/test_regression.py
++@@ -147,7 +147,7 @@ class TestRegressionNM(GenRes):
++ def test_ci_beta2(self):
++ beta2ci = self.res1.conf_int_el(2, lower_bound=.59, upper_bound=2.2,
++ method='nm')
++- assert_almost_equal(beta2ci, self.res2.test_ci_beta2, 6)
+++ assert_almost_equal(beta2ci, self.res2.test_ci_beta2, 4)
++
++ @pytest.mark.slow
++ def test_ci_beta3(self):
--- /dev/null
--- /dev/null
++Description: Use cached datasets in tests and examples
++
++Also remove a download that isn't actually used in that example.
++
++This allows the tests to pass, and at least some of the examples to
++be built, in an offline environment such as a Debian buildd.
++
++The cached data is extracted from R packages by debian/datasets/*.
++
++Author: Diane Trout <diane@ghic.org>, Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: not-needed
++
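++As an illustration (a minimal sketch, not part of the patch itself), the
++pattern applied throughout is to pass cache=True so that get_rdataset()
++reuses a local copy (here pre-populated by debian/datasets/*) when one is
++present, instead of downloading:
++
++    import statsmodels.api as sm
++    # cache=True: check the statsmodels data cache before hitting the network
++    df = sm.datasets.get_rdataset("Guerry", "HistData", cache=True).data
++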
++--- a/docs/source/contingency_tables.rst
+++++ b/docs/source/contingency_tables.rst
++@@ -49,7 +49,7 @@ contingency table cell counts:
++ import pandas as pd
++ import statsmodels.api as sm
++
++- df = sm.datasets.get_rdataset("Arthritis", "vcd").data
+++ df = sm.datasets.get_rdataset("Arthritis", "vcd", cache=True).data
++
++ tab = pd.crosstab(df['Treatment'], df['Improved'])
++ tab = tab.loc[:, ["None", "Some", "Marked"]]
++@@ -184,7 +184,7 @@ contingency table.
++
++ .. ipython:: python
++
++- df = sm.datasets.get_rdataset("VisualAcuity", "vcd").data
+++ df = sm.datasets.get_rdataset("VisualAcuity", "vcd", cache=True).data
++ df = df.loc[df.gender == "female", :]
++ tab = df.set_index(['left', 'right'])
++ del tab["gender"]
++--- a/docs/source/duration.rst
+++++ b/docs/source/duration.rst
++@@ -41,7 +41,7 @@ We fit the survival distribution only fo
++
++ import statsmodels.api as sm
++
++- data = sm.datasets.get_rdataset("flchain", "survival").data
+++ data = sm.datasets.get_rdataset("flchain", "survival", cache=True).data
++ df = data.loc[data.sex == "F", :]
++ sf = sm.SurvfuncRight(df["futime"], df["death"])
++
++@@ -152,7 +152,7 @@ depending on the value of the covariates
++ import statsmodels.api as sm
++ import statsmodels.formula.api as smf
++
++- data = sm.datasets.get_rdataset("flchain", "survival").data
+++ data = sm.datasets.get_rdataset("flchain", "survival", cache=True).data
++ del data["chapter"]
++ data = data.dropna()
++ data["lam"] = data["lambda"]
++--- a/docs/source/example_formulas.rst
+++++ b/docs/source/example_formulas.rst
++@@ -47,7 +47,7 @@ and list-wise delete to remove missing o
++
++ .. ipython:: python
++
++- df = sm.datasets.get_rdataset("Guerry", "HistData").data
+++ df = sm.datasets.get_rdataset("Guerry", "HistData", cache=True).data
++ df = df[['Lottery', 'Literacy', 'Wealth', 'Region']].dropna()
++ df.head()
++
++--- a/docs/source/gee.rst
+++++ b/docs/source/gee.rst
++@@ -24,7 +24,7 @@ within clusters using data on epilepsy s
++ import statsmodels.api as sm
++ import statsmodels.formula.api as smf
++
++- data = sm.datasets.get_rdataset('epil', package='MASS').data
+++ data = sm.datasets.get_rdataset('epil', package='MASS', cache=True).data
++
++ fam = sm.families.Poisson()
++ ind = sm.cov_struct.Exchangeable()
++--- a/docs/source/gettingstarted.rst
+++++ b/docs/source/gettingstarted.rst
++@@ -50,7 +50,7 @@ We could download the file locally and t
++
++ .. ipython:: python
++
++- df = sm.datasets.get_rdataset("Guerry", "HistData").data
+++ df = sm.datasets.get_rdataset("Guerry", "HistData", cache=True).data
++
++ The `Input/Output doc page <iolib.html>`_ shows how to import from various
++ other formats.
++--- a/docs/source/index.rst
+++++ b/docs/source/index.rst
++@@ -23,7 +23,7 @@ Here is a simple example using ordinary
++ import statsmodels.formula.api as smf
++
++ # Load data
++- dat = sm.datasets.get_rdataset("Guerry", "HistData").data
+++ dat = sm.datasets.get_rdataset("Guerry", "HistData", cache=True).data
++
++ # Fit regression model (using the natural log of one of the regressors)
++ results = smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=dat).fit()
++--- a/docs/source/mixed_linear.rst
+++++ b/docs/source/mixed_linear.rst
++@@ -83,7 +83,7 @@ Examples
++ import statsmodels.api as sm
++ import statsmodels.formula.api as smf
++
++- data = sm.datasets.get_rdataset("dietox", "geepack").data
+++ data = sm.datasets.get_rdataset("dietox", "geepack", cache=True).data
++
++ md = smf.mixedlm("Weight ~ Time", data, groups=data["Pig"])
++ mdf = md.fit()
++--- a/docs/source/release/version0.6.rst
+++++ b/docs/source/release/version0.6.rst
++@@ -41,7 +41,7 @@ covariates.
++ import statsmodels.api as sm
++ import statsmodels.formula.api as smf
++
++- data = sm.datasets.get_rdataset("epil", "MASS").data
+++ data = sm.datasets.get_rdataset("epil", "MASS", cache=True).data
++
++ md = smf.gee("y ~ age + trt + base", "subject", data,
++ cov_struct=sm.cov_struct.Independence(),
++--- a/docs/source/datasets/index.rst
+++++ b/docs/source/datasets/index.rst
++@@ -30,7 +30,7 @@ The `Rdatasets project <https://vincenta
++ .. ipython:: python
++
++ import statsmodels.api as sm
++- duncan_prestige = sm.datasets.get_rdataset("Duncan", "carData")
+++ duncan_prestige = sm.datasets.get_rdataset("Duncan", "carData", cache=True)
++ print(duncan_prestige.__doc__)
++ duncan_prestige.data.head(5)
++
++--- a/examples/notebooks/markov_regression.ipynb
+++++ b/examples/notebooks/markov_regression.ipynb
++@@ -30,14 +30,7 @@
++ "import pandas as pd\n",
++ "import statsmodels.api as sm\n",
++ "import matplotlib.pyplot as plt\n",
++- "\n",
++- "# NBER recessions\n",
++- "from pandas_datareader.data import DataReader\n",
++- "from datetime import datetime\n",
++- "\n",
++- "usrec = DataReader(\n",
++- " \"USREC\", \"fred\", start=datetime(1947, 1, 1), end=datetime(2013, 4, 1)\n",
++- ")"
+++ "from datetime import datetime\n"
++ ]
++ },
++ {
++--- a/examples/notebooks/mixed_lm_example.ipynb
+++++ b/examples/notebooks/mixed_lm_example.ipynb
++@@ -86,7 +86,7 @@
++ "metadata": {},
++ "outputs": [],
++ "source": [
++- "data = sm.datasets.get_rdataset(\"dietox\", \"geepack\").data\n",
+++ "data = sm.datasets.get_rdataset(\"dietox\", \"geepack\", cache=True).data\n",
++ "md = smf.mixedlm(\"Weight ~ Time\", data, groups=data[\"Pig\"])\n",
++ "mdf = md.fit(method=[\"lbfgs\"])\n",
++ "print(mdf.summary())"
++@@ -318,7 +318,7 @@
++ "metadata": {},
++ "outputs": [],
++ "source": [
++- "data = sm.datasets.get_rdataset(\"Sitka\", \"MASS\").data\n",
+++ "data = sm.datasets.get_rdataset(\"Sitka\", \"MASS\", cache=True).data\n",
++ "endog = data[\"size\"]\n",
++ "data[\"Intercept\"] = 1\n",
++ "exog = data[[\"Intercept\", \"Time\"]]"
++--- a/examples/notebooks/regression_diagnostics.ipynb
+++++ b/examples/notebooks/regression_diagnostics.ipynb
++@@ -47,8 +47,8 @@
++ "import matplotlib.pyplot as plt\n",
++ "\n",
++ "# Load data\n",
++- "url = \"https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/HistData/Guerry.csv\"\n",
++- "dat = pd.read_csv(url)\n",
+++ "import statsmodels.datasets\n",
+++ "dat = statsmodels.datasets.get_rdataset(\"Guerry\", \"HistData\", cache=True).data\n",
++ "\n",
++ "# Fit regression model (using the natural log of one of the regressors)\n",
++ "results = smf.ols(\"Lottery ~ Literacy + np.log(Pop1831)\", data=dat).fit()\n",
++--- a/statsmodels/stats/tests/test_dist_dependant_measures.py
+++++ b/statsmodels/stats/tests/test_dist_dependant_measures.py
++@@ -140,7 +140,7 @@ class TestDistDependenceMeasures(object)
++ 0.1025087
++ """
++ try:
++- iris = get_rdataset("iris").data.values[:, :4]
+++ iris = get_rdataset("iris", cache=True).data.values[:, :4]
++ except IGNORED_EXCEPTIONS:
++ pytest.skip('Failed with HTTPError or URLError, these are random')
++
++@@ -180,7 +180,7 @@ class TestDistDependenceMeasures(object)
++ 30.01526
++ """
++ try:
++- quakes = get_rdataset("quakes").data.values[:, :3]
+++ quakes = get_rdataset("quakes", cache=True).data.values[:, :3]
++ except IGNORED_EXCEPTIONS:
++ pytest.skip('Failed with HTTPError or URLError, these are random')
++
--- /dev/null
--- /dev/null
++Author: Diane Trout <diane@ghic.org>
++Description: Use intersphinx inventory files from other Debian packages.
++Forwarded: not-needed
++
++--- a/docs/source/conf.py
+++++ b/docs/source/conf.py
++@@ -392,12 +392,12 @@ nbsphinx_execute = "never"
++ # Example configuration for intersphinx: refer to the Python standard library.
++ # Example configuration for intersphinx: refer to the Python standard library.
++ intersphinx_mapping = {
++- 'numpy': ('https://docs.scipy.org/doc/numpy/', None),
++- 'python': ('https://docs.python.org/3/', None),
+++ 'numpy': ('https://docs.scipy.org/doc/numpy/', '/usr/share/doc/python-numpy-doc/html/objects.inv'),
+++ 'python': ('https://docs.python.org/3/', '/usr/share/doc/python3-doc/html/objects.inv'),
++ 'pydagogue': ('https://matthew-brett.github.io/pydagogue/', None),
++- 'matplotlib': ('https://matplotlib.org/', None),
++- 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
++- 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
+++ 'matplotlib': ('https://matplotlib.org/', '/usr/share/doc/python-matplotlib-doc/html/objects.inv'),
+++ 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', '/usr/share/doc/python-scipy-doc/html/objects.inv'),
+++ 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', '/usr/share/doc/python-pandas-doc/html/objects.inv'),
++ }
++
++ plot_basedir = join(dirname(dirname(os.path.abspath(__file__))), 'source')
--- /dev/null
--- /dev/null
++Description: Use example data from an R package we have
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: not-needed
++
++--- a/examples/notebooks/generic_mle.ipynb
+++++ b/examples/notebooks/generic_mle.ipynb
++@@ -311,10 +311,10 @@
++ "\n",
++ "### Usage Example\n",
++ "\n",
++- "The [Medpar](https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/doc/COUNT/medpar.html)\n",
+++ "The [epilepsy](https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/doc/robustbase/epilepsy.html)\n",
++ "dataset is hosted in CSV format at the [Rdatasets repository](https://raw.githubusercontent.com/vincentarelbundock/Rdatasets). We use the ``read_csv``\n",
++ "function from the [Pandas library](https://pandas.pydata.org) to load the data\n",
++- "in memory. We then print the first few columns: \n"
+++ "in memory. We then print the first few entries: \n"
++ ]
++ },
++ {
++@@ -340,9 +340,9 @@
++ },
++ "outputs": [],
++ "source": [
++- "medpar = sm.datasets.get_rdataset(\"medpar\", \"COUNT\", cache=True).data\n",
+++ "epilepsy = sm.datasets.get_rdataset(\"epilepsy\", \"robustbase\", cache=True).data\n",
++ "\n",
++- "medpar.head()"
+++ "epilepsy.head()"
++ ]
++ },
++ {
++@@ -350,8 +350,8 @@
++ "metadata": {},
++ "source": [
++ "The model we are interested in has a vector of non-negative integers as\n",
++- "dependent variable (``los``), and 5 regressors: ``Intercept``, ``type2``,\n",
++- "``type3``, ``hmo``, ``white``.\n",
+++ "dependent variable (``Ysum``), and 3 regressors: ``Intercept``, ``Base``,\n",
+++ "``Trt``.\n",
++ "\n",
++ "For estimation, we need to create two variables to hold our regressors and the outcome variable. These can be ndarrays or pandas objects."
++ ]
++@@ -366,8 +366,9 @@
++ },
++ "outputs": [],
++ "source": [
++- "y = medpar.los\n",
++- "X = medpar[[\"type2\", \"type3\", \"hmo\", \"white\"]].copy()\n",
+++ "y = epilepsy.Ysum\n",
+++ "epilepsy[\"Trtnum\"]=epilepsy[\"Trt\"].map({\"placebo\": 0, \"progabide\": 1})\n",
+++ "X = epilepsy[[\"Base\", \"Trtnum\"]].copy()\n",
++ "X[\"constant\"] = 1"
++ ]
++ },
++@@ -495,25 +496,42 @@
++ "cell_type": "markdown",
++ "metadata": {},
++ "source": [
++- "Or we could compare them to results obtained using the MASS implementation for R:\n",
++- "\n",
++- " url = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/csv/COUNT/medpar.csv'\n",
++- " medpar = read.csv(url)\n",
++- " f = los~factor(type)+hmo+white\n",
++- " \n",
++- " library(MASS)\n",
++- " mod = glm.nb(f, medpar)\n",
++- " coef(summary(mod))\n",
++- " Estimate Std. Error z value Pr(>|z|)\n",
++- " (Intercept) 2.31027893 0.06744676 34.253370 3.885556e-257\n",
++- " factor(type)2 0.22124898 0.05045746 4.384861 1.160597e-05\n",
++- " factor(type)3 0.70615882 0.07599849 9.291748 1.517751e-20\n",
++- " hmo -0.06795522 0.05321375 -1.277024 2.015939e-01\n",
++- " white -0.12906544 0.06836272 -1.887951 5.903257e-02\n",
++- "\n",
+++ "Or we could compare them to results obtained using the MASS implementation for R:\n"
+++ ]
+++ },
+++ {
+++ "cell_type": "code",
+++ "execution_count": null,
+++ "metadata": {
+++ "collapsed": false
+++ },
+++ "outputs": [],
+++ "source": [
+++ "%load_ext rpy2.ipython"
+++ ]
+++ },
+++ {
+++ "cell_type": "code",
+++ "execution_count": null,
+++ "metadata": {
+++ "collapsed": false
+++ },
+++ "outputs": [],
+++ "source": [
+++ "%R f = Ysum~factor(Trt)+Base\n",
+++ "%R data(epilepsy,package='robustbase')\n",
+++ "%R library(MASS)\n",
+++ "%R mod = glm.nb(f, epilepsy)\n",
+++ "%R print(coef(summary(mod)))\n"
+++ ]
+++ },
+++ {
+++ "cell_type": "markdown",
+++ "metadata": {},
+++ "source": [
++ "### Numerical precision \n",
++ "\n",
++- "The ``statsmodels`` generic MLE and ``R`` parameter estimates agree up to the fourth decimal. The standard errors, however, agree only up to the second decimal. This discrepancy is the result of imprecision in our Hessian numerical estimates. In the current context, the difference between ``MASS`` and ``statsmodels`` standard error estimates is substantively irrelevant, but it highlights the fact that users who need very precise estimates may not always want to rely on default settings when using numerical derivatives. In such cases, it is better to use analytical derivatives with the ``LikelihoodModel`` class."
+++ "The ``statsmodels`` generic MLE and ``R`` parameter estimates agree up to the fourth decimal. The standard errors, however, agree only up to the first decimal. This discrepancy is the result of imprecision in our Hessian numerical estimates. In the current context, the difference between ``MASS`` and ``statsmodels`` standard error estimates is substantively irrelevant, but it highlights the fact that users who need very precise estimates may not always want to rely on default settings when using numerical derivatives. In such cases, it is better to use analytical derivatives with the ``LikelihoodModel`` class."
++ ]
++ }
++ ],
--- /dev/null
--- /dev/null
++Description: Don't try to write to the source directory
++
++Writing to the source directory is not allowed during autopkgtest
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: no
++
++--- a/statsmodels/datasets/tests/test_utils.py
+++++ b/statsmodels/datasets/tests/test_utils.py
++@@ -9,45 +9,43 @@ import pytest
++
++ from statsmodels.datasets import get_rdataset, webuse, check_internet, utils
++
++-cur_dir = os.path.dirname(os.path.abspath(__file__))
++-
++ IGNORED_EXCEPTIONS = (HTTPError, URLError, SSLError, UnicodeEncodeError,
++ timeout)
++
++
++ @pytest.mark.smoke
++-def test_get_rdataset():
+++def test_get_rdataset(tmp_path):
++ test_url = "https://raw.githubusercontent.com/vincentarelbundock/" \
++ "Rdatasets/master/csv/datasets/cars.csv"
++ internet_available = check_internet(test_url)
++ if not internet_available: # pragma: no cover
++ pytest.skip('Unable to retrieve file - skipping test')
++ try:
++- duncan = get_rdataset("Duncan", "carData", cache=cur_dir)
+++ duncan = get_rdataset("Duncan", "carData", cache=tmp_path)
++ except IGNORED_EXCEPTIONS:
++ pytest.skip('Failed with HTTPError or URLError, these are random')
++ assert_(isinstance(duncan, utils.Dataset))
++- duncan = get_rdataset("Duncan", "carData", cache=cur_dir)
+++ duncan = get_rdataset("Duncan", "carData", cache=tmp_path)
++ assert_(duncan.from_cache)
++
++
++ @pytest.mark.smoke
++-def test_get_rdataset_write_read_cache():
+++def test_get_rdataset_write_read_cache(tmp_path):
++ # test writing and reading cache
++ try:
++- guerry = get_rdataset("Guerry", "HistData", cache=cur_dir)
+++ guerry = get_rdataset("Guerry", "HistData", cache=tmp_path)
++ except IGNORED_EXCEPTIONS:
++ pytest.skip('Failed with HTTPError or URLError, these are random')
++
++ assert_(guerry.from_cache is False)
++- guerry2 = get_rdataset("Guerry", "HistData", cache=cur_dir)
+++ guerry2 = get_rdataset("Guerry", "HistData", cache=tmp_path)
++ assert_(guerry2.from_cache is True)
++ fn = "raw.githubusercontent.com,vincentarelbundock,Rdatasets,master,csv," \
++ "HistData,Guerry-v2.csv.zip"
++- os.remove(os.path.join(cur_dir, fn))
+++ os.remove(os.path.join(tmp_path, fn))
++ fn = "raw.githubusercontent.com,vincentarelbundock,Rdatasets,master,doc," \
++ "HistData,rst,Guerry-v2.rst.zip"
++- os.remove(os.path.join(cur_dir, fn))
+++ os.remove(os.path.join(tmp_path, fn))
++
++
++ def test_webuse():
--- /dev/null
--- /dev/null
++Description: DynamicFactor tests fail on ppc64el
++
++Warn on use and xfail the affected test
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: no
++
++--- a/statsmodels/tsa/statespace/dynamic_factor.py
+++++ b/statsmodels/tsa/statespace/dynamic_factor.py
++@@ -23,6 +23,9 @@ from statsmodels.tsa.tsatools import lag
++ from statsmodels.tools.decorators import cache_readonly
++ import statsmodels.base.wrapper as wrap
++ from statsmodels.compat.pandas import Appender
+++import platform
+++import warnings
+++warn_dfactor_platform= "DynamicFactor can give wrong results on ppc64el" if platform.uname()[4].startswith('ppc') else False # test results at end of https://buildd.debian.org/status/fetch.php?pkg=statsmodels&arch=ppc64el&ver=0.11.0-1&stamp=1580692747&raw=0
++
++
++ class DynamicFactor(MLEModel):
++@@ -141,6 +144,8 @@ class DynamicFactor(MLEModel):
++ def __init__(self, endog, k_factors, factor_order, exog=None,
++ error_order=0, error_var=False, error_cov_type='diagonal',
++ enforce_stationarity=True, **kwargs):
+++ if warn_dfactor_platform:
+++ warnings.warn(warn_dfactor_platform)
++
++ # Model properties
++ self.enforce_stationarity = enforce_stationarity
++--- a/statsmodels/tsa/statespace/tests/test_dynamic_factor.py
+++++ b/statsmodels/tsa/statespace/tests/test_dynamic_factor.py
++@@ -9,6 +9,7 @@ import re
++ import warnings
++
++ import numpy as np
+++from statsmodels.tsa.statespace.dynamic_factor import warn_dfactor_platform
++ from numpy.testing import assert_equal, assert_raises, assert_allclose
++ import pandas as pd
++ import pytest
++@@ -575,7 +576,7 @@ class TestDynamicFactor_general_errors(C
++ r'cov.chol\[3,3\] +' + forg(params[offset + 5], prec=4),
++ table)
++
++-
+++@pytest.mark.xfail(condition=bool(warn_dfactor_platform),reason='known broken on ppc64el',strict=False)
++ class TestDynamicFactor_ar2_errors(CheckDynamicFactor):
++ """
++ Test for a dynamic factor model where errors are as general as possible,
--- /dev/null
--- /dev/null
++Description: KalmanFilter broken on armhf / hppa
++
++Warn on use, and xfail the tests
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Bug-Debian: https://bugs.debian.org/924036
++Bug-Ubuntu: https://launchpad.net/bugs/1819227
++Forwarded: no
++
++--- a/statsmodels/tsa/statespace/representation.py
+++++ b/statsmodels/tsa/statespace/representation.py
++@@ -11,6 +11,10 @@ from .tools import (
++ )
++ from .initialization import Initialization
++ from . import tools
+++import platform
+++import warnings
+++import sys
+++warn_kalman = "Representation/KalmanFilter (and hence much of tsa.statespace) can give wrong results on armhf (armv7) and hppa - https://bugs.debian.org/924036" if ((platform.uname()[4].startswith('arm') or platform.uname()[4].startswith('aarch') or platform.uname()[4].startswith('hppa')) and np.log2(sys.maxsize)<=32) else False # test results at end of https://buildd.debian.org/status/fetch.php?pkg=statsmodels&arch=armhf&ver=0.8.0-8&stamp=1551488279&raw=0 ; the use of log2 rather than 2**32 is to avoid overflow on Python 2
++
++
++ class OptionWrapper(object):
++@@ -259,6 +263,8 @@ class Representation(object):
++ design=None, obs_intercept=None, obs_cov=None,
++ transition=None, state_intercept=None, selection=None,
++ state_cov=None, statespace_classes=None, **kwargs):
+++ if warn_kalman:
+++ warnings.warn(warn_kalman)
++ self.shapes = {}
++
++ # Check if k_endog is actually the endog array
++--- a/statsmodels/tsa/statespace/tests/test_kalman.py
+++++ b/statsmodels/tsa/statespace/tests/test_kalman.py
++@@ -22,6 +22,7 @@ import pickle
++ import numpy as np
++ import pandas as pd
++ import os
+++from statsmodels.tsa.statespace.representation import warn_kalman
++ import pytest
++
++ from scipy.linalg.blas import find_best_blas_type
++@@ -296,7 +297,7 @@ class TestClark1987SingleComplex(Clark19
++ atol=1e-2
++ )
++
++-
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ class TestClark1987DoubleComplex(Clark1987):
++ """
++ Basic double precision complex test for the loglikelihood and filtered
++@@ -366,7 +367,7 @@ class TestClark1987ForecastDouble(Clark1
++ cls.model, cls.filter = cls.init_filter()
++ cls.result = cls.run_filter()
++
++-
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ class TestClark1987ForecastDoubleComplex(Clark1987Forecast):
++ """
++ Basic double complex forecasting test for the loglikelihood and filtered
++@@ -659,7 +660,7 @@ class TestClark1989ForecastDouble(Clark1
++ cls.model, cls.filter = cls.init_filter()
++ cls.result = cls.run_filter()
++
++-
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ class TestClark1989ForecastDoubleComplex(Clark1989Forecast):
++ """
++ Basic double complex forecasting test for the loglikelihood and filtered
++--- a/statsmodels/tsa/statespace/tests/test_mlemodel.py
+++++ b/statsmodels/tsa/statespace/tests/test_mlemodel.py
++@@ -9,6 +9,7 @@ from statsmodels.compat.pandas import Nu
++ import os
++ import re
++ import warnings
+++from statsmodels.tsa.statespace.representation import warn_kalman
++
++ import numpy as np
++ import pandas as pd
++@@ -276,7 +277,7 @@ def test_score_misc():
++ def test_from_formula():
++ assert_raises(NotImplementedError, lambda: MLEModel.from_formula(1, 2, 3))
++
++-
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_score_analytic_ar1():
++ # Test the score against the analytic score for an AR(1) model with 2
++ # observations
++@@ -445,6 +446,7 @@ def test_cov_params():
++ mod.fit(res.params, disp=-1, cov_type='invalid_cov_type')
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_transform():
++ # The transforms in MLEModel are noops
++ mod = MLEModel([1, 2], **kwargs)
++--- a/statsmodels/tsa/statespace/tests/test_representation.py
+++++ b/statsmodels/tsa/statespace/tests/test_representation.py
++@@ -16,6 +16,7 @@ MIT Press Books. The MIT Press.
++ import os
++ import warnings
++
+++from statsmodels.tsa.statespace.representation import warn_kalman
++ import numpy as np
++ import pandas as pd
++ import pytest
++@@ -222,7 +223,7 @@ class TestClark1987ForecastDouble(Clark1
++ super(TestClark1987ForecastDouble, cls).setup_class()
++ cls.results = cls.run_filter()
++
++-
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ class TestClark1987ForecastDoubleComplex(Clark1987Forecast):
++ """
++ Basic double complex forecasting test for the loglikelihood and filtered
++@@ -455,7 +456,7 @@ class TestClark1989ForecastDouble(Clark1
++ super(TestClark1989ForecastDouble, cls).setup_class()
++ cls.results = cls.run_filter()
++
++-
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ class TestClark1989ForecastDoubleComplex(Clark1989Forecast):
++ """
++ Basic double complex forecasting test for the loglikelihood and filtered
++--- a/statsmodels/tsa/statespace/tests/test_dynamic_factor.py
+++++ b/statsmodels/tsa/statespace/tests/test_dynamic_factor.py
++@@ -16,13 +16,15 @@ import pytest
++ from statsmodels.tsa.statespace import dynamic_factor
++ from .results import results_varmax, results_dynamic_factor
++ from statsmodels.iolib.summary import forg
+++from statsmodels.tsa.statespace.representation import warn_kalman
+++import pytest
++
++ current_path = os.path.dirname(os.path.abspath(__file__))
++
++ output_path = os.path.join('results', 'results_dynamic_factor_stata.csv')
++ output_results = pd.read_csv(os.path.join(current_path, output_path))
++
++-
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ class CheckDynamicFactor(object):
++ @classmethod
++ def setup_class(cls, true, k_factors, factor_order, cov_type='approx',
++--- a/statsmodels/tsa/statespace/tests/test_sarimax.py
+++++ b/statsmodels/tsa/statespace/tests/test_sarimax.py
++@@ -11,6 +11,7 @@ import warnings
++
++ from statsmodels.compat.platform import PLATFORM_WIN
++
+++from statsmodels.tsa.statespace.representation import warn_kalman
++ import numpy as np
++ import pandas as pd
++ import pytest
++@@ -33,6 +34,7 @@ coverage_path = os.path.join('results',
++ coverage_results = pd.read_csv(os.path.join(current_path, coverage_path))
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ class TestSARIMAXStatsmodels(object):
++ """
++ Test ARIMA model using SARIMAX class against statsmodels ARIMA class
++@@ -133,6 +135,7 @@ class TestRealGDPARStata(object):
++ )
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ class SARIMAXStataTests(object):
++ def test_loglike(self):
++ assert_almost_equal(
++@@ -1030,6 +1033,7 @@ class SARIMAXCoverageTest(object):
++ model.enforce_stationarity = stat
++ model.enforce_invertibility = inv
++
+++ @pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_results(self):
++ self.result = self.model.filter(self.true_params)
++
++@@ -1792,6 +1796,7 @@ class Test_seasonal_arma_trend_polynomia
++ tps = cls.true_params
++ cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
++
+++ @pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_results(self):
++ self.result = self.model.filter(self.true_params)
++
++@@ -1842,6 +1847,7 @@ class Test_seasonal_arma_diff_seasonal_d
++ super(Test_seasonal_arma_diff_seasonal_diff, cls).setup_class(
++ 47, *args, **kwargs)
++
+++ @pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_results(self):
++ self.result = self.model.filter(self.true_params)
++
++@@ -2099,6 +2105,7 @@ def test_results():
++ assert_almost_equal(res.maparams, [-0.5])
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_misc_exog():
++ # Tests for missing data
++ nobs = 20
++--- a/statsmodels/tsa/statespace/tests/test_varmax.py
+++++ b/statsmodels/tsa/statespace/tests/test_varmax.py
++@@ -15,6 +15,7 @@ import pytest
++
++ from statsmodels.tsa.statespace import varmax, sarimax
++ from statsmodels.iolib.summary import forg
+++from statsmodels.tsa.statespace.representation import warn_kalman
++
++ from .results import results_varmax
++
++@@ -27,6 +28,7 @@ varmax_path = os.path.join('results', 'r
++ varmax_results = pd.read_csv(os.path.join(current_path, varmax_path))
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ class CheckVARMAX(object):
++ """
++ Test Vector Autoregression against Stata's `dfactor` code (Stata's
++@@ -874,6 +876,7 @@ def test_misspecifications():
++ warnings.resetwarnings()
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_misc_exog():
++ # Tests for missing data
++ nobs = 20
++--- a/statsmodels/tsa/innovations/tests/test_arma_innovations.py
+++++ b/statsmodels/tsa/innovations/tests/test_arma_innovations.py
++@@ -8,8 +8,10 @@ from numpy.testing import assert_allclos
++
++ from statsmodels.tsa.innovations import arma_innovations
++ from statsmodels.tsa.statespace.sarimax import SARIMAX
+++from statsmodels.tsa.statespace.representation import warn_kalman
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ @pytest.mark.parametrize("ar_params,ma_params,sigma2", [
++ (np.array([]), np.array([]), 1),
++ (np.array([0.]), np.array([0.]), 1),
++--- a/statsmodels/tsa/statespace/tests/test_save.py
+++++ b/statsmodels/tsa/statespace/tests/test_save.py
++@@ -5,6 +5,7 @@ import pickle
++ import os
++ import tempfile
++
+++from statsmodels.tsa.statespace.representation import warn_kalman
++ import pytest
++
++ from statsmodels import datasets
++@@ -28,6 +29,7 @@ def temp_filename():
++ "{filename}.".format(filename=filename))
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_sarimax(temp_filename):
++ mod = sarimax.SARIMAX(macrodata['realgdp'].values, order=(4, 1, 0))
++ res = mod.smooth(mod.start_params)
++@@ -52,6 +54,7 @@ def test_sarimax_save_remove_data(temp_f
++ assert_allclose(res.llf, res2.llf)
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_sarimax_pickle():
++ mod = sarimax.SARIMAX(macrodata['realgdp'].values, order=(4, 1, 0))
++ pkl_mod = pickle.loads(pickle.dumps(mod))
++@@ -64,6 +67,7 @@ def test_sarimax_pickle():
++ assert_allclose(res.llf, pkl_res.llf)
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_structural(temp_filename):
++ mod = structural.UnobservedComponents(
++ macrodata['realgdp'].values, 'llevel')
++@@ -76,6 +80,7 @@ def test_structural(temp_filename):
++ assert_allclose(res.llf, res2.llf)
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_structural_pickle():
++ mod = structural.UnobservedComponents(
++ macrodata['realgdp'].values, 'llevel')
++@@ -89,6 +94,7 @@ def test_structural_pickle():
++ assert_allclose(res.llf, pkl_res.llf)
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_dynamic_factor(temp_filename):
++ mod = dynamic_factor.DynamicFactor(
++ macrodata[['realgdp', 'realcons']].diff().iloc[1:].values, k_factors=1,
++@@ -102,6 +108,7 @@ def test_dynamic_factor(temp_filename):
++ assert_allclose(res.llf, res2.llf)
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_dynamic_factor_pickle(temp_filename):
++ mod = varmax.VARMAX(
++ macrodata[['realgdp', 'realcons']].diff().iloc[1:].values,
++@@ -123,6 +130,7 @@ def test_dynamic_factor_pickle(temp_file
++ assert_allclose(res.llf, res2.llf)
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_varmax(temp_filename):
++ mod = varmax.VARMAX(
++ macrodata[['realgdp', 'realcons']].diff().iloc[1:].values,
++@@ -136,6 +144,7 @@ def test_varmax(temp_filename):
++ assert_allclose(res.llf, res2.llf)
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_varmax_pickle(temp_filename):
++ mod = varmax.VARMAX(
++ macrodata[['realgdp', 'realcons']].diff().iloc[1:].values,
++--- a/statsmodels/tsa/statespace/tests/test_structural.py
+++++ b/statsmodels/tsa/statespace/tests/test_structural.py
++@@ -10,6 +10,7 @@ import warnings
++ import numpy as np
++ from numpy.testing import assert_equal, assert_allclose, assert_raises
++ import pandas as pd
+++from statsmodels.tsa.statespace.representation import warn_kalman
++ import pytest
++
++ from statsmodels.datasets import macrodata
++@@ -203,6 +204,7 @@ def test_local_linear_deterministic_tren
++ run_ucm('local_linear_deterministic_trend', use_exact_diffuse=True)
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_local_linear_trend(close_figures):
++ run_ucm('local_linear_trend')
++ run_ucm('local_linear_trend', use_exact_diffuse=True)
++@@ -218,6 +220,7 @@ def test_random_trend(close_figures):
++ run_ucm('random_trend', use_exact_diffuse=True)
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_cycle(close_figures):
++ run_ucm('cycle_approx_diffuse')
++ run_ucm('cycle', use_exact_diffuse=True)
++@@ -238,11 +241,13 @@ def test_reg(close_figures):
++ run_ucm('reg', use_exact_diffuse=True)
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_rtrend_ar1(close_figures):
++ run_ucm('rtrend_ar1')
++ run_ucm('rtrend_ar1', use_exact_diffuse=True)
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ @pytest.mark.slow
++ def test_lltrend_cycle_seasonal_reg_ar1(close_figures):
++ run_ucm('lltrend_cycle_seasonal_reg_ar1_approx_diffuse')
++@@ -365,6 +370,7 @@ def test_forecast():
++ assert_allclose(actual, desired)
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_misc_exog():
++ # Tests for missing data
++ nobs = 20
++--- a/statsmodels/tsa/statespace/tests/test_var.py
+++++ b/statsmodels/tsa/statespace/tests/test_var.py
++@@ -15,6 +15,8 @@ import os
++ import numpy as np
++ from numpy.testing import assert_allclose
++ import pandas as pd
+++from statsmodels.tsa.statespace.representation import warn_kalman
+++import pytest
++
++ from statsmodels.tsa.statespace import varmax
++ from .results import results_var_R
++@@ -100,6 +102,7 @@ def test_var_basic():
++ # FEVD: TODO
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_var_c():
++ test = 'c'
++
++@@ -120,6 +123,7 @@ def test_var_c():
++ # FEVD: TODO
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_var_ct():
++ test = 'ct'
++
++@@ -140,6 +144,7 @@ def test_var_ct():
++ # FEVD: TODO
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_var_ct_as_exog0():
++ test = 'ct_as_exog0'
++
++@@ -163,6 +168,7 @@ def test_var_ct_as_exog0():
++ # FEVD: TODO
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_var_ct_as_exog1():
++ test = 'ct'
++
++@@ -191,6 +197,7 @@ def test_var_ct_as_exog1():
++ # FEVD: TODO
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_var_ctt():
++ test = 'ctt_as_exog1'
++
++@@ -216,6 +223,7 @@ def test_var_ctt():
++ # FEVD: TODO
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_var_ct_exog():
++ test = 'ct_exog'
++
++@@ -240,6 +248,7 @@ def test_var_ct_exog():
++ # FEVD: TODO
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_var_c_2exog():
++ test = 'c_2exog'
++
++--- a/statsmodels/tsa/statespace/tests/test_pickle.py
+++++ b/statsmodels/tsa/statespace/tests/test_pickle.py
++@@ -17,6 +17,7 @@ import pickle
++ import numpy as np
++ import pandas as pd
++ from numpy.testing import assert_equal, assert_allclose
+++from statsmodels.tsa.statespace.representation import warn_kalman
++ import pytest
++
++ from statsmodels.tsa.statespace import sarimax
++@@ -38,6 +39,7 @@ def data():
++ return data_
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_pickle_fit_sarimax(data):
++ # Fit an ARIMA(1,1,0) to log GDP
++ mod = sarimax.SARIMAX(data['lgdp'], order=(1, 1, 0))
++@@ -53,6 +55,7 @@ def test_pickle_fit_sarimax(data):
++ assert_allclose(res.impulse_responses(10), res.impulse_responses(10))
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_unobserved_components_pickle():
++ # Tests for missing data
++ nobs = 20
++@@ -85,6 +88,7 @@ def test_unobserved_components_pickle():
++ assert_allclose(res.impulse_responses(10), res.impulse_responses(10))
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_kalman_filter_pickle(data):
++ # Construct the statespace representation
++ true = results_kalman_filter.uc_uni
++@@ -133,6 +137,7 @@ def test_kalman_filter_pickle(data):
++ pkl_results.filtered_state[3][true['start']:])
++
++
+++@pytest.mark.xfail(condition=bool(warn_kalman),reason='https://bugs.debian.org/924036',strict=False)
++ def test_representation_pickle():
++ nobs = 10
++ k_endog = 2
--- /dev/null
--- /dev/null
++Description: Mark tests as expected failures when multiprocessing is unavailable
++
++This is currently the case on hurd-i386.
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: no
++
++--- a/statsmodels/graphics/tests/test_functional.py
+++++ b/statsmodels/graphics/tests/test_functional.py
++@@ -15,6 +15,12 @@ try:
++ import matplotlib.pyplot as plt
++ except ImportError:
++ pass
+++has_multiprocessing = True
+++try:
+++ import multiprocessing
+++ multiprocessing.Pool()
+++except ImportError:
+++ has_multiprocessing = False
++
++
++ data = elnino.load()
++@@ -27,6 +33,10 @@ data = data.raw_data[:, 1:]
++ def test_hdr_basic(close_figures):
++ try:
++ _, hdr = hdrboxplot(data, labels=labels, seed=12345)
+++ except ImportError:
+++ if not has_multiprocessing:
+++ pytest.xfail('Multiprocess not available')
+++ raise
++ except WindowsError:
++ pytest.xfail('Multiprocess randomly crashes in Windows testing')
++
++@@ -69,6 +79,10 @@ def test_hdr_basic(close_figures):
++ def test_hdr_basic_brute(close_figures, reset_randomstate):
++ try:
++ _, hdr = hdrboxplot(data, ncomp=2, labels=labels, use_brute=True)
+++ except ImportError:
+++ if not has_multiprocessing:
+++ pytest.xfail('Multiprocess not available')
+++ raise
++ except WindowsError:
++ pytest.xfail('Multiprocess randomly crashes in Windows testing')
++
++@@ -104,6 +118,10 @@ def test_hdr_plot(close_figures):
++ def test_hdr_alpha(close_figures):
++ try:
++ _, hdr = hdrboxplot(data, alpha=[0.7], seed=12345)
+++ except ImportError:
+++ if not has_multiprocessing:
+++ pytest.xfail('Multiprocess not available')
+++ raise
++ except WindowsError:
++ pytest.xfail('Multiprocess randomly crashes in Windows testing')
++
++@@ -119,6 +137,10 @@ def test_hdr_alpha(close_figures):
++ def test_hdr_multiple_alpha(close_figures):
++ try:
++ _, hdr = hdrboxplot(data, alpha=[0.4, 0.92], seed=12345)
+++ except ImportError:
+++ if not has_multiprocessing:
+++ pytest.xfail('Multiprocess not available')
+++ raise
++ except WindowsError:
++ pytest.xfail('Multiprocess randomly crashes in Windows testing')
++
++@@ -144,6 +166,10 @@ def test_hdr_threshold(close_figures):
++ try:
++ _, hdr = hdrboxplot(data, alpha=[0.8], threshold=0.93,
++ seed=12345)
+++ except ImportError:
+++ if not has_multiprocessing:
+++ pytest.xfail('Multiprocess not available')
+++ raise
++ except WindowsError:
++ pytest.xfail('Multiprocess randomly crashes in Windows testing')
++
++@@ -157,6 +183,10 @@ def test_hdr_threshold(close_figures):
++ def test_hdr_bw(close_figures):
++ try:
++ _, hdr = hdrboxplot(data, bw='cv_ml', seed=12345)
+++ except ImportError:
+++ if not has_multiprocessing:
+++ pytest.xfail('Multiprocess not available')
+++ raise
++ except WindowsError:
++ pytest.xfail('Multiprocess randomly crashes in Windows testing')
++
++@@ -170,6 +200,10 @@ def test_hdr_bw(close_figures):
++ def test_hdr_ncomp(close_figures):
++ try:
++ _, hdr = hdrboxplot(data, ncomp=3, seed=12345)
+++ except ImportError:
+++ if not has_multiprocessing:
+++ pytest.xfail('Multiprocess not available')
+++ raise
++ except WindowsError:
++ pytest.xfail('Multiprocess randomly crashes in Windows testing')
++
--- /dev/null
--- /dev/null
++Description: Markov* broken on armhf
++
++Warn on use, and xfail the affected tests.
++
++I suspect https://github.com/statsmodels/statsmodels/pull/5826 is the cause,
++but it improved things enough on amd64 that
++reverting it isn't a good solution.
++
++Author: Rebecca N. Palmer <rebecca_palmer@zoho.com>
++Forwarded: no
++
++--- a/statsmodels/tsa/regime_switching/markov_switching.py
+++++ b/statsmodels/tsa/regime_switching/markov_switching.py
++@@ -35,6 +35,9 @@ from statsmodels.tsa.statespace.tools im
++ prepare_exog,
++ _safe_cond
++ )
+++import platform
+++import sys
+++warn_rswitch_platform = "Regime switching models (Markov(Auto)Regression) can give wrong results on armhf (armv7)" if ((platform.uname()[4].startswith('arm') or platform.uname()[4].startswith('aarch')) and np.log2(sys.maxsize)<=32) else False # test results at end of https://buildd.debian.org/status/fetch.php?pkg=statsmodels&arch=armhf&ver=0.11.0-1&stamp=1580716740&raw=0 ; the use of log2 rather than 2**32 is to avoid overflow on Python 2
++
++ prefix_hamilton_filter_log_map = {
++ 's': shamilton_filter_log, 'd': dhamilton_filter_log,
++@@ -496,6 +499,8 @@ class MarkovSwitching(tsbase.TimeSeriesM
++
++ def __init__(self, endog, k_regimes, order=0, exog_tvtp=None, exog=None,
++ dates=None, freq=None, missing='none'):
+++ if warn_rswitch_platform:
+++ warnings.warn(warn_rswitch_platform)
++
++ # Properties
++ self.k_regimes = k_regimes
++--- a/statsmodels/tsa/regime_switching/tests/test_markov_autoregression.py
+++++ b/statsmodels/tsa/regime_switching/tests/test_markov_autoregression.py
++@@ -15,6 +15,8 @@ import pytest
++
++ from statsmodels.tools import add_constant
++ from statsmodels.tsa.regime_switching import markov_autoregression
+++from statsmodels.tsa.regime_switching.markov_switching import warn_rswitch_platform
+++pytestmark = pytest.mark.xfail(condition=bool(warn_rswitch_platform),reason="known broken on armhf",strict=False)
++
++ current_path = os.path.dirname(os.path.abspath(__file__))
++
++--- a/statsmodels/tsa/regime_switching/tests/test_markov_regression.py
+++++ b/statsmodels/tsa/regime_switching/tests/test_markov_regression.py
++@@ -15,6 +15,8 @@ import pytest
++
++ from statsmodels.tsa.regime_switching import (markov_switching,
++ markov_regression)
+++from statsmodels.tsa.regime_switching.markov_switching import warn_rswitch_platform
+++pytestmark = pytest.mark.xfail(condition=bool(warn_rswitch_platform),reason="known broken on armhf",strict=False)
++
++
++ current_path = os.path.dirname(os.path.abspath(__file__))
--- /dev/null
--- /dev/null
++Document: python-statsmodels-doc
++Title: Statsmodels documentation
++Author: Statsmodels Developers
++Abstract: Documentation for Statsmodels classes and functions for the
++ estimation of many different statistical models, as well as for
++ conducting statistical tests and exploring statistical data. An
++ extensive list of result statistics is available for each estimator.
++Section: Programming/Python
++
++Format: HTML
++Index: /usr/share/doc/python-statsmodels-doc/html/index.html
++Files: /usr/share/doc/python-statsmodels-doc/html/*
--- /dev/null
--- /dev/null
++build/html
--- /dev/null
--- /dev/null
++examples/*
--- /dev/null
--- /dev/null
++usr/share/doc/python-statsmodels-doc/examples usr/share/doc/python3-statsmodels/examples
++usr/share/doc/python-statsmodels-doc/html usr/share/doc/python3-statsmodels/html
++usr/share/javascript/mathjax/MathJax.js usr/share/doc/python-statsmodels-doc/html/_static/MathJax.js
++usr/share/javascript/requirejs/require.min.js usr/share/doc/python-statsmodels-doc/html/_static/require.min.js
--- /dev/null
--- /dev/null
++symlink_to_dir /usr/share/doc/python-statsmodels-doc/html ../python-statsmodels/html 0.10.2-1~ python-statsmodels-doc
++symlink_to_dir /usr/share/doc/python-statsmodels-doc/examples ../python-statsmodels/examples 0.10.2-1~ python-statsmodels-doc
--- /dev/null
--- /dev/null
++# these are test references, not user-facing text
++python3-statsmodels binary: national-encoding */statsmodels/tsa/vector_ar/tests/JMulTi_results/*.txt
--- /dev/null
--- /dev/null
++#!/usr/bin/make -f
++# -*- mode: makefile; coding: utf-8 -*-
++
++# override HOME (for weave) and matplotlib config directory
++# to allow building in chroots with read-only HOME
++export HOME=$(CURDIR)/build
++export MPLCONFIGDIR=$(HOME)
++
++# block internet access
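++# (127.0.0.1:9 is the discard port; nothing should be listening there, so any attempted connection fails fast)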
++export http_proxy=127.0.0.1:9
++export https_proxy=127.0.0.1:9
++
++export PY3VER=$(shell py3versions -vd)
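++# build date/time derived from SOURCE_DATE_EPOCH; substituted into the generated HTML below for reproducibility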
++SOURCE_DATE:=$(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "+%a, %d %b %Y" || echo "xxx, xx xxx xxxx")
++SOURCE_TIME:=$(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "+%T" || echo "xx:xx:xx")
++
++export PYBUILD_NAME=statsmodels
++export PYBUILD_INSTALL_ARGS=--install-layout=deb
++
++export DEB_BUILD_MAINT_OPTIONS=hardening=+all
++
++%:
++ dh $@ --with python3,sphinxdoc --buildsystem=pybuild
++
++# To guarantee HOME existence with mpl 1.3.0
++# See https://github.com/matplotlib/matplotlib/issues/2300
++# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=719384
++$(HOME):
++ mkdir "$@"
++ : # Hardcode the backend to Agg to avoid doc build and test failures
++ echo "backend : Agg" >| $(MPLCONFIGDIR)/matplotlibrc
++
++override_dh_auto_build-arch: $(HOME)
++ dh_auto_build
++
++override_dh_auto_build-indep: $(HOME)
++ : # Documentation depends on built package
++ dh_auto_build
++ : # I: Generate documentation
++ : # Build documentation -- ensure build/html exists even when nodoc is set
++ : # Reproducibility: using faketime to stop the clock (or slow it enough to get all 00:00:00) makes glm_weights.ipynb (near-)hang
++ mkdir -p build/html
++ifeq (,$(filter nodoc,$(DEB_BUILD_OPTIONS)))
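++ : # fill the local dataset cache (STATSMODELS_DATA) ahead of the network-blocked documentation build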
++ python3 debian/datasets/prepopulate_cache.py
++ PYTHONPATH=$(shell pybuild --print build_dir --interpreter python3) \
++ BUILDDIR=$(CURDIR)/build \
++ STATSMODELS_DATA=$(CURDIR)/build/datacache/ \
++ LC_ALL=C.UTF-8 make -C docs html
++ mv docs/build/* build/
++ : # replace timestamps and build paths in examples output for reproducibility
++ for html in `find build/html examples -name _modules -prune -o -name "*.html" -o -name "*.ipynb" -o -name "*.ipynb.txt"` ; do \
++ sed -i -e 's#$(CURDIR)/.pybuild/[^/]*/build/statsmodels/#/usr/lib/python3/dist-packages/statsmodels/#g' \
++ -e 's#\(Date:.*\)[A-Z][a-z]\+, \+[0-9]\+,\? \+[A-Z][a-z]\+,\? \+[0-9]\+#\1$(SOURCE_DATE)#g' \
++ -e 's#\(Time:.*\)[0-9][0-9]:[0-9][0-9]:[0-9][0-9]#\1$(SOURCE_TIME)#g' $${html} ; \
++ done
++endif
++
++override_dh_installdocs:
++ rm -rf docs/source/generated/
++ dh_installdocs
++
++override_dh_auto_clean:
++ # dh_auto_clean is deliberately not run: it would try to run setup.py clean, which is an error
++ -rm -rf cythonize.dat \
++ *.egg-info \
++ *.png \
++ .pybuild \
++ .pytest_cache \
++ build \
++ docs/build/ \
++ docs/rehab.table \
++ docs/salary.table \
++ docs/source/datasets/generated \
++ docs/source/examples/notebooks \
++ docs/source/savefig \
++ docs/source/dev/generated \
++ docs/source/datasets/statsmodels.datasets.*.rst \
++ docs/source/examples/notebooks/generated \
++ examples/executed \
++ tools/hash_dict.pickle
++ find . -name __pycache__ -print0 | xargs -0 rm -rf
++ find . -name '*.pyx' -print0 | sed -e "s/\.pyx/.c/g" | xargs -0 rm -f
++ find . -name '*.pyx.in' -print0 | sed -e "s/\.pyx\.in/.pyx/g" | xargs -0 rm -f
++ : # Remove autogenerated version.py
++ rm -f statsmodels/version.py
++
++override_dh_install:
++ dh_install
++ rm -f debian/*/usr/setup.cfg
++ : # Remove files byte-compiled during testing
++ find debian -name '*.pyc' -delete
++ rm -f debian/*/usr/lib/*/dist-packages/enet_poisson.csv debian/*/usr/lib/*/dist-packages/enet_binomial.csv
++ : # strip docs/ since it isn't really part of the Python module; the -doc package ships it
++ : # TODO find debian -wholename \*scikits/statsmodels/docs | xargs rm -rf
++ find debian -iname COPYING -o -iname 'LICENSE*' | xargs -r rm -f
++
++# statsmodels has a conftest.py that uses testroot-only features, so the tests must be run from the statsmodels directory, not its parent
++# https://github.com/statsmodels/statsmodels/issues/5108
++# the cd puts us "anywhere but pkgroot" so that subprocesses (test_lazy_imports) don't import the source tree (the current directory is tried before PYTHONPATH), which would fail because the built .so files aren't there
++# the inner 'for' (expected to match a single item, cpython3_${testpy3ver}_statsmodels/build) exists because globs aren't expanded when setting environment variables
++# TEST_SUCCESS is used because a for loop's exit status is that of its last iteration, not the "fail if any iteration fails" wanted here
++# Tests are ignored (and expected to crash) on mips64el, #968210
++override_dh_auto_test:
++ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
++ TEST_SUCCESS=true ; cd tools && for testpy3ver in `py3versions -vs` ; do \
++ for testpath in ../.pybuild/*$${testpy3ver}*/*/statsmodels ; do \
++ PYTHONPATH=$${testpath}/.. python$${testpy3ver} -m pytest -v $${testpath} || TEST_SUCCESS=false ; \
++ rm -rf $${testpath}/.pytest_cache ; \
++ done ; done ; [ `dpkg --print-architecture` = mips64el ] || $${TEST_SUCCESS}
++endif
++
++## immediately useable documentation and exemplar scripts/data
++override_dh_compress:
++ dh_compress -X.py -X.html -X.pdf -X.css -X.jpg -X.txt -X.js -X.json -X.rtc -X.inv -Xobjects.inv
++
++override_dh_link:
++ : # deduplicate images - the ||true is because we only build-depend on jdupes if we're building documentation
++ jdupes -r -l debian/python-statsmodels-doc/usr/share/doc || true
++ dh_link
++
++## move binary libraries into -lib
++override_dh_auto_install:
++ dh_auto_install
++ for PACKAGE_NAME in python3-statsmodels; do \
++ for lib in $$(find debian/$${PACKAGE_NAME}/usr -name '*.so'); do \
++ sdir=$$(dirname $$lib) ; \
++ tdir=debian/$${PACKAGE_NAME}-lib/$${sdir#*$${PACKAGE_NAME}/} ; \
++ mkdir -p $$tdir ; \
++ echo "I: Moving '$$lib' into '$$tdir'." ; \
++ mv $$lib $$tdir ; \
++ done ; \
++ done
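++ : # let dh_numpy3 add the appropriate versioned numpy dependency for the compiled extensions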
++ dh_numpy3
++
++override_dh_fixperms:
++ dh_fixperms
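++ : # drop any spurious executable bit from shipped .txt files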
++ find debian -name "*.txt" -exec chmod -x \{\} \;
--- /dev/null
--- /dev/null
++3.0 (quilt)
--- /dev/null
--- /dev/null
++# Perfectly fine source file; it just contains a small HTML snippet written on one line
++statsmodels source: source-is-missing docs/source/_static/scripts.js *
--- /dev/null
--- /dev/null
++Tests: upstreamtests
++Depends: python3-all, python3-statsmodels, python3-pytest,
++ python3-matplotlib,
++ python3-joblib,
++ python3-colorama,
++ python3-cvxopt,
++ python3-dateutil,
++ python3-tk
++Restrictions: allow-stderr
++
--- /dev/null
--- /dev/null
++#!/bin/sh
++set -e
++pys=`py3versions -r 2>/dev/null`
++# don't import the source tree
++cd "${AUTOPKGTEST_TMP}"
++# avoid timeout on slow architectures
++pytest_options=--skip-slow
++if [ `dpkg --print-architecture` = amd64 ] ; then
++ pytest_options= ;
++fi
++
++for py in $pys ; do
++ $py -m pytest -v $pytest_options /usr/lib/python3/dist-packages/statsmodels ;
++done
--- /dev/null
--- /dev/null
++Name: statsmodels
++Repository: https://github.com/statsmodels/statsmodels
++Documentation: https://www.statsmodels.org/stable/
++Bug-Database: https://github.com/statsmodels/statsmodels/issues
++Contact: https://groups.google.com/group/pystatsmodels
++Reference:
++ Title: "Statsmodels: Econometric and statistical modeling with python"
++ Eprint: https://conference.scipy.org/proceedings/scipy2010/pdfs/seabold.pdf
++ Author: Seabold, Skipper and Perktold, Josef
++ Booktitle: 9th Python in Science Conference
++ Year: 2010
--- /dev/null
--- /dev/null
++version=4
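++# drop the repack suffix when comparing versions, and make rc/dev upstream tags sort before final releases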
++opts="dversionmangle=s/.dfsg[0-9]*$//,uversionmangle=s/rc/~rc/g;s/dev/~dev/g,filenamemangle=s/.*\/(.*)/statsmodels-$1/,repacksuffix=+dfsg" \
++ https://github.com/statsmodels/statsmodels/tags .*/v?(\d[\d.]+)\.tar\.gz